-rw-r--r--  Android.mk | 8
-rw-r--r--  cmdline/cmdline_types.h | 2
-rw-r--r--  compiler/Android.bp | 1
-rw-r--r--  compiler/common_compiler_test.cc | 2
-rw-r--r--  compiler/driver/compiler_driver.cc | 22
-rw-r--r--  compiler/driver/compiler_driver.h | 2
-rw-r--r--  compiler/driver/compiler_options.h | 5
-rw-r--r--  compiler/image_test.cc | 3
-rw-r--r--  compiler/jit/jit_compiler.cc | 16
-rw-r--r--  compiler/oat_test.cc | 5
-rw-r--r--  compiler/oat_writer.cc | 195
-rw-r--r--  compiler/oat_writer.h | 24
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc | 37
-rw-r--r--  compiler/optimizing/induction_var_analysis.h | 7
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc | 25
-rw-r--r--  compiler/optimizing/induction_var_range.h | 10
-rw-r--r--  compiler/optimizing/loop_optimization.cc | 149
-rw-r--r--  compiler/optimizing/loop_optimization.h | 4
-rw-r--r--  compiler/optimizing/nodes.h | 7
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc | 2
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 33
-rw-r--r--  compiler/optimizing/sharpening.cc | 4
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 2
-rw-r--r--  compiler/utils/arm/assembler_thumb2_test.cc | 7
-rw-r--r--  compiler/verifier_deps_test.cc (renamed from runtime/verifier/verifier_deps_test.cc) | 76
-rw-r--r--  dex2oat/dex2oat.cc | 41
-rw-r--r--  dexdump/Android.bp | 1
-rw-r--r--  dexdump/dexdump.cc | 53
-rw-r--r--  dexdump/dexdump_cfg.cc | 395
-rw-r--r--  dexdump/dexdump_cfg.h | 31
-rw-r--r--  dexlayout/dex_ir.cc | 3
-rw-r--r--  dexlayout/dexlayout.cc | 88
-rw-r--r--  dexlayout/dexlayout.h | 1
-rw-r--r--  dexlayout/dexlayout_main.cc | 4
-rw-r--r--  oatdump/oatdump.cc | 17
-rw-r--r--  profman/profman.cc | 2
-rw-r--r--  runtime/Android.bp | 1
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.cc | 32
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.h | 18
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.cc | 30
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.h | 18
-rw-r--r--  runtime/arch/instruction_set_features.cc | 97
-rw-r--r--  runtime/arch/instruction_set_features.h | 24
-rw-r--r--  runtime/arch/mips/instruction_set_features_mips.cc | 28
-rw-r--r--  runtime/arch/mips/instruction_set_features_mips.h | 18
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.cc | 25
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.h | 22
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86.cc | 82
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86.h | 33
-rw-r--r--  runtime/arch/x86_64/instruction_set_features_x86_64.h | 35
-rw-r--r--  runtime/art_method.cc | 13
-rw-r--r--  runtime/base/time_utils.h | 4
-rw-r--r--  runtime/check_jni.cc | 29
-rw-r--r--  runtime/class_linker.cc | 88
-rw-r--r--  runtime/class_linker.h | 51
-rw-r--r--  runtime/class_table.cc | 17
-rw-r--r--  runtime/class_table.h | 10
-rw-r--r--  runtime/common_throws.cc | 9
-rw-r--r--  runtime/common_throws.h | 6
-rw-r--r--  runtime/debugger.cc | 4
-rw-r--r--  runtime/debugger.h | 1
-rw-r--r--  runtime/dex_file-inl.h | 4
-rw-r--r--  runtime/dex_file.h | 3
-rw-r--r--  runtime/dex_instruction.h | 15
-rw-r--r--  runtime/dex_instruction_list.h | 4
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 17
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 2
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 6
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc | 4
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 11
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 27
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 29
-rw-r--r--  runtime/gc/accounting/card_table.cc | 20
-rw-r--r--  runtime/gc/accounting/card_table.h | 13
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 4
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 18
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc | 2
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 27
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 27
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 16
-rw-r--r--  runtime/gc/collector/semi_space.cc | 5
-rw-r--r--  runtime/gc/gc_pause_listener.h | 34
-rw-r--r--  runtime/gc/heap.cc | 13
-rw-r--r--  runtime/gc/heap.h | 13
-rw-r--r--  runtime/gc/space/large_object_space.cc | 17
-rw-r--r--  runtime/gc/space/large_object_space.h | 8
-rw-r--r--  runtime/indirect_reference_table-inl.h | 12
-rw-r--r--  runtime/indirect_reference_table.cc | 293
-rw-r--r--  runtime/indirect_reference_table.h | 422
-rw-r--r--  runtime/indirect_reference_table_test.cc | 253
-rw-r--r--  runtime/interpreter/interpreter.cc | 85
-rw-r--r--  runtime/interpreter/interpreter.h | 5
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 405
-rw-r--r--  runtime/interpreter/interpreter_common.h | 77
-rw-r--r--  runtime/interpreter/interpreter_mterp_impl.h | 1
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 129
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.h | 1
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 35
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 13
-rw-r--r--  runtime/java_vm_ext.cc | 62
-rw-r--r--  runtime/jit/jit.cc | 2
-rw-r--r--  runtime/jit/jit.h | 5
-rw-r--r--  runtime/jni_env_ext.cc | 4
-rw-r--r--  runtime/jni_env_ext.h | 6
-rw-r--r--  runtime/jni_internal.cc | 2
-rw-r--r--  runtime/jni_internal_test.cc | 16
-rw-r--r--  runtime/mem_map.cc | 34
-rw-r--r--  runtime/mem_map.h | 4
-rw-r--r--  runtime/method_handles-inl.h | 188
-rw-r--r--  runtime/method_handles.h | 38
-rw-r--r--  runtime/mirror/object-inl.h | 2
-rw-r--r--  runtime/mirror/object.h | 2
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 28
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/java_lang_VMClassLoader.cc | 58
-rw-r--r--  runtime/oat_file.h | 4
-rw-r--r--  runtime/openjdkjvmti/events.cc | 74
-rw-r--r--  runtime/openjdkjvmti/events.h | 2
-rw-r--r--  runtime/openjdkjvmti/heap.cc | 4
-rw-r--r--  runtime/openjdkjvmti/object_tagging.cc | 36
-rw-r--r--  runtime/openjdkjvmti/object_tagging.h | 10
-rw-r--r--  runtime/parsed_options.cc | 6
-rw-r--r--  runtime/reflection.cc | 2
-rw-r--r--  runtime/runtime.cc | 22
-rw-r--r--  runtime/runtime.h | 2
-rw-r--r--  runtime/thread.cc | 4
-rw-r--r--  runtime/ti/agent.cc | 119
-rw-r--r--  runtime/ti/agent.h | 62
-rw-r--r--  runtime/utils.cc | 364
-rw-r--r--  runtime/utils.h | 6
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h | 8
-rw-r--r--  runtime/vdex_file.cc | 7
-rw-r--r--  runtime/vdex_file.h | 4
-rw-r--r--  runtime/verifier/reg_type_cache.cc | 3
-rw-r--r--  runtime/verifier/verifier_deps.cc | 40
-rw-r--r--  runtime/verifier/verifier_deps.h | 12
-rw-r--r--  runtime/well_known_classes.cc | 2
-rw-r--r--  runtime/well_known_classes.h | 1
-rw-r--r--  test/115-native-bridge/nativebridge.cc | 57
-rw-r--r--  test/151-OpenFileLimit/expected.txt | 2
-rw-r--r--  test/151-OpenFileLimit/info.txt | 5
-rwxr-xr-x  test/151-OpenFileLimit/run | 21
-rw-r--r--  test/151-OpenFileLimit/src/Main.java | 6
-rw-r--r--  test/530-checker-loops2/src/Main.java | 12
-rw-r--r--  test/552-checker-sharpening/src/Main.java | 33
-rw-r--r--  test/621-checker-new-instance/expected.txt | 0
-rw-r--r--  test/621-checker-new-instance/info.txt | 1
-rw-r--r--  test/621-checker-new-instance/src/Main.java | 53
-rw-r--r--  test/907-get-loaded-classes/get_loaded_classes.cc | 13
-rwxr-xr-x  test/907-get-loaded-classes/run | 2
-rwxr-xr-x  test/908-gc-start-finish/build | 17
-rw-r--r--  test/908-gc-start-finish/expected.txt | 12
-rw-r--r--  test/908-gc-start-finish/gc_callbacks.cc | 105
-rw-r--r--  test/908-gc-start-finish/gc_callbacks.h | 30
-rw-r--r--  test/908-gc-start-finish/info.txt | 1
-rwxr-xr-x  test/908-gc-start-finish/run | 43
-rw-r--r--  test/908-gc-start-finish/src/Main.java | 79
-rw-r--r--  test/909-attach-agent/attach.cc | 79
-rw-r--r--  test/909-attach-agent/attach.h | 30
-rwxr-xr-x  test/909-attach-agent/build | 17
-rw-r--r--  test/909-attach-agent/expected.txt | 3
-rw-r--r--  test/909-attach-agent/info.txt | 1
-rwxr-xr-x  test/909-attach-agent/run | 27
-rw-r--r--  test/909-attach-agent/src/Main.java | 35
-rwxr-xr-x  test/956-methodhandles/build | 3
-rw-r--r--  test/956-methodhandles/src/Main.java | 17
-rw-r--r--  test/Android.bp | 2
-rw-r--r--  test/Android.run-test.mk | 14
-rw-r--r--  test/VerifierDeps/MyClassWithNoSuper.smali | 16
-rw-r--r--  test/VerifierDeps/MyClassWithNoSuperButFailures.smali | 21
-rw-r--r--  test/VerifierDeps/MyVerificationFailure.smali | 21
-rw-r--r--  test/dexdump/invoke-polymorphic.dex | Bin 0 -> 1160 bytes
-rw-r--r--  test/dexdump/invoke-polymorphic.lst | 3
-rw-r--r--  test/dexdump/invoke-polymorphic.txt | 109
-rw-r--r--  test/dexdump/invoke-polymorphic.xml | 33
-rw-r--r--  test/ti-agent/common_load.cc | 7
-rwxr-xr-x  tools/buildbot-build.sh | 10
177 files changed, 4307 insertions(+), 1929 deletions(-)
diff --git a/Android.mk b/Android.mk
index 2647268d07..b2716cdf96 100644
--- a/Android.mk
+++ b/Android.mk
@@ -568,3 +568,11 @@ TEST_ART_TARGET_SYNC_DEPS :=
# m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt
.PHONY: art-boot-image
art-boot-image: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME)
+
+.PHONY: art-job-images
+art-job-images: \
+ $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
+ $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
+ $(HOST_OUT_EXECUTABLES)/dex2oats \
+ $(HOST_OUT_EXECUTABLES)/dex2oatds \
+ $(HOST_OUT_EXECUTABLES)/profman
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 72d7df31fb..13a323533c 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -407,7 +407,7 @@ struct CmdlineType<std::vector<ti::Agent>> : CmdlineTypeParser<std::vector<ti::A
Result ParseAndAppend(const std::string& args,
std::vector<ti::Agent>& existing_value) {
- existing_value.push_back(ti::Agent::Create(args));
+ existing_value.emplace_back(args);
return Result::SuccessNoValue();
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 61f682c2bd..0b148598cf 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -342,6 +342,7 @@ art_cc_test {
"utils/string_reference_test.cc",
"utils/swap_space_test.cc",
"utils/test_dex_file_builder_test.cc",
+ "verifier_deps_test.cc",
"jni/jni_cfi_test.cc",
"optimizing/codegen_test.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 51bf9ea3bd..2f9164c0e0 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -156,7 +156,7 @@ void CommonCompilerTest::SetUp() {
const InstructionSet instruction_set = kRuntimeISA;
// Take the default set of instruction features from the build.
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
runtime_->SetInstructionSet(instruction_set);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index dbde41c46e..56b4ebd608 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -72,6 +72,7 @@
#include "verifier/method_verifier.h"
#include "verifier/method_verifier-inl.h"
#include "verifier/verifier_log_mode.h"
+#include "verifier/verifier_deps.h"
namespace art {
@@ -1968,6 +1969,7 @@ class VerifyClassVisitor : public CompilationVisitor {
hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
Handle<mirror::Class> klass(
hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ verifier::MethodVerifier::FailureKind failure_kind;
if (klass.Get() == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1980,7 +1982,8 @@ class VerifyClassVisitor : public CompilationVisitor {
Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
soa.Self(), dex_file, false)));
std::string error_msg;
- if (verifier::MethodVerifier::VerifyClass(soa.Self(),
+ failure_kind =
+ verifier::MethodVerifier::VerifyClass(soa.Self(),
&dex_file,
dex_cache,
class_loader,
@@ -1988,15 +1991,15 @@ class VerifyClassVisitor : public CompilationVisitor {
Runtime::Current()->GetCompilerCallbacks(),
true /* allow soft failures */,
log_level_,
- &error_msg) ==
- verifier::MethodVerifier::kHardFailure) {
+ &error_msg);
+ if (failure_kind == verifier::MethodVerifier::kHardFailure) {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
manager_->GetCompiler()->SetHadHardVerifierFailure();
}
} else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
CHECK(klass->IsResolved()) << klass->PrettyClass();
- class_linker->VerifyClass(soa.Self(), klass, log_level_);
+ failure_kind = class_linker->VerifyClass(soa.Self(), klass, log_level_);
if (klass->IsErroneous()) {
// ClassLinker::VerifyClass throws, which isn't useful in the compiler.
@@ -2008,13 +2011,18 @@ class VerifyClassVisitor : public CompilationVisitor {
CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
<< klass->PrettyDescriptor() << ": state=" << klass->GetStatus();
- // It is *very* problematic if there are verification errors in the boot classpath. For example,
- // we rely on things working OK without verification when the decryption dialog is brought up.
- // So abort in a debug build if we find this violated.
+ // It is *very* problematic if there are verification errors in the boot classpath.
+ // For example, we rely on things working OK without verification when the
+ // decryption dialog is brought up. So abort in a debug build if we find this violated.
DCHECK(!manager_->GetCompiler()->GetCompilerOptions().IsBootImage() || klass->IsVerified())
<< "Boot classpath class " << klass->PrettyClass()
<< " failed to fully verify.";
+ } else {
+ // Make the skip a soft failure, essentially being considered as verify at runtime.
+ failure_kind = verifier::MethodVerifier::kSoftFailure;
}
+ verifier::VerifierDeps::MaybeRecordVerificationStatus(
+ dex_file, class_def.class_idx_, failure_kind);
soa.Self()->AssertNoPendingException();
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 9a4dd857fc..4a48f9c841 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -50,6 +50,7 @@ class DexCache;
namespace verifier {
class MethodVerifier;
+class VerifierDepsTest;
} // namespace verifier
class BitVector;
@@ -578,6 +579,7 @@ class CompilerDriver {
const BitVector* current_dex_to_dex_methods_;
friend class CompileClassVisitor;
+ friend class verifier::VerifierDepsTest;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 4eb695493b..56b632d6b4 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -29,6 +29,10 @@
namespace art {
+namespace verifier {
+ class VerifierDepsTest;
+}
+
class DexFile;
class CompilerOptions FINAL {
@@ -338,6 +342,7 @@ class CompilerOptions FINAL {
friend class Dex2Oat;
friend class CommonCompilerTest;
+ friend class verifier::VerifierDepsTest;
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 9e94b9d861..8fdf6fca68 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -260,7 +260,8 @@ void CompilationHelper::Compile(CompilerDriver* driver,
OatWriter* const oat_writer = oat_writers[i].get();
ElfWriter* const elf_writer = elf_writers[i].get();
std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
- oat_writer->PrepareLayout(driver, writer.get(), cur_dex_files, &patcher);
+ oat_writer->Initialize(driver, writer.get(), cur_dex_files);
+ oat_writer->PrepareLayout(&patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
elf_writer->PrepareDynamicSection(rodata_size,
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index c398703bb8..f83d37cdf2 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -124,30 +124,30 @@ JitCompiler::JitCompiler() {
if (option.starts_with("--instruction-set-variant=")) {
StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
VLOG(compiler) << "JIT instruction set variant " << str;
- instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
- instruction_set, str.as_string(), &error_msg));
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set, str.as_string(), &error_msg);
if (instruction_set_features_ == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
} else if (option.starts_with("--instruction-set-features=")) {
StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
VLOG(compiler) << "JIT instruction set features " << str;
- if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
- instruction_set, "default", &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set, "default", &error_msg);
if (instruction_set_features_ == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
}
- instruction_set_features_.reset(
- instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ instruction_set_features_ =
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg);
if (instruction_set_features_ == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
}
}
if (instruction_set_features_ == nullptr) {
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
}
cumulative_logger_.reset(new CumulativeLogger("jit times"));
compiler_driver_.reset(new CompilerDriver(
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index ffeff760c6..64ee574889 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -92,7 +92,7 @@ class OatTest : public CommonCompilerTest {
const std::vector<std::string>& compiler_options,
/*out*/std::string* error_msg) {
ASSERT_TRUE(error_msg != nullptr);
- insn_features_.reset(InstructionSetFeatures::FromVariant(insn_set, "default", error_msg));
+ insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg);
ASSERT_TRUE(insn_features_ != nullptr) << error_msg;
compiler_options_.reset(new CompilerOptions);
for (const std::string& option : compiler_options) {
@@ -203,7 +203,8 @@ class OatTest : public CommonCompilerTest {
}
linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
instruction_set_features_.get());
- oat_writer.PrepareLayout(compiler_driver_.get(), nullptr, dex_files, &patcher);
+ oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
+ oat_writer.PrepareLayout(&patcher);
size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer.GetOatSize() - rodata_size;
elf_writer->PrepareDynamicSection(rodata_size,
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 52134e8b9b..6cbca7a6dc 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -299,6 +299,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings)
vdex_size_(0u),
vdex_dex_files_offset_(0u),
vdex_verifier_deps_offset_(0u),
+ vdex_quickening_info_offset_(0u),
oat_size_(0u),
bss_start_(0u),
bss_size_(0u),
@@ -314,6 +315,8 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings)
size_dex_file_(0),
size_verifier_deps_(0),
size_verifier_deps_alignment_(0),
+ size_quickening_info_(0),
+ size_quickening_info_alignment_(0),
size_interpreter_to_interpreter_bridge_(0),
size_interpreter_to_compiled_code_bridge_(0),
size_jni_dlsym_lookup_(0),
@@ -519,15 +522,9 @@ bool OatWriter::WriteAndOpenDexFiles(
return true;
}
-void OatWriter::PrepareLayout(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files,
- linker::MultiOatRelativePatcher* relative_patcher) {
+void OatWriter::PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher) {
CHECK(write_state_ == WriteState::kPrepareLayout);
- compiler_driver_ = compiler;
- image_writer_ = image_writer;
- dex_files_ = &dex_files;
relative_patcher_ = relative_patcher;
SetMultiOatRelativePatcherAdjustment();
@@ -706,9 +703,10 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
public:
- InitCodeMethodVisitor(OatWriter* writer, size_t offset)
+ InitCodeMethodVisitor(OatWriter* writer, size_t offset, size_t quickening_info_offset)
: OatDexMethodVisitor(writer, offset),
- debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()) {
+ debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()),
+ current_quickening_info_offset_(quickening_info_offset) {
writer_->absolute_patch_locations_.reserve(
writer_->compiler_driver_->GetNonRelativeLinkerPatchCount());
}
@@ -726,6 +724,9 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+ if (it.GetMethodCodeItem() != nullptr) {
+ current_quickening_info_offset_ += sizeof(uint32_t);
+ }
if (compiled_method != nullptr) {
// Derived from CompiledMethod.
uint32_t quick_code_offset = 0;
@@ -771,15 +772,28 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
uint32_t vmap_table_offset = method_header->vmap_table_offset_;
- // If we don't have quick code, then we must have a vmap, as that is how the dex2dex
- // compiler records its transformations.
- DCHECK(!quick_code.empty() || vmap_table_offset != 0);
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
- if (vmap_table_offset != 0u && code_offset != 0u) {
- vmap_table_offset += code_offset;
- DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets";
+ if (!compiled_method->GetQuickCode().empty()) {
+ // If the code is compiled, we write the offset of the stack map relative
+ // to the code.
+ if (vmap_table_offset != 0u) {
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
+ } else {
+ if (kIsVdexEnabled) {
+ // We write the offset in the .vdex file.
+ DCHECK_EQ(vmap_table_offset, 0u);
+ vmap_table_offset = current_quickening_info_offset_;
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ current_quickening_info_offset_ += map.size() * sizeof(map.front());
+ } else {
+ // We write the offset of the quickening info relative to the code.
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
}
uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
@@ -878,6 +892,9 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
// Cache of compiler's --debuggable option.
const bool debuggable_;
+
+ // Offset in the vdex file for the quickening info.
+ uint32_t current_quickening_info_offset_;
};
class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
@@ -893,21 +910,25 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
if (compiled_method != nullptr) {
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
- DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
+ // If vdex is enabled, we only emit the stack map of compiled code. The quickening info will
+ // be in the vdex file.
+ if (!compiled_method->GetQuickCode().empty() || !kIsVdexEnabled) {
+ DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
- ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
- uint32_t map_size = map.size() * sizeof(map[0]);
- if (map_size != 0u) {
- size_t offset = dedupe_map_.GetOrCreate(
- map.data(),
- [this, map_size]() {
- uint32_t new_offset = offset_;
- offset_ += map_size;
- return new_offset;
- });
- // Code offset is not initialized yet, so set the map offset to 0u-offset.
- DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
- oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ uint32_t map_size = map.size() * sizeof(map[0]);
+ if (map_size != 0u) {
+ size_t offset = dedupe_map_.GetOrCreate(
+ map.data(),
+ [this, map_size]() {
+ uint32_t new_offset = offset_;
+ offset_ += map_size;
+ return new_offset;
+ });
+ // Code offset is not initialized yet, so set the map offset to 0u-offset.
+ DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
+ oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ }
}
++method_offsets_index_;
}
@@ -1372,7 +1393,10 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
<< compiled_method->GetVmapTable().size() << " " << map_offset << " "
<< dex_file_->PrettyMethod(it.GetMemberIndex());
- if (map_offset != 0u) {
+ // If vdex is enabled, only emit the map for compiled code. The quickening info
+ // is emitted in the vdex already.
+ if (map_offset != 0u &&
+ !(kIsVdexEnabled && compiled_method->GetQuickCode().empty())) {
// Transform map_offset to actual oat data offset.
map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
DCHECK_NE(map_offset, 0u);
@@ -1539,21 +1563,18 @@ size_t OatWriter::InitOatCode(size_t offset) {
}
size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
- #define VISIT(VisitorType) \
- do { \
- VisitorType visitor(this, offset); \
- bool success = VisitDexMethods(&visitor); \
- DCHECK(success); \
- offset = visitor.GetOffset(); \
- } while (false)
+ InitCodeMethodVisitor code_visitor(this, offset, vdex_quickening_info_offset_);
+ bool success = VisitDexMethods(&code_visitor);
+ DCHECK(success);
+ offset = code_visitor.GetOffset();
- VISIT(InitCodeMethodVisitor);
if (HasImage()) {
- VISIT(InitImageMethodVisitor);
+ InitImageMethodVisitor image_visitor(this, offset);
+ success = VisitDexMethods(&image_visitor);
+ DCHECK(success);
+ offset = image_visitor.GetOffset();
}
- #undef VISIT
-
return offset;
}
@@ -1626,6 +1647,90 @@ bool OatWriter::WriteRodata(OutputStream* out) {
return true;
}
+class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
+ public:
+ WriteQuickeningInfoMethodVisitor(OatWriter* writer, OutputStream* out, uint32_t offset)
+ : DexMethodVisitor(writer, offset),
+ out_(out),
+ written_bytes_(0u) {}
+
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
+ const ClassDataItemIterator& it) {
+ if (it.GetMethodCodeItem() == nullptr) {
+ // No CodeItem. Native or abstract method.
+ return true;
+ }
+
+ uint32_t method_idx = it.GetMemberIndex();
+ CompiledMethod* compiled_method =
+ writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
+
+ uint32_t length = 0;
+ const uint8_t* data = nullptr;
+ // VMap only contains quickening info if this method is not compiled.
+ if (compiled_method != nullptr && compiled_method->GetQuickCode().empty()) {
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ data = map.data();
+ length = map.size() * sizeof(map.front());
+ }
+
+ if (!out_->WriteFully(&length, sizeof(length)) ||
+ !out_->WriteFully(data, length)) {
+ PLOG(ERROR) << "Failed to write quickening info for "
+ << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation();
+ return false;
+ }
+ offset_ += sizeof(length) + length;
+ written_bytes_ += sizeof(length) + length;
+ return true;
+ }
+
+ size_t GetNumberOfWrittenBytes() const {
+ return written_bytes_;
+ }
+
+ private:
+ OutputStream* const out_;
+ size_t written_bytes_;
+};
+
+bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
+ if (!kIsVdexEnabled) {
+ return true;
+ }
+
+ size_t initial_offset = vdex_size_;
+ size_t start_offset = RoundUp(initial_offset, 4u);
+
+ vdex_size_ = start_offset;
+ vdex_quickening_info_offset_ = vdex_size_;
+ size_quickening_info_alignment_ = start_offset - initial_offset;
+
+ off_t actual_offset = vdex_out->Seek(start_offset, kSeekSet);
+ if (actual_offset != static_cast<off_t>(start_offset)) {
+ PLOG(ERROR) << "Failed to seek to quickening info section. Actual: " << actual_offset
+ << " Expected: " << start_offset
+ << " Output: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ WriteQuickeningInfoMethodVisitor visitor(this, vdex_out, start_offset);
+ if (!VisitDexMethods(&visitor)) {
+ PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ size_quickening_info_ = visitor.GetNumberOfWrittenBytes();
+ vdex_size_ += size_quickening_info_;
+ return true;
+}
+
bool OatWriter::WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps) {
if (!kIsVdexEnabled) {
return true;
@@ -1717,6 +1822,8 @@ bool OatWriter::WriteCode(OutputStream* out) {
DO_STAT(size_dex_file_);
DO_STAT(size_verifier_deps_);
DO_STAT(size_verifier_deps_alignment_);
+ DO_STAT(size_quickening_info_);
+ DO_STAT(size_quickening_info_alignment_);
DO_STAT(size_interpreter_to_interpreter_bridge_);
DO_STAT(size_interpreter_to_compiled_code_bridge_);
DO_STAT(size_jni_dlsym_lookup_);
@@ -2434,9 +2541,11 @@ bool OatWriter::WriteVdexHeader(OutputStream* vdex_out) {
DCHECK_NE(vdex_verifier_deps_offset_, 0u);
size_t dex_section_size = vdex_verifier_deps_offset_ - vdex_dex_files_offset_;
- size_t verifier_deps_section_size = vdex_size_ - vdex_verifier_deps_offset_;
+ size_t verifier_deps_section_size = vdex_quickening_info_offset_ - vdex_verifier_deps_offset_;
+ size_t quickening_info_section_size = vdex_size_ - vdex_quickening_info_offset_;
- VdexFile::Header vdex_header(dex_section_size, verifier_deps_section_size);
+ VdexFile::Header vdex_header(
+ dex_section_size, verifier_deps_section_size, quickening_info_section_size);
if (!vdex_out->WriteFully(&vdex_header, sizeof(VdexFile::Header))) {
PLOG(ERROR) << "Failed to write vdex header. File: " << vdex_out->GetLocation();
return false;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 1cc193b341..3d08ad3173 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -118,6 +118,10 @@ class OatWriter {
// - AddRawDexFileSource().
// Then the user must call in order
// - WriteAndOpenDexFiles()
+ // - Initialize()
+ // - WriteVerifierDeps()
+ // - WriteQuickeningInfo()
+ // - WriteVdexHeader()
// - PrepareLayout(),
// - WriteRodata(),
// - WriteCode(),
@@ -154,14 +158,20 @@ class OatWriter {
bool verify,
/*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
+ bool WriteQuickeningInfo(OutputStream* vdex_out);
bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
bool WriteVdexHeader(OutputStream* vdex_out);
+ // Initialize the writer with the given parameters.
+ void Initialize(const CompilerDriver* compiler,
+ ImageWriter* image_writer,
+ const std::vector<const DexFile*>& dex_files) {
+ compiler_driver_ = compiler;
+ image_writer_ = image_writer;
+ dex_files_ = &dex_files;
+ }
// Prepare layout of remaining data.
- void PrepareLayout(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files,
- linker::MultiOatRelativePatcher* relative_patcher);
+ void PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher);
// Write the rest of .rodata section (ClassOffsets[], OatClass[], maps).
bool WriteRodata(OutputStream* out);
// Write the code to the .text section.
@@ -239,6 +249,7 @@ class OatWriter {
class InitImageMethodVisitor;
class WriteCodeMethodVisitor;
class WriteMapMethodVisitor;
+ class WriteQuickeningInfoMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
// with a given DexMethodVisitor.
@@ -325,6 +336,9 @@ class OatWriter {
// Offset of section holding VerifierDeps inside Vdex.
size_t vdex_verifier_deps_offset_;
+ // Offset of section holding quickening info inside Vdex.
+ size_t vdex_quickening_info_offset_;
+
// Size required for Oat data structures.
size_t oat_size_;
@@ -368,6 +382,8 @@ class OatWriter {
uint32_t size_dex_file_;
uint32_t size_verifier_deps_;
uint32_t size_verifier_deps_alignment_;
+ uint32_t size_quickening_info_;
+ uint32_t size_quickening_info_alignment_;
uint32_t size_interpreter_to_interpreter_bridge_;
uint32_t size_interpreter_to_compiled_code_bridge_;
uint32_t size_jni_dlsym_lookup_;
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 38937bf488..1d1921a246 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -23,12 +23,12 @@ namespace art {
* Since graph traversal may enter a SCC at any position, an initial representation may be rotated,
* along dependences, viz. any of (a, b, c, d), (d, a, b, c) (c, d, a, b), (b, c, d, a) assuming
* a chain of dependences (mutual independent items may occur in arbitrary order). For proper
- * classification, the lexicographically first entry-phi is rotated to the front.
+ * classification, the lexicographically first loop-phi is rotated to the front.
*/
static void RotateEntryPhiFirst(HLoopInformation* loop,
ArenaVector<HInstruction*>* scc,
ArenaVector<HInstruction*>* new_scc) {
- // Find very first entry-phi.
+ // Find very first loop-phi.
const HInstructionList& phis = loop->GetHeader()->GetPhis();
HInstruction* phi = nullptr;
size_t phi_pos = -1;
@@ -41,7 +41,7 @@ static void RotateEntryPhiFirst(HLoopInformation* loop,
}
}
- // If found, bring that entry-phi to front.
+ // If found, bring that loop-phi to front.
if (phi != nullptr) {
new_scc->clear();
for (size_t i = 0; i < size; i++) {
@@ -94,7 +94,9 @@ HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
type_(Primitive::kPrimVoid),
induction_(std::less<HLoopInformation*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
+ graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ cycles_(std::less<HPhi*>(),
+ graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
void HInductionVarAnalysis::Run() {
@@ -245,13 +247,13 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
const size_t size = scc_.size();
DCHECK_GE(size, 1u);
- // Rotate proper entry-phi to front.
+ // Rotate proper loop-phi to front.
if (size > 1) {
ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
RotateEntryPhiFirst(loop, &scc_, &other);
}
- // Analyze from entry-phi onwards.
+ // Analyze from loop-phi onwards.
HInstruction* phi = scc_[0];
if (!phi->IsLoopHeaderPhi()) {
return;
@@ -263,6 +265,9 @@ void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
return;
}
+ // Store interesting cycle.
+ AssignCycle(phi->AsPhi());
+
// Singleton is wrap-around induction if all internal links have the same meaning.
if (size == 1) {
InductionInfo* update = TransferPhi(loop, phi, /* input_index */ 1);
@@ -366,6 +371,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferAddSub(Indu
// can be combined with an invariant to yield a similar result. Even two linear inputs can
// be combined. All other combinations fail, however.
if (a != nullptr && b != nullptr) {
+ type_ = Narrowest(type_, Narrowest(a->type, b->type));
if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
return CreateInvariantOp(op, a, b);
} else if (a->induction_class == kLinear && b->induction_class == kLinear) {
@@ -402,6 +408,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferMul(Inducti
// can be multiplied with an invariant to yield a similar but multiplied result.
// Two non-invariant inputs cannot be multiplied, however.
if (a != nullptr && b != nullptr) {
+ type_ = Narrowest(type_, Narrowest(a->type, b->type));
if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
return CreateInvariantOp(kMul, a, b);
} else if (a->induction_class == kInvariant) {
@@ -442,6 +449,7 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::TransferNeg(Inducti
// Transfer over a unary negation: an invariant, linear, wrap-around, or periodic input
// yields a similar but negated induction as result.
if (a != nullptr) {
+ type_ = Narrowest(type_, a->type);
if (a->induction_class == kInvariant) {
return CreateInvariantOp(kNeg, nullptr, a);
}
@@ -941,6 +949,23 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::CreateSimplifiedInv
return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, b->type);
}
+
+void HInductionVarAnalysis::AssignCycle(HPhi* phi) {
+ ArenaSet<HInstruction*>* set = &cycles_.Put(phi, ArenaSet<HInstruction*>(
+ graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
+ for (HInstruction* i : scc_) {
+ set->insert(i);
+ }
+}
+
+ArenaSet<HInstruction*>* HInductionVarAnalysis::LookupCycle(HPhi* phi) {
+ auto it = cycles_.find(phi);
+ if (it != cycles_.end()) {
+ return &it->second;
+ }
+ return nullptr;
+}
+
bool HInductionVarAnalysis::IsExact(InductionInfo* info, int64_t* value) {
return InductionVarRange(this).IsConstant(info, InductionVarRange::kExact, value);
}
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index d19078248c..70271799d2 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -214,6 +214,8 @@ class HInductionVarAnalysis : public HOptimization {
InductionInfo* LookupInfo(HLoopInformation* loop, HInstruction* instruction);
InductionInfo* CreateConstant(int64_t value, Primitive::Type type);
InductionInfo* CreateSimplifiedInvariant(InductionOp op, InductionInfo* a, InductionInfo* b);
+ void AssignCycle(HPhi* phi);
+ ArenaSet<HInstruction*>* LookupCycle(HPhi* phi);
// Constants.
bool IsExact(InductionInfo* info, /*out*/ int64_t* value);
@@ -240,6 +242,11 @@ class HInductionVarAnalysis : public HOptimization {
*/
ArenaSafeMap<HLoopInformation*, ArenaSafeMap<HInstruction*, InductionInfo*>> induction_;
+ /**
+ * Preserves induction cycle information for each loop-phi.
+ */
+ ArenaSafeMap<HPhi*, ArenaSet<HInstruction*>> cycles_;
+
friend class InductionVarAnalysisTest;
friend class InductionVarRange;
friend class InductionVarRangeTest;
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 7599c8fd79..031f1d74a8 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -740,6 +740,31 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) {
EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str());
}
+TEST_F(InductionVarAnalysisTest, ByteInductionDerivedIntLoopControl) {
+ // Setup:
+ // for (int i = 0; i < 100; i++) {
+ // k = (byte) i;
+ // a[k] = 0;
+ // k = k + 1;
+ // a[k] = 0;
+ // }
+ BuildLoopNest(1);
+ HInstruction* conv = InsertInstruction(
+ new (&allocator_) HTypeConversion(Primitive::kPrimByte, basic_[0], -1), 0);
+ HInstruction* store1 = InsertArrayStore(conv, 0);
+ HInstruction* add = InsertInstruction(
+ new (&allocator_) HAdd(Primitive::kPrimInt, conv, constant1_), 0);
+ HInstruction* store2 = InsertArrayStore(add, 0);
+
+ PerformInductionVarAnalysis();
+
+ // Byte induction (k) is "transferred" over conversion into addition (k + 1).
+ // This means only values within byte range can be trusted (even though
+ // addition can jump out of the range of course).
+ EXPECT_STREQ("((1) * i + (0)):PrimByte", GetInductionInfo(store1->InputAt(1), 0).c_str());
+ EXPECT_STREQ("((1) * i + (1)):PrimByte", GetInductionInfo(store2->InputAt(1), 0).c_str());
+}
+
TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
// Setup:
// for (byte i = -128; i < 127; i++) { // just fits!
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 2f70046a27..034cf32b2d 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -136,10 +136,20 @@ class InductionVarRange {
*/
void ReVisit(HLoopInformation* loop) {
induction_analysis_->induction_.erase(loop);
+ for (HInstructionIterator it(loop->GetHeader()->GetPhis()); !it.Done(); it.Advance()) {
+ induction_analysis_->cycles_.erase(it.Current()->AsPhi());
+ }
induction_analysis_->VisitLoop(loop);
}
/**
+ * Lookup an interesting cycle associated with an entry phi.
+ */
+ ArenaSet<HInstruction*>* LookupCycle(HPhi* phi) const {
+ return induction_analysis_->LookupCycle(phi);
+ }
+
+ /**
* Checks if header logic of a loop terminates.
*/
bool IsFinite(HLoopInformation* loop) const;
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index b88e73b979..51be1d1e91 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -20,82 +20,6 @@
namespace art {
-// Detects a potential induction cycle. Note that the actual induction
-// information is queried later if its last value is really needed.
-static bool IsPhiInduction(HPhi* phi, ArenaSet<HInstruction*>* iset) {
- DCHECK(iset->empty());
- HInputsRef inputs = phi->GetInputs();
- if (inputs.size() == 2) {
- HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
- HInstruction* op = inputs[1];
- if (op->GetBlock()->GetLoopInformation() == loop_info) {
- // Chase a simple chain back to phi.
- while (!op->IsPhi()) {
- // Binary operation with single use in same loop.
- if (!op->IsBinaryOperation() || !op->GetUses().HasExactlyOneElement()) {
- return false;
- }
- // Chase back either through left or right operand.
- iset->insert(op);
- HInstruction* a = op->InputAt(0);
- HInstruction* b = op->InputAt(1);
- if (a->GetBlock()->GetLoopInformation() == loop_info && b != phi) {
- op = a;
- } else if (b->GetBlock()->GetLoopInformation() == loop_info) {
- op = b;
- } else {
- return false;
- }
- }
- // Closed the cycle?
- if (op == phi) {
- iset->insert(phi);
- return true;
- }
- }
- }
- return false;
-}
-
-// Find: phi: Phi(init, addsub)
-// s: SuspendCheck
-// c: Condition(phi, bound)
-// i: If(c)
-// TODO: Find a less pattern matching approach?
-static bool IsEmptyHeader(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
- DCHECK(iset->empty());
- HInstruction* phi = block->GetFirstPhi();
- if (phi != nullptr && phi->GetNext() == nullptr && IsPhiInduction(phi->AsPhi(), iset)) {
- HInstruction* s = block->GetFirstInstruction();
- if (s != nullptr && s->IsSuspendCheck()) {
- HInstruction* c = s->GetNext();
- if (c != nullptr && c->IsCondition() && c->GetUses().HasExactlyOneElement()) {
- HInstruction* i = c->GetNext();
- if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
- iset->insert(c);
- iset->insert(s);
- return true;
- }
- }
- }
- }
- return false;
-}
-
-// Does the loop-body consist of induction cycle and direct control flow only?
-static bool IsEmptyBody(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
- if (block->GetFirstPhi() == nullptr) {
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (!instruction->IsGoto() && iset->find(instruction) == iset->end()) {
- return false;
- }
- }
- return true;
- }
- return false;
-}
-
// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
@@ -242,7 +166,7 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
HPhi* phi = it.Current()->AsPhi();
iset_->clear();
int32_t use_count = 0;
- if (IsPhiInduction(phi, iset_) &&
+ if (IsPhiInduction(phi) &&
IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
TryReplaceWithLastValue(phi, use_count, preheader)) {
for (HInstruction* i : *iset_) {
@@ -256,15 +180,14 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- // Remove instructions that are dead, usually resulting from eliminating induction cycles.
+ // Remove instructions that are dead.
for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
HInstruction* instruction = i.Current();
if (instruction->IsDeadAndRemovable()) {
block->RemoveInstruction(instruction);
}
}
- // Remove trivial control flow blocks from the loop-body, again usually resulting
- // from eliminating induction cycles.
+ // Remove trivial control flow blocks from the loop-body.
if (block->GetPredecessors().size() == 1 &&
block->GetSuccessors().size() == 1 &&
block->GetFirstInstruction()->IsGoto()) {
@@ -314,8 +237,8 @@ void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
// subsequent index uses, if any, with the last value and remove the loop.
iset_->clear();
int32_t use_count = 0;
- if (IsEmptyHeader(header, iset_) &&
- IsEmptyBody(body, iset_) &&
+ if (IsEmptyHeader(header) &&
+ IsEmptyBody(body) &&
IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
TryReplaceWithLastValue(header->GetFirstPhi(), use_count, preheader)) {
body->DisconnectAndDelete();
@@ -333,6 +256,68 @@ void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
}
}
+bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
+ ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
+ if (set != nullptr) {
+ for (HInstruction* i : *set) {
+ // Check that, other than the phi, the instructions are removable, with uses contained in the cycle.
+ // TODO: investigate what cases are no longer in the graph.
+ if (i != phi) {
+ if (!i->IsInBlock() || !i->IsRemovable()) {
+ return false;
+ }
+ for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
+ if (set->find(use.GetUser()) == set->end()) {
+ return false;
+ }
+ }
+ }
+ }
+ DCHECK(iset_->empty());
+ iset_->insert(set->begin(), set->end()); // copy
+ return true;
+ }
+ return false;
+}
+
+// Find: phi: Phi(init, addsub)
+// s: SuspendCheck
+// c: Condition(phi, bound)
+// i: If(c)
+// TODO: Find a less pattern matching approach?
+bool HLoopOptimization::IsEmptyHeader(HBasicBlock* block) {
+ DCHECK(iset_->empty());
+ HInstruction* phi = block->GetFirstPhi();
+ if (phi != nullptr && phi->GetNext() == nullptr && IsPhiInduction(phi->AsPhi())) {
+ HInstruction* s = block->GetFirstInstruction();
+ if (s != nullptr && s->IsSuspendCheck()) {
+ HInstruction* c = s->GetNext();
+ if (c != nullptr && c->IsCondition() && c->GetUses().HasExactlyOneElement()) {
+ HInstruction* i = c->GetNext();
+ if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
+ iset_->insert(c);
+ iset_->insert(s);
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
+ if (block->GetFirstPhi() == nullptr) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
/*out*/ int32_t* use_count) {
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 4113357035..e18d17531e 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -64,6 +64,10 @@ class HLoopOptimization : public HOptimization {
void SimplifyBlocks(LoopNode* node);
void RemoveIfEmptyInnerLoop(LoopNode* node);
+ bool IsPhiInduction(HPhi* phi);
+ bool IsEmptyHeader(HBasicBlock* block);
+ bool IsEmptyBody(HBasicBlock* block);
+
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
/*out*/ int32_t* use_count);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6f4f3c9505..257ccea799 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1931,7 +1931,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return !HasEnvironmentUses() && GetUses().HasExactlyOneElement();
}
- bool IsDeadAndRemovable() const {
+ bool IsRemovable() const {
return
!HasSideEffects() &&
!CanThrow() &&
@@ -1939,11 +1939,14 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
!IsControlFlow() &&
!IsNativeDebugInfo() &&
!IsParameterValue() &&
- !HasUses() &&
// If we added an explicit barrier then we should keep it.
!IsMemoryBarrier();
}
+ bool IsDeadAndRemovable() const {
+ return IsRemovable() && !HasUses();
+ }
+
// Does this instruction strictly dominate `other_instruction`?
// Returns false if this instruction and `other_instruction` are the same.
// Aborts if this instruction and `other_instruction` are both phis.
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bacf9940ca..013e110b87 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -52,7 +52,7 @@ class OptimizingCFITest : public CFITest {
void SetUpFrame(InstructionSet isa) {
// Setup simple context.
std::string error;
- isa_features_.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
+ isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
graph_ = CreateGraph(&allocator_);
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index a4df9e5503..7b66ef3627 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -129,6 +129,7 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
} else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) {
// Pass the initialization duty to the `HLoadClass` instruction,
// and remove the instruction from the graph.
+ DCHECK(load_class->HasEnvironment());
load_class->SetMustGenerateClinitCheck(true);
check->GetBlock()->RemoveInstruction(check);
}
@@ -136,7 +137,7 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
+ const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
// Change the entrypoint to kQuickAllocObject if either:
// - the class is finalizable (only kQuickAllocObject handles finalizable classes),
// - the class needs access checks (we do not know if it's finalizable),
@@ -144,19 +145,25 @@ void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
instruction->SetEntrypoint(kQuickAllocObject);
instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex()), 0);
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- if (has_only_one_use &&
- !instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // We can remove the load class from the graph. If it needed access checks, we delegate
- // the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
+ if (has_only_one_use) {
+ // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
+ // do it manually if possible.
+ if (!load_class->CanThrow()) {
+ // If the load class can not throw, it has no side effects and can be removed if there is
+ // only one use.
+ load_class->GetBlock()->RemoveInstruction(load_class);
+ } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
+ CanMoveClinitCheck(load_class, instruction)) {
+ // The allocation entry point that deals with access checks does not work with inlined
+ // methods, so we need to check whether this allocation comes from an inlined method.
+ // We also need to make the same check as for moving clinit check, whether the HLoadClass
+ // has the clinit check responsibility or not (HLoadClass can throw anyway).
+ // If it needed access checks, we delegate the access check to the allocation.
+ if (load_class->NeedsAccessCheck()) {
+ instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
+ }
+ load_class->GetBlock()->RemoveInstruction(load_class);
}
- load_class->GetBlock()->RemoveInstruction(load_class);
}
}
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index df1b351249..fd1db592bb 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -162,7 +162,6 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
? compilation_unit_.GetDexCache()
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
mirror::Class* klass = dex_cache->GetResolvedType(type_index);
-
if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
@@ -312,8 +311,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(string);
} else {
- // FIXME: Disabled because of BSS root visiting issues. Bug: 32124939
- // desired_load_kind = HLoadString::LoadKind::kBssEntry;
+ desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
}
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 61b7f08518..1e71d06b49 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2830,7 +2830,7 @@ void Thumb2Assembler::strexd(Register rd, Register rt, Register rt2, Register rn
void Thumb2Assembler::clrex(Condition cond) {
CheckCondition(cond);
- int32_t encoding = B31 | B30 | B29 | B27 | B28 | B25 | B24 | B23 |
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 | B24 | B23 |
B21 | B20 |
0xf << 16 |
B15 |
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index d0799d6112..30e8f4e604 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -207,6 +207,13 @@ TEST_F(AssemblerThumb2Test, strexd) {
DriverStr(expected, "strexd");
}
+TEST_F(AssemblerThumb2Test, clrex) {
+ __ clrex();
+
+ const char* expected = "clrex\n";
+ DriverStr(expected, "clrex");
+}
+
TEST_F(AssemblerThumb2Test, LdrdStrd) {
__ ldrd(arm::R0, arm::Address(arm::R2, 8));
__ ldrd(arm::R0, arm::Address(arm::R12));
diff --git a/runtime/verifier/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 71203e688e..9664e43641 100644
--- a/runtime/verifier/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -14,14 +14,17 @@
* limitations under the License.
*/
-#include "verifier_deps.h"
+// Test is in compiler, as it uses compiler related code.
+#include "verifier/verifier_deps.h"
#include "class_linker.h"
-#include "common_runtime_test.h"
+#include "compiler/common_compiler_test.h"
+#include "compiler/driver/compiler_options.h"
+#include "compiler/driver/compiler_driver.h"
#include "compiler_callbacks.h"
#include "dex_file.h"
#include "handle_scope-inl.h"
-#include "method_verifier-inl.h"
+#include "verifier/method_verifier-inl.h"
#include "mirror/class_loader.h"
#include "runtime.h"
#include "thread.h"
@@ -47,10 +50,10 @@ class VerifierDepsCompilerCallbacks : public CompilerCallbacks {
verifier::VerifierDeps* deps_;
};
-class VerifierDepsTest : public CommonRuntimeTest {
+class VerifierDepsTest : public CommonCompilerTest {
public:
void SetUpRuntimeOptions(RuntimeOptions* options) {
- CommonRuntimeTest::SetUpRuntimeOptions(options);
+ CommonCompilerTest::SetUpRuntimeOptions(options);
callbacks_.reset(new VerifierDepsCompilerCallbacks());
}
@@ -147,23 +150,17 @@ class VerifierDepsTest : public CommonRuntimeTest {
void VerifyDexFile() {
std::string error_msg;
- ScopedObjectAccess soa(Thread::Current());
-
- LoadDexFile(&soa);
- SetVerifierDeps({ dex_file_ });
-
- for (size_t i = 0; i < dex_file_->NumClassDefs(); i++) {
- const char* descriptor = dex_file_->GetClassDescriptor(dex_file_->GetClassDef(i));
- mirror::Class* klass = FindClassByName(descriptor, &soa);
- if (klass != nullptr) {
- MethodVerifier::VerifyClass(Thread::Current(),
- klass,
- nullptr,
- true,
- HardFailLogMode::kLogWarning,
- &error_msg);
- }
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ LoadDexFile(&soa);
}
+ SetVerifierDeps({ dex_file_ });
+ TimingLogger timings("Verify", false, false);
+ std::vector<const DexFile*> dex_files;
+ dex_files.push_back(dex_file_);
+ compiler_options_->boot_image_ = false;
+ compiler_driver_->InitializeThreadPools();
+ compiler_driver_->Verify(class_loader_, dex_files, &timings);
}
bool TestAssignabilityRecording(const std::string& dst,
@@ -184,6 +181,21 @@ class VerifierDepsTest : public CommonRuntimeTest {
return true;
}
+ bool HasUnverifiedClass(const std::string& cls) {
+ const DexFile::TypeId* type_id = dex_file_->FindTypeId(cls.c_str());
+ DCHECK(type_id != nullptr);
+ uint16_t index = dex_file_->GetIndexForTypeId(*type_id);
+ MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+ for (const auto& dex_dep : verifier_deps_->dex_deps_) {
+ for (uint16_t entry : dex_dep.second->unverified_classes_) {
+ if (index == entry) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
// Iterates over all assignability records and tries to find an entry which
// matches the expected destination/source pair.
bool HasAssignable(const std::string& expected_destination,
@@ -361,6 +373,7 @@ class VerifierDepsTest : public CommonRuntimeTest {
bool has_classes = false;
bool has_fields = false;
bool has_methods = false;
+ bool has_unverified_classes = false;
for (auto& entry : verifier_deps_->dex_deps_) {
has_strings |= !entry.second->strings_.empty();
@@ -371,9 +384,15 @@ class VerifierDepsTest : public CommonRuntimeTest {
has_methods |= !entry.second->direct_methods_.empty();
has_methods |= !entry.second->virtual_methods_.empty();
has_methods |= !entry.second->interface_methods_.empty();
+ has_unverified_classes |= !entry.second->unverified_classes_.empty();
}
- return has_strings && has_assignability && has_classes && has_fields && has_methods;
+ return has_strings &&
+ has_assignability &&
+ has_classes &&
+ has_fields &&
+ has_methods &&
+ has_unverified_classes;
}
std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
@@ -1056,5 +1075,18 @@ TEST_F(VerifierDepsTest, EncodeDecode) {
ASSERT_TRUE(verifier_deps_->Equals(decoded_deps));
}
+TEST_F(VerifierDepsTest, UnverifiedClasses) {
+ VerifyDexFile();
+ ASSERT_FALSE(HasUnverifiedClass("LMyThread;"));
+ // Test that a class with a soft failure is recorded.
+ ASSERT_TRUE(HasUnverifiedClass("LMain;"));
+  // Test that a class with a hard failure is recorded.
+  ASSERT_TRUE(HasUnverifiedClass("LMyVerificationFailure;"));
+  // Test that a class with an unresolved superclass is not recorded.
+  ASSERT_FALSE(HasUnverifiedClass("LMyClassWithNoSuper;"));
+  // Test that a class with an unresolved superclass and a hard failure is recorded.
+ ASSERT_TRUE(HasUnverifiedClass("LMyClassWithNoSuperButFailures;"));
+}
+
} // namespace verifier
} // namespace art
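The HasUnverifiedClass() helper added above walks VerifierDeps' per-dex-file records. A minimal
sketch of the data shape it assumes (field names as used in the test; the container types are an
assumption, not confirmed by this diff):

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <set>

    class DexFile;  // from the runtime

    // One record per dex file; unverified_classes_ holds the dex type indices
    // of classes that did not fully verify (soft or hard failures).
    struct DexFileDeps {
      std::set<uint16_t> unverified_classes_;
      // ... strings_, classes_, fields_, method and assignability records ...
    };

    struct VerifierDeps {
      std::map<const DexFile*, std::unique_ptr<DexFileDeps>> dex_deps_;
    };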
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0ce13626a2..28d628920c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -637,9 +637,8 @@ class Dex2Oat FINAL {
void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
DCHECK(option.starts_with("--instruction-set-variant="));
StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(
- instruction_set_, str.as_string(), &parser_options->error_msg));
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_, str.as_string(), &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("%s", parser_options->error_msg.c_str());
}
@@ -648,19 +647,18 @@ class Dex2Oat FINAL {
void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
DCHECK(option.starts_with("--instruction-set-features="));
StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
- if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(
- instruction_set_, "default", &parser_options->error_msg));
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
parser_options->error_msg.c_str());
}
}
- instruction_set_features_.reset(
+ instruction_set_features_ =
instruction_set_features_->AddFeaturesFromString(str.as_string(),
- &parser_options->error_msg));
- if (instruction_set_features_.get() == nullptr) {
+ &parser_options->error_msg);
+ if (instruction_set_features_ == nullptr) {
Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
}
}
@@ -828,9 +826,8 @@ class Dex2Oat FINAL {
// If no instruction set feature was given, use the default one for the target
// instruction set.
if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_.reset(
- InstructionSetFeatures::FromVariant(
- instruction_set_, "default", &parser_options->error_msg));
+ instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
parser_options->error_msg.c_str());
@@ -1777,6 +1774,14 @@ class Dex2Oat FINAL {
}
}
+  // Initialize the writers with the compiler driver, image writer, and their
+  // dex files. The writers were created before these were available.
+ for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
+ std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
+ std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
+ oat_writer->Initialize(driver_.get(), image_writer_.get(), dex_files);
+ }
+
{
TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
DCHECK(IsBootImage() || oat_files_.size() == 1u);
@@ -1791,6 +1796,11 @@ class Dex2Oat FINAL {
return false;
}
+ if (!oat_writers_[i]->WriteQuickeningInfo(vdex_out.get())) {
+ LOG(ERROR) << "Failed to write quickening info into VDEX " << vdex_file->GetPath();
+ return false;
+ }
+
// VDEX finalized, seek back to the beginning and write the header.
if (!oat_writers_[i]->WriteVdexHeader(vdex_out.get())) {
LOG(ERROR) << "Failed to write vdex header into VDEX " << vdex_file->GetPath();
@@ -1799,15 +1809,14 @@ class Dex2Oat FINAL {
}
}
- linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
+ linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
std::unique_ptr<ElfWriter>& elf_writer = elf_writers_[i];
std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
- std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
- oat_writer->PrepareLayout(driver_.get(), image_writer_.get(), dex_files, &patcher);
+ oat_writer->PrepareLayout(&patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
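Taken together, the dex2oat changes above split writer setup from layout: inputs are late-bound
via Initialize(), the VDEX gains a quickening-info section, and PrepareLayout() shrinks to a
patcher-only signature. The resulting call order, as a comment sketch restating only the calls
visible in this diff:

    // oat_writer->Initialize(driver, image_writer, dex_files);  // bind late inputs
    // ... write VDEX dex sections ...
    // oat_writer->WriteQuickeningInfo(vdex_out);                // new VDEX section
    // oat_writer->WriteVdexHeader(vdex_out);                    // seek back, finalize
    // oat_writer->PrepareLayout(&patcher);                      // now needs only the patcher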
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 3e589f7c5e..60ce363dbe 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -18,6 +18,7 @@ art_cc_binary {
name: "dexdump2",
host_supported: true,
srcs: [
+ "dexdump_cfg.cc",
"dexdump_main.cc",
"dexdump.cc",
],
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 204293499c..30de28eaee 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -42,9 +42,10 @@
#include <sstream>
#include <vector>
+#include "base/stringprintf.h"
+#include "dexdump_cfg.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "utils.h"
namespace art {
@@ -780,9 +781,11 @@ static void dumpLocalsCb(void* /*context*/, const DexFile::LocalInfo& entry) {
static std::unique_ptr<char[]> indexString(const DexFile* pDexFile,
const Instruction* pDecInsn,
size_t bufSize) {
+ static const u4 kInvalidIndex = std::numeric_limits<u4>::max();
std::unique_ptr<char[]> buf(new char[bufSize]);
// Determine index and width of the string.
u4 index = 0;
+ u4 secondary_index = kInvalidIndex;
u4 width = 4;
switch (Instruction::FormatOf(pDecInsn->Opcode())) {
// SOME NOT SUPPORTED:
@@ -806,6 +809,12 @@ static std::unique_ptr<char[]> indexString(const DexFile* pDexFile,
index = pDecInsn->VRegC();
width = 4;
break;
+ case Instruction::k45cc:
+ case Instruction::k4rcc:
+ index = pDecInsn->VRegB();
+ secondary_index = pDecInsn->VRegH();
+ width = 4;
+ break;
default:
break;
} // switch
@@ -870,6 +879,26 @@ static std::unique_ptr<char[]> indexString(const DexFile* pDexFile,
case Instruction::kIndexFieldOffset:
outSize = snprintf(buf.get(), bufSize, "[obj+%0*x]", width, index);
break;
+ case Instruction::kIndexMethodAndProtoRef: {
+ std::string method("<method?>");
+ std::string proto("<proto?>");
+ if (index < pDexFile->GetHeader().method_ids_size_) {
+ const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(index);
+ const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
+ const Signature signature = pDexFile->GetMethodSignature(pMethodId);
+ const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
+ method = StringPrintf("%s.%s:%s",
+ backDescriptor, name, signature.ToString().c_str());
+ }
+ if (secondary_index < pDexFile->GetHeader().proto_ids_size_) {
+ const DexFile::ProtoId& protoId = pDexFile->GetProtoId(secondary_index);
+ const Signature signature = pDexFile->GetProtoSignature(protoId);
+ proto = signature.ToString();
+ }
+ outSize = snprintf(buf.get(), bufSize, "%s, %s // method@%0*x, proto@%0*x",
+ method.c_str(), proto.c_str(), width, index, width, secondary_index);
+ }
+ break;
// SOME NOT SUPPORTED:
// case Instruction::kIndexVaries:
// case Instruction::kIndexInlineMethod:
@@ -1043,7 +1072,8 @@ static void dumpInstruction(const DexFile* pDexFile,
case Instruction::k32x: // op vAAAA, vBBBB
fprintf(gOutFile, " v%d, v%d", pDecInsn->VRegA(), pDecInsn->VRegB());
break;
- case Instruction::k35c: { // op {vC, vD, vE, vF, vG}, thing@BBBB
+ case Instruction::k35c: // op {vC, vD, vE, vF, vG}, thing@BBBB
+ case Instruction::k45cc: { // op {vC, vD, vE, vF, vG}, method@BBBB, proto@HHHH
// NOT SUPPORTED:
// case Instruction::k35ms: // [opt] invoke-virtual+super
// case Instruction::k35mi: // [opt] inline invoke
@@ -1061,10 +1091,10 @@ static void dumpInstruction(const DexFile* pDexFile,
break;
}
case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
+ case Instruction::k4rcc: { // op {vCCCC .. v(CCCC+AA-1)}, method@BBBB, proto@HHHH
// NOT SUPPORTED:
// case Instruction::k3rms: // [opt] invoke-virtual+super/range
// case Instruction::k3rmi: // [opt] execute-inline/range
- {
// This doesn't match the "dx" output when some of the args are
// 64-bit values -- dx only shows the first register.
fputs(" {", gOutFile);
@@ -1328,7 +1358,7 @@ static void dumpCfg(const DexFile* dex_file,
if (code_item != nullptr) {
std::ostringstream oss;
DumpMethodCFG(dex_file, dex_method_idx, oss);
- fprintf(gOutFile, "%s", oss.str().c_str());
+ fputs(oss.str().c_str(), gOutFile);
}
}
@@ -1551,10 +1581,15 @@ static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
/*
* Dumps the requested sections of the file.
*/
-static void processDexFile(const char* fileName, const DexFile* pDexFile) {
+static void processDexFile(const char* fileName,
+ const DexFile* pDexFile, size_t i, size_t n) {
if (gOptions.verbose) {
- fprintf(gOutFile, "Opened '%s', DEX version '%.3s'\n",
- fileName, pDexFile->GetHeader().magic_ + 4);
+ fputs("Opened '", gOutFile);
+ fputs(fileName, gOutFile);
+ if (n > 1) {
+ fprintf(gOutFile, ":%s", DexFile::GetMultiDexClassesDexName(i).c_str());
+ }
+ fprintf(gOutFile, "', DEX version '%.3s'\n", pDexFile->GetHeader().magic_ + 4);
}
// Headers.
@@ -1612,8 +1647,8 @@ int processFile(const char* fileName) {
if (gOptions.checksumOnly) {
fprintf(gOutFile, "Checksum verified\n");
} else {
- for (size_t i = 0; i < dex_files.size(); i++) {
- processDexFile(fileName, dex_files[i].get());
+ for (size_t i = 0, n = dex_files.size(); i < n; i++) {
+ processDexFile(fileName, dex_files[i].get(), i, n);
}
}
return 0;
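The k45cc/k4rcc cases added above carry two indices, method@BBBB plus proto@HHHH, so
indexString() now renders both. Illustrative shape of the resulting operand text (hypothetical
indices and signature, not captured tool output):

    SomeClass.invoke:([Ljava/lang/Object;)Ljava/lang/Object;, (I)I // method@0002, proto@0001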
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
new file mode 100644
index 0000000000..9e581280da
--- /dev/null
+++ b/dexdump/dexdump_cfg.cc
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file for control flow graph dumping for the dexdump utility.
+ */
+
+#include "dexdump_cfg.h"
+
+#include <inttypes.h>
+#include <ostream>
+#include <map>
+#include <set>
+
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+
+namespace art {
+
+static void dumpMethodCFGImpl(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item,
+ std::ostream& os) {
+ os << "digraph {\n";
+ os << " # /* " << dex_file->PrettyMethod(dex_method_idx, true) << " */\n";
+
+ std::set<uint32_t> dex_pc_is_branch_target;
+ {
+ // Go and populate.
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (inst->IsBranch()) {
+ dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset());
+ } else if (inst->IsSwitch()) {
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ dex_pc_is_branch_target.insert(dex_pc + offset);
+ }
+ }
+ }
+ }
+
+ // Create nodes for "basic blocks."
+ std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts.
+ std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs.
+
+ {
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ bool first_in_block = true;
+ bool force_new_block = false;
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ if (dex_pc == 0 ||
+ (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) ||
+ force_new_block) {
+ uint32_t id = dex_pc_to_node_id.size();
+ if (id > 0) {
+ // End last node.
+ os << "}\"];\n";
+ }
+ // Start next node.
+ os << " node" << id << " [shape=record,label=\"{";
+ dex_pc_to_node_id.insert(std::make_pair(dex_pc, id));
+ first_in_block = true;
+ force_new_block = false;
+ }
+
+ // Register instruction.
+ dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1));
+
+ // Print instruction.
+ if (!first_in_block) {
+ os << " | ";
+ } else {
+ first_in_block = false;
+ }
+
+ // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'.
+ os << "<" << "p" << dex_pc << ">";
+ os << " 0x" << std::hex << dex_pc << std::dec << ": ";
+ std::string inst_str = inst->DumpString(dex_file);
+ size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars
+ // we need to escape.
+ while (cur_start != std::string::npos) {
+ size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1);
+ if (next_escape == std::string::npos) {
+ os << inst_str.substr(cur_start, inst_str.size() - cur_start);
+ break;
+ } else {
+ os << inst_str.substr(cur_start, next_escape - cur_start);
+ // Escape all necessary characters.
+ while (next_escape < inst_str.size()) {
+ char c = inst_str.at(next_escape);
+ if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
+ os << '\\' << c;
+ } else {
+ break;
+ }
+ next_escape++;
+ }
+ if (next_escape >= inst_str.size()) {
+ next_escape = std::string::npos;
+ }
+ cur_start = next_escape;
+ }
+ }
+
+ // Force a new block for some fall-throughs and some instructions that terminate the "local"
+ // control flow.
+ force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd();
+ }
+ // Close last node.
+ if (dex_pc_to_node_id.size() > 0) {
+ os << "}\"];\n";
+ }
+ }
+
+ // Create edges between them.
+ {
+ std::ostringstream regular_edges;
+ std::ostringstream taken_edges;
+ std::ostringstream exception_edges;
+
+ // Common set of exception edges.
+ std::set<uint32_t> exception_targets;
+
+ // These blocks (given by the first dex pc) need exception per dex-pc handling in a second
+ // pass. In the first pass we try and see whether we can use a common set of edges.
+ std::set<uint32_t> blocks_with_detailed_exceptions;
+
+ {
+ uint32_t last_node_id = std::numeric_limits<uint32_t>::max();
+ uint32_t old_dex_pc = 0;
+ uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max();
+ const Instruction* inst = Instruction::At(code_item->insns_);
+ for (uint32_t dex_pc = 0;
+ dex_pc < code_item->insns_size_in_code_units_;
+ old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
+ {
+ auto it = dex_pc_to_node_id.find(dex_pc);
+ if (it != dex_pc_to_node_id.end()) {
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+
+ block_start_dex_pc = dex_pc;
+
+ // Seems to be a fall-through, connect to last_node_id. May be spurious edges for things
+ // like switch data.
+ uint32_t old_last = last_node_id;
+ last_node_id = it->second;
+ if (old_last != std::numeric_limits<uint32_t>::max()) {
+ regular_edges << " node" << old_last << ":p" << old_dex_pc
+ << " -> node" << last_node_id << ":p" << dex_pc
+ << ";\n";
+ }
+ }
+
+ // Look at the exceptions of the first entry.
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ exception_targets.insert(catch_it.GetHandlerAddress());
+ }
+ }
+
+ // Handle instruction.
+
+ // Branch: something with at most two targets.
+ if (inst->IsBranch()) {
+ const int32_t offset = inst->GetTargetOffset();
+ const bool conditional = !inst->IsUnconditional();
+
+ auto target_it = dex_pc_to_node_id.find(dex_pc + offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (dex_pc + offset)
+ << ";\n";
+ }
+ if (!conditional) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ } else if (inst->IsSwitch()) {
+ // TODO: Iterate through all switch targets.
+ const uint16_t* insns = code_item->insns_ + dex_pc;
+ /* make sure the start of the switch is in range */
+ int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+ /* offset to switch table is a relative branch-style offset */
+ const uint16_t* switch_insns = insns + switch_offset;
+ uint32_t switch_count = switch_insns[1];
+ int32_t targets_offset;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ targets_offset = 2 + 2 * switch_count;
+ }
+ /* make sure the end of the switch is in range */
+ /* verify each switch target */
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset =
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
+ static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t abs_offset = dex_pc + offset;
+ auto target_it = dex_pc_to_node_id.find(abs_offset);
+ if (target_it != dex_pc_to_node_id.end()) {
+ // TODO: value label.
+ taken_edges << " node" << last_node_id << ":p" << dex_pc
+ << " -> node" << target_it->second << ":p" << (abs_offset)
+ << ";\n";
+ }
+ }
+ }
+
+      // Exception edges. Past the block's first instruction, compare the handlers seen here with the block's common set.
+ if (block_start_dex_pc != dex_pc) {
+ std::set<uint32_t> current_handler_pcs;
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ current_handler_pcs.insert(catch_it.GetHandlerAddress());
+ }
+ if (current_handler_pcs != exception_targets) {
+ exception_targets.clear(); // Clear so we don't do something at the end.
+ blocks_with_detailed_exceptions.insert(block_start_dex_pc);
+ }
+ }
+
+ if (inst->IsReturn() ||
+ (inst->Opcode() == Instruction::THROW) ||
+ (inst->IsBranch() && inst->IsUnconditional())) {
+ // No fall-through.
+ last_node_id = std::numeric_limits<uint32_t>::max();
+ }
+ }
+ // Finish up the last block, if it had common exceptions.
+ if (!exception_targets.empty()) {
+ // It seems the last block had common exception handlers. Add the exception edges now.
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
+ for (uint32_t handler_pc : exception_targets) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << node_id
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+ }
+ exception_targets.clear();
+ }
+ }
+
+  // Second pass for detailed exception blocks: for each block flagged above,
+  // emit exception edges per dex pc, since its instructions did not share
+  // one common handler set.
+ for (uint32_t dex_pc : blocks_with_detailed_exceptions) {
+ const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]);
+ uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second;
+ while (true) {
+ CatchHandlerIterator catch_it(*code_item, dex_pc);
+ if (catch_it.HasNext()) {
+ std::set<uint32_t> handled_targets;
+ for (; catch_it.HasNext(); catch_it.Next()) {
+ uint32_t handler_pc = catch_it.GetHandlerAddress();
+ auto it = handled_targets.find(handler_pc);
+ if (it == handled_targets.end()) {
+ auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
+ if (node_id_it != dex_pc_to_incl_id.end()) {
+ exception_edges << " node" << this_node_id << ":p" << dex_pc
+ << " -> node" << node_id_it->second << ":p" << handler_pc
+ << ";\n";
+ }
+
+ // Mark as done.
+ handled_targets.insert(handler_pc);
+ }
+ }
+ }
+ if (inst->IsBasicBlockEnd()) {
+ break;
+ }
+
+ // Loop update. Have a break-out if the next instruction is a branch target and thus in
+ // another block.
+ dex_pc += inst->SizeInCodeUnits();
+ if (dex_pc >= code_item->insns_size_in_code_units_) {
+ break;
+ }
+ if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) {
+ break;
+ }
+ inst = inst->Next();
+ }
+ }
+
+ // Write out the sub-graphs to make edges styled.
+ os << "\n";
+ os << " subgraph regular_edges {\n";
+ os << " edge [color=\"#000000\",weight=.3,len=3];\n\n";
+ os << " " << regular_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph taken_edges {\n";
+ os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n";
+ os << " " << taken_edges.str() << "\n";
+ os << " }\n\n";
+
+ os << " subgraph exception_edges {\n";
+ os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n";
+ os << " " << exception_edges.str() << "\n";
+ os << " }\n\n";
+ }
+
+ os << "}\n";
+}
+
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
+  // This is painful: we need to find the code item, which means finding the class and then
+  // iterating its class data.
+ if (dex_method_idx >= dex_file->NumMethodIds()) {
+ os << "Could not find method-idx.";
+ return;
+ }
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
+
+ const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
+ if (class_def == nullptr) {
+ os << "Could not find class-def.";
+ return;
+ }
+
+ const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ if (class_data == nullptr) {
+ os << "No class data.";
+ return;
+ }
+
+ ClassDataItemIterator it(*dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ it.Next();
+ }
+
+ // Find method, and dump it.
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == dex_method_idx) {
+ dumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
+ return;
+ }
+ it.Next();
+ }
+
+ // Otherwise complain.
+ os << "Something went wrong, didn't find the method in the class data.";
+}
+
+} // namespace art
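dumpMethodCFGImpl() emits one Graphviz digraph per method: record-shaped nodes with a <pN> port
per dex pc, and three styled edge subgraphs. Illustrative output shape (hand-written to match the
code above, not captured from the tool):

    digraph {
      # /* void Foo.bar() */
      node0 [shape=record,label="{<p0> 0x0: const/4 v0, #int 0 | <p1> 0x1: return-void}"];

      subgraph regular_edges { edge [color="#000000",weight=.3,len=3]; }
      subgraph taken_edges { edge [color="#00FF00",weight=.3,len=3]; }
      subgraph exception_edges { edge [color="#FF0000",weight=.3,len=3]; }
    }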
diff --git a/dexdump/dexdump_cfg.h b/dexdump/dexdump_cfg.h
new file mode 100644
index 0000000000..64e5f9af60
--- /dev/null
+++ b/dexdump/dexdump_cfg.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_DEXDUMP_DEXDUMP_CFG_H_
+#define ART_DEXDUMP_DEXDUMP_CFG_H_
+
+#include <inttypes.h>
+#include <ostream>
+
+namespace art {
+
+class DexFile;
+
+void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+
+} // namespace art
+
+#endif // ART_DEXDUMP_DEXDUMP_CFG_H_
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 0f07f23a88..c3c763fee8 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -122,6 +122,8 @@ static bool GetIdFromInstruction(Collections& collections,
// case Instruction::k3rms:
// case Instruction::k35mi:
// case Instruction::k3rmi:
+ case Instruction::k45cc:
+ case Instruction::k4rcc:
index = dec_insn->VRegB();
break;
case Instruction::k31c:
@@ -150,6 +152,7 @@ static bool GetIdFromInstruction(Collections& collections,
}
break;
case Instruction::kIndexMethodRef:
+ case Instruction::kIndexMethodAndProtoRef:
if (index < collections.MethodIdsSize()) {
method_ids->push_back(collections.GetMethodId(index));
return true;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index a9ae55fd8b..2b30a1be08 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -30,6 +30,7 @@
#include <sstream>
#include <vector>
+#include "base/stringprintf.h"
#include "dex_ir_builder.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
@@ -722,9 +723,11 @@ static void DumpLocalInfo(const dex_ir::CodeItem* code) {
static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
const Instruction* dec_insn,
size_t buf_size) {
+ static const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();
std::unique_ptr<char[]> buf(new char[buf_size]);
// Determine index and width of the string.
uint32_t index = 0;
+ uint32_t secondary_index = kInvalidIndex;
uint32_t width = 4;
switch (Instruction::FormatOf(dec_insn->Opcode())) {
// SOME NOT SUPPORTED:
@@ -748,6 +751,12 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
index = dec_insn->VRegC();
width = 4;
break;
+ case Instruction::k45cc:
+ case Instruction::k4rcc:
+ index = dec_insn->VRegB();
+ secondary_index = dec_insn->VRegH();
+ width = 4;
+ break;
default:
break;
} // switch
@@ -815,6 +824,24 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
// SOME NOT SUPPORTED:
// case Instruction::kIndexVaries:
// case Instruction::kIndexInlineMethod:
+ case Instruction::kIndexMethodAndProtoRef: {
+ std::string method("<method?>");
+ std::string proto("<proto?>");
+ if (index < header->GetCollections().MethodIdsSize()) {
+ dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(index);
+ const char* name = method_id->Name()->Data();
+ std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+ method = StringPrintf("%s.%s:%s", back_descriptor, name, type_descriptor.c_str());
+ }
+ if (secondary_index < header->GetCollections().ProtoIdsSize()) {
+ dex_ir::ProtoId* proto_id = header->GetCollections().GetProtoId(secondary_index);
+ proto = GetSignatureForProtoId(proto_id);
+ }
+ outSize = snprintf(buf.get(), buf_size, "%s, %s // method@%0*x, proto@%0*x",
+ method.c_str(), proto.c_str(), width, index, width, secondary_index);
+ }
+ break;
default:
outSize = snprintf(buf.get(), buf_size, "<?>");
break;
@@ -984,7 +1011,8 @@ static void DumpInstruction(dex_ir::Header* header, const dex_ir::CodeItem* code
case Instruction::k32x: // op vAAAA, vBBBB
fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
break;
- case Instruction::k35c: { // op {vC, vD, vE, vF, vG}, thing@BBBB
+ case Instruction::k35c: // op {vC, vD, vE, vF, vG}, thing@BBBB
+ case Instruction::k45cc: { // op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH
// NOT SUPPORTED:
// case Instruction::k35ms: // [opt] invoke-virtual+super
// case Instruction::k35mi: // [opt] inline invoke
@@ -1001,7 +1029,8 @@ static void DumpInstruction(dex_ir::Header* header, const dex_ir::CodeItem* code
fprintf(out_file_, "}, %s", index_buf.get());
break;
}
- case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
+ case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
+ case Instruction::k4rcc: // op {vCCCC .. v(CCCC+AA-1)}, meth@BBBB, proto@HHHH
// NOT SUPPORTED:
// case Instruction::k3rms: // [opt] invoke-virtual+super/range
// case Instruction::k3rmi: // [opt] execute-inline/range
@@ -1257,49 +1286,6 @@ static void DumpIField(dex_ir::Header* header, uint32_t idx, uint32_t flags, int
}
/*
- * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item
- * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a
- * tool, so this is not performance-critical.
- */
-
-static void DumpCFG(const DexFile* dex_file,
- uint32_t dex_method_idx,
- const DexFile::CodeItem* code) {
- if (code != nullptr) {
- std::ostringstream oss;
- DumpMethodCFG(dex_file, dex_method_idx, oss);
- fprintf(out_file_, "%s", oss.str().c_str());
- }
-}
-
-static void DumpCFG(const DexFile* dex_file, int idx) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == nullptr) { // empty class such as a marker interface?
- return;
- }
- ClassDataItemIterator it(*dex_file, class_data);
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
- while (it.HasNextDirectMethod()) {
- DumpCFG(dex_file,
- it.GetMemberIndex(),
- it.GetMethodCodeItem());
- it.Next();
- }
- while (it.HasNextVirtualMethod()) {
- DumpCFG(dex_file,
- it.GetMemberIndex(),
- it.GetMethodCodeItem());
- it.Next();
- }
-}
-
-/*
* Dumps the class.
*
* Note "idx" is a DexClassDef index, not a DexTypeId index.
@@ -1307,10 +1293,7 @@ static void DumpCFG(const DexFile* dex_file, int idx) {
* If "*last_package" is nullptr or does not match the current class' package,
* the value will be replaced with a newly-allocated string.
*/
-static void DumpClass(const DexFile* dex_file,
- dex_ir::Header* header,
- int idx,
- char** last_package) {
+static void DumpClass(dex_ir::Header* header, int idx, char** last_package) {
dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx);
// Omitting non-public class.
if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) {
@@ -1325,11 +1308,6 @@ static void DumpClass(const DexFile* dex_file,
DumpClassAnnotations(header, idx);
}
- if (options_.show_cfg_) {
- DumpCFG(dex_file, idx);
- return;
- }
-
// For the XML output, show the package name. Ideally we'd gather
// up the classes, sort them, and dump them alphabetically so the
// package name wouldn't jump around, but that's not a great plan
@@ -1532,7 +1510,7 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file, size_
char* package = nullptr;
const uint32_t class_defs_size = header->GetCollections().ClassDefsSize();
for (uint32_t i = 0; i < class_defs_size; i++) {
- DumpClass(dex_file, header.get(), i, &package);
+ DumpClass(header.get(), i, &package);
} // for
// Free the last package allocated.
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index c01eb79ecf..a5bd99284e 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -44,7 +44,6 @@ struct Options {
bool exports_only_;
bool ignore_bad_checksum_;
bool show_annotations_;
- bool show_cfg_;
bool show_file_headers_;
bool show_section_headers_;
bool verbose_;
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 2203fba325..825dd50355 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -51,7 +51,6 @@ static void Usage(void) {
fprintf(stderr, " -d : disassemble code sections\n");
fprintf(stderr, " -e : display exported items only\n");
fprintf(stderr, " -f : display summary information from file header\n");
- fprintf(stderr, " -g : display CFG for dex\n");
fprintf(stderr, " -h : display file header details\n");
fprintf(stderr, " -i : ignore checksum failures\n");
fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
@@ -99,9 +98,6 @@ int DexlayoutDriver(int argc, char** argv) {
case 'f': // display outer file header
options_.show_file_headers_ = true;
break;
- case 'g': // display cfg
- options_.show_cfg_ = true;
- break;
case 'h': // display section headers, i.e. all meta-data
options_.show_section_headers_ = true;
break;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c7bf231e03..da0db01386 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -64,6 +64,7 @@
#include "string_reference.h"
#include "thread_list.h"
#include "type_lookup_table.h"
+#include "vdex_file.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -114,13 +115,13 @@ class OatSymbolizer FINAL {
bool Symbolize() {
const InstructionSet isa = oat_file_->GetOatHeader().GetInstructionSet();
- const InstructionSetFeatures* features = InstructionSetFeatures::FromBitmap(
+ std::unique_ptr<const InstructionSetFeatures> features = InstructionSetFeatures::FromBitmap(
isa, oat_file_->GetOatHeader().GetInstructionSetFeaturesBitmap());
File* elf_file = OS::CreateEmptyFile(output_name_.c_str());
std::unique_ptr<BufferedOutputStream> output_stream(
MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file)));
- builder_.reset(new ElfBuilder<ElfTypes>(isa, features, output_stream.get()));
+ builder_.reset(new ElfBuilder<ElfTypes>(isa, features.get(), output_stream.get()));
builder_->Start();
@@ -1029,13 +1030,19 @@ class OatDumper {
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable());
}
- uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
+ uint32_t vmap_table_offset = method_header == nullptr ? 0 : method_header->vmap_table_offset_;
vios->Stream() << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
- if (vmap_table_offset > oat_file_.Size()) {
+
+ size_t vmap_table_offset_limit =
+ (kIsVdexEnabled && IsMethodGeneratedByDexToDexCompiler(oat_method, code_item))
+ ? oat_file_.GetVdexFile()->Size()
+ : method_header->GetCode() - oat_file_.Begin();
+ if (vmap_table_offset >= vmap_table_offset_limit) {
vios->Stream() << StringPrintf("WARNING: "
"vmap table offset 0x%08x is past end of file 0x%08zx. "
"vmap table offset was loaded from offset 0x%08x.\n",
- vmap_table_offset, oat_file_.Size(),
+ vmap_table_offset,
+ vmap_table_offset_limit,
oat_method.GetVmapTableOffsetOffset());
success = false;
} else if (options_.dump_vmap_) {
diff --git a/profman/profman.cc b/profman/profman.cc
index b17816be0c..bfef834bd9 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -354,7 +354,7 @@ class ProfMan FINAL {
}
int GenerateTestProfile() {
- int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY);
+ int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (profile_test_fd < 0) {
std::cerr << "Cannot open " << test_profile_ << strerror(errno);
return -1;
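Context for the one-line fix above: when O_CREAT is set, open(2) reads a third mode argument, and
omitting it passes indeterminate stack bits as the new file's permission mask. Minimal
illustration:

    #include <fcntl.h>

    int CreateTruncated(const char* path) {
      // O_CREAT requires the mode argument; 0644 (rw-r--r--) is then
      // filtered by the process umask. Without it the permissions are garbage.
      return open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
    }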
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 6945eb0802..b49857381b 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -565,7 +565,6 @@ art_cc_test {
"utils_test.cc",
"verifier/method_verifier_test.cc",
"verifier/reg_type_test.cc",
- "verifier/verifier_deps_test.cc",
"zip_archive_test.cc",
],
shared_libs: [
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index c3a5829979..c81a93c368 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -33,7 +33,7 @@ extern "C" bool artCheckForArmSdivInstruction();
namespace art {
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
// Assume all ARM processors are SMP.
// TODO: set the SMP support based on variant.
@@ -69,7 +69,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants),
variant)) {
*error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
- return nullptr;
+ return ArmFeaturesUniquePtr();
}
// Warn if the variant is unknown.
// TODO: some of the variants below may have feature support, but that support is currently
@@ -97,17 +97,17 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
<< ") using conservative defaults";
}
}
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool has_div = (bitmap & kDivBitfield) != 0;
bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
- return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() {
const bool smp = true;
#if defined(__ARM_ARCH_EXT_IDIV__)
const bool has_div = true;
@@ -119,10 +119,10 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
#else
const bool has_lpae = false;
#endif
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -157,10 +157,10 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() {
bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
bool has_div = false;
@@ -180,7 +180,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
}
#endif
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
@@ -198,7 +198,7 @@ static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATT
#endif
}
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromAssembly() {
const bool smp = true;
// See if have a sdiv instruction. Register a signal handler and try to execute an sdiv
@@ -226,7 +226,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
#else
const bool has_lpae = false;
#endif
- return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+ return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
}
bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
@@ -265,7 +265,8 @@ std::string ArmInstructionSetFeatures::GetFeatureString() const {
return result;
}
-const InstructionSetFeatures* ArmInstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+ArmInstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
bool has_div = has_div_;
@@ -284,7 +285,8 @@ const InstructionSetFeatures* ArmInstructionSetFeatures::AddFeaturesFromSplitStr
return nullptr;
}
}
- return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd));
}
} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index 221bf1fbc4..204d1d76cc 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -21,29 +21,31 @@
namespace art {
+class ArmInstructionSetFeatures;
+using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
+
// Instruction set features relevant to the ARM architecture.
class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static ArmFeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const ArmInstructionSetFeatures* FromCppDefines();
+ static ArmFeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromCpuInfo();
+ static ArmFeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromHwcap();
+ static ArmFeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const ArmInstructionSetFeatures* FromAssembly();
+ static ArmFeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -71,7 +73,7 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
protected:
// Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
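The ARM changes above set the pattern repeated for every architecture below: the feature-detection
factories return std::unique_ptr<const T>, making ownership explicit and signaling failure with a
null pointer. A minimal standalone sketch of the shape (hypothetical Features class, not ART
code):

    #include <memory>
    #include <string>

    class Features;
    using FeaturesUniquePtr = std::unique_ptr<const Features>;

    class Features {
     public:
      // Callers own the result; a null unique_ptr signals a parse failure,
      // matching the `return ArmFeaturesUniquePtr();` error paths above.
      static FeaturesUniquePtr FromVariant(const std::string& variant,
                                           std::string* error_msg) {
        if (variant.empty()) {
          *error_msg = "unknown variant";
          return FeaturesUniquePtr();
        }
        return FeaturesUniquePtr(new Features());
      }
    };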
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index cad13b29d9..4e7dea3f48 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -19,12 +19,13 @@
#include <fstream>
#include <sstream>
+#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
namespace art {
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
const bool smp = true; // Conservative default.
@@ -52,22 +53,23 @@ const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
// The variants that need a fix for 843419 are the same that need a fix for 835769.
bool needs_a53_843419_fix = needs_a53_835769_fix;
- return new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix);
+ return Arm64FeaturesUniquePtr(
+ new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool is_a53 = (bitmap & kA53Bitfield) != 0;
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCppDefines() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() {
const bool smp = true;
const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCpuInfo() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -89,16 +91,16 @@ const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCpuInfo() {
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromHwcap() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromHwcap() {
bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromAssembly() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
@@ -130,7 +132,8 @@ std::string Arm64InstructionSetFeatures::GetFeatureString() const {
return result;
}
-const InstructionSetFeatures* Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool is_a53 = fix_cortex_a53_835769_;
for (auto i = features.begin(); i != features.end(); i++) {
@@ -144,7 +147,8 @@ const InstructionSetFeatures* Arm64InstructionSetFeatures::AddFeaturesFromSplitS
return nullptr;
}
}
- return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
}
} // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index abd7e83248..e51aa1c43d 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -21,29 +21,31 @@
namespace art {
+class Arm64InstructionSetFeatures;
+using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
+
// Instruction set features relevant to the ARM64 architecture.
class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static Arm64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const Arm64InstructionSetFeatures* FromCppDefines();
+ static Arm64FeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromCpuInfo();
+ static Arm64FeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const Arm64InstructionSetFeatures* FromHwcap();
+ static Arm64FeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const Arm64InstructionSetFeatures* FromAssembly();
+ static Arm64FeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -70,7 +72,7 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
protected:
// Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 898f83a8d8..b32391f6b0 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -29,29 +29,28 @@
namespace art {
-const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg) {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariant(
+ InstructionSet isa, const std::string& variant, std::string* error_msg) {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (isa) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(ArmInstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(Arm64InstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(MipsInstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
break;
case kX86:
- result = X86InstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(X86InstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
+ result.reset(X86_64InstructionSetFeatures::FromVariant(variant, error_msg).release());
break;
default:
UNIMPLEMENTED(FATAL) << isa;
@@ -61,28 +60,28 @@ const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
- uint32_t bitmap) {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap(InstructionSet isa,
+ uint32_t bitmap) {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (isa) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(ArmInstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(Arm64InstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(MipsInstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
break;
case kX86:
- result = X86InstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(X86InstructionSetFeatures::FromBitmap(bitmap).release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
+ result.reset(X86_64InstructionSetFeatures::FromBitmap(bitmap).release());
break;
default:
UNIMPLEMENTED(FATAL) << isa;
@@ -92,27 +91,27 @@ const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDefines() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromCppDefines();
+ result.reset(ArmInstructionSetFeatures::FromCppDefines().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromCppDefines();
+ result.reset(Arm64InstructionSetFeatures::FromCppDefines().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromCppDefines();
+ result.reset(MipsInstructionSetFeatures::FromCppDefines().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromCppDefines();
break;
case kX86:
- result = X86InstructionSetFeatures::FromCppDefines();
+ result.reset(X86InstructionSetFeatures::FromCppDefines().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromCppDefines();
+ result.reset(X86_64InstructionSetFeatures::FromCppDefines().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -122,27 +121,27 @@ const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
}
-const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromCpuInfo();
+ result.reset(ArmInstructionSetFeatures::FromCpuInfo().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromCpuInfo();
+ result.reset(Arm64InstructionSetFeatures::FromCpuInfo().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromCpuInfo();
+ result.reset(MipsInstructionSetFeatures::FromCpuInfo().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromCpuInfo();
break;
case kX86:
- result = X86InstructionSetFeatures::FromCpuInfo();
+ result.reset(X86InstructionSetFeatures::FromCpuInfo().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromCpuInfo();
+ result.reset(X86_64InstructionSetFeatures::FromCpuInfo().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -151,27 +150,27 @@ const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromHwcap();
+ result.reset(ArmInstructionSetFeatures::FromHwcap().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromHwcap();
+ result.reset(Arm64InstructionSetFeatures::FromHwcap().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromHwcap();
+ result.reset(MipsInstructionSetFeatures::FromHwcap().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromHwcap();
break;
case kX86:
- result = X86InstructionSetFeatures::FromHwcap();
+ result.reset(X86InstructionSetFeatures::FromHwcap().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromHwcap();
+ result.reset(X86_64InstructionSetFeatures::FromHwcap().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -180,27 +179,27 @@ const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
- const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromAssembly() {
+ std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result = ArmInstructionSetFeatures::FromAssembly();
+ result.reset(ArmInstructionSetFeatures::FromAssembly().release());
break;
case kArm64:
- result = Arm64InstructionSetFeatures::FromAssembly();
+ result.reset(Arm64InstructionSetFeatures::FromAssembly().release());
break;
case kMips:
- result = MipsInstructionSetFeatures::FromAssembly();
+ result.reset(MipsInstructionSetFeatures::FromAssembly().release());
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromAssembly();
break;
case kX86:
- result = X86InstructionSetFeatures::FromAssembly();
+ result.reset(X86InstructionSetFeatures::FromAssembly().release());
break;
case kX86_64:
- result = X86_64InstructionSetFeatures::FromAssembly();
+ result.reset(X86_64InstructionSetFeatures::FromAssembly().release());
break;
default:
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -209,11 +208,11 @@ const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
return result;
}
-const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeaturesFromString(
const std::string& feature_list, std::string* error_msg) const {
if (feature_list.empty()) {
*error_msg = "No instruction set features specified";
- return nullptr;
+ return std::unique_ptr<const InstructionSetFeatures>();
}
std::vector<std::string> features;
Split(feature_list, ',', &features);
@@ -223,7 +222,7 @@ const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
for (auto it = features.begin(); it != features.end();) {
if (use_default) {
*error_msg = "Unexpected instruction set features after 'default'";
- return nullptr;
+ return std::unique_ptr<const InstructionSetFeatures>();
}
std::string feature = Trim(*it);
bool erase = false;
@@ -233,7 +232,7 @@ const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
erase = true;
} else {
*error_msg = "Unexpected instruction set features before 'default'";
- return nullptr;
+ return std::unique_ptr<const InstructionSetFeatures>();
}
} else if (feature == "smp") {
smp = true;
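
With every factory above now returning std::unique_ptr, ownership is explicit at the call site and no failure path needs a manual delete. A minimal caller-side sketch, assuming only the declarations in this patch (the variant string is illustrative):

  std::string error_msg;
  std::unique_ptr<const InstructionSetFeatures> features =
      InstructionSetFeatures::FromVariant(kArm, "cortex-a15", &error_msg);
  if (features == nullptr) {
    LOG(ERROR) << error_msg;  // No cleanup needed on any path.
  }
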
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index d10ae21ae3..d84bc02495 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
#define ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
+#include <memory>
#include <ostream>
#include <vector>
@@ -36,31 +37,32 @@ class X86_64InstructionSetFeatures;
class InstructionSetFeatures {
public:
// Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg);
+ static std::unique_ptr<const InstructionSetFeatures> FromVariant(InstructionSet isa,
+ const std::string& variant,
+ std::string* error_msg);
// Parse a bitmap for the given ISA and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
+ static std::unique_ptr<const InstructionSetFeatures> FromBitmap(InstructionSet isa,
+ uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
- static const InstructionSetFeatures* FromCppDefines();
+ static std::unique_ptr<const InstructionSetFeatures> FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const InstructionSetFeatures* FromCpuInfo();
+ static std::unique_ptr<const InstructionSetFeatures> FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const InstructionSetFeatures* FromHwcap();
+ static std::unique_ptr<const InstructionSetFeatures> FromHwcap();
// Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const InstructionSetFeatures* FromAssembly();
+ static std::unique_ptr<const InstructionSetFeatures> FromAssembly();
// Parse a string of the form "div,-atomic_ldrd_strd" adding and removing these features to
// create a new InstructionSetFeatures.
- const InstructionSetFeatures* AddFeaturesFromString(const std::string& feature_list,
- std::string* error_msg) const WARN_UNUSED;
+ std::unique_ptr<const InstructionSetFeatures> AddFeaturesFromString(
+ const std::string& feature_list, std::string* error_msg) const WARN_UNUSED;
// Are these features the same as the other given features?
virtual bool Equals(const InstructionSetFeatures* other) const = 0;
@@ -107,7 +109,7 @@ class InstructionSetFeatures {
const std::string& variant);
// Add architecture specific features in sub-classes.
- virtual const InstructionSetFeatures*
+ virtual std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(bool smp, const std::vector<std::string>& features,
std::string* error_msg) const = 0;
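
One consequence of the new signatures shows up in the AddFeaturesFromSplitString override chain: C++ allows covariant return types for raw pointers but not for std::unique_ptr, so every override has to repeat the exact base return type. A standalone illustration (not ART code):

  #include <memory>
  struct Base {
    virtual ~Base() {}
    virtual Base* CloneRaw() const = 0;               // Raw pointers may be covariant.
    virtual std::unique_ptr<Base> Clone() const = 0;  // unique_ptr returns must match exactly.
  };
  struct Derived : Base {
    Derived* CloneRaw() const override { return new Derived(*this); }  // OK: covariant.
    std::unique_ptr<Base> Clone() const override {
      // Declaring this as std::unique_ptr<Derived> would not compile.
      return std::unique_ptr<Base>(new Derived(*this));
    }
  };
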
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index b3a98667d0..a95b6f604c 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -19,6 +19,7 @@
#include <fstream>
#include <sstream>
+#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -63,7 +64,7 @@ static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bi
}
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
bool smp = true; // Conservative default.
@@ -97,18 +98,19 @@ const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant;
}
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
bool r6 = (bitmap & kR6) != 0;
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() {
// Assume conservative defaults.
const bool smp = true;
@@ -117,10 +119,10 @@ const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
bool r6;
GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCpuInfo() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
// Assume conservative defaults.
@@ -147,15 +149,15 @@ const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCpuInfo() {
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromHwcap() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromAssembly() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromAssembly() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
@@ -201,7 +203,8 @@ std::string MipsInstructionSetFeatures::GetFeatureString() const {
return result;
}
-const InstructionSetFeatures* MipsInstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+MipsInstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool fpu_32bit = fpu_32bit_;
bool mips_isa_gte2 = mips_isa_gte2_;
@@ -225,7 +228,8 @@ const InstructionSetFeatures* MipsInstructionSetFeatures::AddFeaturesFromSplitSt
return nullptr;
}
}
- return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+ return std::unique_ptr<const InstructionSetFeatures>(
+ new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
}
} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 2d54988683..c2a28dc7fa 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -23,29 +23,31 @@
namespace art {
+class MipsInstructionSetFeatures;
+using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
+
// Instruction set features relevant to the MIPS architecture.
class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static MipsFeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const MipsInstructionSetFeatures* FromCppDefines();
+ static MipsFeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromCpuInfo();
+ static MipsFeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const MipsInstructionSetFeatures* FromHwcap();
+ static MipsFeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const MipsInstructionSetFeatures* FromAssembly();
+ static MipsFeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -77,7 +79,7 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
protected:
// Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
- virtual const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
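
The forward declaration plus alias at the top of this header keeps the six factory signatures short without requiring a complete type. The same shape in isolation:

  #include <memory>
  class Widget;  // An incomplete type suffices for declaring the alias and signatures.
  using WidgetUniquePtr = std::unique_ptr<const Widget>;
  WidgetUniquePtr MakeWidget();  // Defined elsewhere, where Widget is complete.
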
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 5c0c91422b..490a8d2df3 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -24,27 +24,27 @@
namespace art {
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromVariant(
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
if (variant != "default" && variant != "mips64r6") {
LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
}
bool smp = true; // Conservative default.
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool smp = (bitmap & kSmpBitfield) != 0;
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCppDefines() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() {
const bool smp = true;
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCpuInfo() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -65,15 +65,15 @@ const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCpuInfo()
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- return new Mips64InstructionSetFeatures(smp);
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromHwcap() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromAssembly() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromAssembly() {
UNIMPLEMENTED(WARNING);
return FromCppDefines();
}
@@ -99,7 +99,8 @@ std::string Mips64InstructionSetFeatures::GetFeatureString() const {
return result;
}
-const InstructionSetFeatures* Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
auto i = features.begin();
if (i != features.end()) {
@@ -108,7 +109,7 @@ const InstructionSetFeatures* Mips64InstructionSetFeatures::AddFeaturesFromSplit
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
- return new Mips64InstructionSetFeatures(smp);
+ return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(smp));
}
} // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index d5d6012007..2e66235506 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -21,29 +21,32 @@
namespace art {
+class Mips64InstructionSetFeatures;
+using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
+
// Instruction set features relevant to the MIPS64 architecture.
class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
public:
// Process a CPU variant string like "r4000" and create InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
+ static Mips64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
// Parse a bitmap and create an InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+ static Mips64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const Mips64InstructionSetFeatures* FromCppDefines();
+ static Mips64FeaturesUniquePtr FromCppDefines();
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromCpuInfo();
+ static Mips64FeaturesUniquePtr FromCpuInfo();
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const Mips64InstructionSetFeatures* FromHwcap();
+ static Mips64FeaturesUniquePtr FromHwcap();
// Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const Mips64InstructionSetFeatures* FromAssembly();
+ static Mips64FeaturesUniquePtr FromAssembly();
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -59,8 +62,9 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
protected:
// Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
- virtual const InstructionSetFeatures*
- AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::unique_ptr<const InstructionSetFeatures>
+ AddFeaturesFromSplitString(const bool smp,
+ const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE;
private:
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 0093e82008..90b55a97f6 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -49,7 +49,34 @@ static constexpr const char* x86_variants_with_popcnt[] = {
"silvermont",
};
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
+X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
+ bool smp,
+ bool has_SSSE3,
+ bool has_SSE4_1,
+ bool has_SSE4_2,
+ bool has_AVX,
+ bool has_AVX2,
+ bool has_POPCNT) {
+ if (x86_64) {
+ return X86FeaturesUniquePtr(new X86_64InstructionSetFeatures(smp,
+ has_SSSE3,
+ has_SSE4_1,
+ has_SSE4_2,
+ has_AVX,
+ has_AVX2,
+ has_POPCNT));
+ } else {
+ return X86FeaturesUniquePtr(new X86InstructionSetFeatures(smp,
+ has_SSSE3,
+ has_SSE4_1,
+ has_SSE4_2,
+ has_AVX,
+ has_AVX2,
+ has_POPCNT));
+ }
+}
+
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
bool x86_64) {
bool smp = true; // Conservative default.
@@ -75,17 +102,10 @@ const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant;
}
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t bitmap,
- bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromBitmap(uint32_t bitmap, bool x86_64) {
bool smp = (bitmap & kSmpBitfield) != 0;
bool has_SSSE3 = (bitmap & kSsse3Bitfield) != 0;
bool has_SSE4_1 = (bitmap & kSse4_1Bitfield) != 0;
@@ -93,16 +113,10 @@ const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t
bool has_AVX = (bitmap & kAvxBitfield) != 0;
bool has_AVX2 = (bitmap & kAvxBitfield) != 0;
bool has_POPCNT = (bitmap & kPopCntBitfield) != 0;
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
- has_AVX, has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
- has_AVX, has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
const bool smp = true;
#ifndef __SSSE3__
@@ -141,16 +155,10 @@ const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool
const bool has_POPCNT = true;
#endif
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
// Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
// the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
bool smp = false;
@@ -198,21 +206,15 @@ const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCpuInfo(bool x86
} else {
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromHwcap(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromHwcap(bool x86_64) {
UNIMPLEMENTED(WARNING);
return FromCppDefines(x86_64);
}
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromAssembly(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromAssembly(bool x86_64) {
UNIMPLEMENTED(WARNING);
return FromCppDefines(x86_64);
}
@@ -281,7 +283,7 @@ std::string X86InstructionSetFeatures::GetFeatureString() const {
return result;
}
-const InstructionSetFeatures* X86InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures> X86InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, bool x86_64,
std::string* error_msg) const {
bool has_SSSE3 = has_SSSE3_;
@@ -321,13 +323,7 @@ const InstructionSetFeatures* X86InstructionSetFeatures::AddFeaturesFromSplitStr
return nullptr;
}
}
- if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- } else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2, has_POPCNT);
- }
+ return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
}
} // namespace art
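
Factoring the x86/x86_64 branch into Create() removes five copies of the same if/else; each factory now differs only in how it derives the feature booleans. One observation on a context line this hunk leaves untouched: in FromBitmap both has_AVX and has_AVX2 test kAvxBitfield, which reads like a pre-existing copy-and-paste slip (kAvx2Bitfield would be expected). On the caller side, the defaulted flag keeps 32-bit call sites unchanged; a sketch using the declarations from this patch:

  std::string error_msg;
  X86FeaturesUniquePtr x86_features =
      X86InstructionSetFeatures::FromVariant("silvermont", &error_msg);
  X86FeaturesUniquePtr x86_64_features =
      X86InstructionSetFeatures::FromVariant("silvermont", &error_msg, /* x86_64= */ true);
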
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 2aa8ae6055..672892e5a5 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -21,30 +21,34 @@
namespace art {
+class X86InstructionSetFeatures;
+using X86FeaturesUniquePtr = std::unique_ptr<const X86InstructionSetFeatures>;
+
// Instruction set features relevant to the X86 architecture.
class X86InstructionSetFeatures : public InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg,
- bool x86_64 = false);
+ static X86FeaturesUniquePtr FromVariant(const std::string& variant,
+ std::string* error_msg,
+ bool x86_64 = false);
// Parse a bitmap and create an InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromBitmap(uint32_t bitmap, bool x86_64 = false);
+ static X86FeaturesUniquePtr FromBitmap(uint32_t bitmap, bool x86_64 = false);
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const X86InstructionSetFeatures* FromCppDefines(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromCppDefines(bool x86_64 = false);
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromCpuInfo(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromCpuInfo(bool x86_64 = false);
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const X86InstructionSetFeatures* FromHwcap(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromHwcap(bool x86_64 = false);
// Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const X86InstructionSetFeatures* FromAssembly(bool x86_64 = false);
+ static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
@@ -64,13 +68,13 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
- virtual const InstructionSetFeatures*
+ virtual std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE {
return AddFeaturesFromSplitString(smp, features, false, error_msg);
}
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
bool x86_64, std::string* error_msg) const;
@@ -85,6 +89,15 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
has_POPCNT_(has_POPCNT) {
}
+ static X86FeaturesUniquePtr Create(bool x86_64,
+ bool smp,
+ bool has_SSSE3,
+ bool has_SSE4_1,
+ bool has_SSE4_2,
+ bool has_AVX,
+ bool has_AVX2,
+ bool has_POPCNT);
+
private:
// Bitmap positions for encoding features as a bitmap.
enum {
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index 0840f89a21..bc0f708e20 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -21,41 +21,42 @@
namespace art {
+class X86_64InstructionSetFeatures;
+using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
+
// Instruction set features relevant to the X86_64 architecture.
class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
public:
// Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg) {
- return X86InstructionSetFeatures::FromVariant(variant, error_msg, true)
- ->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
+ return Convert(X86InstructionSetFeatures::FromVariant(variant, error_msg, true));
}
// Parse a bitmap and create an InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromBitmap(uint32_t bitmap) {
- return X86InstructionSetFeatures::FromBitmap(bitmap, true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromBitmap(uint32_t bitmap) {
+ return Convert(X86InstructionSetFeatures::FromBitmap(bitmap, true));
}
// Turn C pre-processor #defines into the equivalent instruction set features.
- static const X86_64InstructionSetFeatures* FromCppDefines() {
- return X86InstructionSetFeatures::FromCppDefines(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromCppDefines() {
+ return Convert(X86InstructionSetFeatures::FromCppDefines(true));
}
// Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromCpuInfo() {
- return X86InstructionSetFeatures::FromCpuInfo(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromCpuInfo() {
+ return Convert(X86InstructionSetFeatures::FromCpuInfo(true));
}
// Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
// InstructionSetFeatures.
- static const X86_64InstructionSetFeatures* FromHwcap() {
- return X86InstructionSetFeatures::FromHwcap(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromHwcap() {
+ return Convert(X86InstructionSetFeatures::FromHwcap(true));
}
// Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
// InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const X86_64InstructionSetFeatures* FromAssembly() {
- return X86InstructionSetFeatures::FromAssembly(true)->AsX86_64InstructionSetFeatures();
+ static X86_64FeaturesUniquePtr FromAssembly() {
+ return Convert(X86InstructionSetFeatures::FromAssembly(true));
}
InstructionSet GetInstructionSet() const OVERRIDE {
@@ -66,7 +67,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
- const InstructionSetFeatures*
+ std::unique_ptr<const InstructionSetFeatures>
AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
std::string* error_msg) const OVERRIDE {
return X86InstructionSetFeatures::AddFeaturesFromSplitString(smp, features, true, error_msg);
@@ -79,6 +80,10 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
has_AVX2, has_POPCNT) {
}
+ static X86_64FeaturesUniquePtr Convert(X86FeaturesUniquePtr&& in) {
+ return X86_64FeaturesUniquePtr(in.release()->AsX86_64InstructionSetFeatures());
+ }
+
friend class X86InstructionSetFeatures;
DISALLOW_COPY_AND_ASSIGN(X86_64InstructionSetFeatures);
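
Convert() moves ownership across the downcast by releasing the base unique_ptr and re-wrapping the derived pointer; that is leak-free here because every X86InstructionSetFeatures built with x86_64 == true is an X86_64InstructionSetFeatures by construction. The same move written generically, as a standalone sketch:

  #include <memory>
  template <typename To, typename From>
  std::unique_ptr<To> UniqueDownCast(std::unique_ptr<From>&& in) {
    // Only safe when the dynamic type is guaranteed, as Convert() guarantees it.
    return std::unique_ptr<To>(static_cast<To*>(in.release()));
  }
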
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 3065f68fc4..c550a1b6bd 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -468,7 +468,18 @@ const uint8_t* ArtMethod::GetQuickenedInfo(PointerSize pointer_size) {
if (!found || (oat_method.GetQuickCode() != nullptr)) {
return nullptr;
}
- return oat_method.GetVmapTable();
+ if (kIsVdexEnabled) {
+ const OatQuickMethodHeader* header = oat_method.GetOatQuickMethodHeader();
+ // OatMethod without a header: no quickening table.
+ if (header == nullptr) {
+ return nullptr;
+ }
+ // The table is in the .vdex file.
+ const OatFile::OatDexFile* oat_dex_file = GetDexCache()->GetDexFile()->GetOatDexFile();
+ return oat_dex_file->GetOatFile()->DexBegin() + header->vmap_table_offset_;
+ } else {
+ return oat_method.GetVmapTable();
+ }
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
diff --git a/runtime/base/time_utils.h b/runtime/base/time_utils.h
index 55d2764576..383b52fb33 100644
--- a/runtime/base/time_utils.h
+++ b/runtime/base/time_utils.h
@@ -73,9 +73,11 @@ static constexpr inline uint64_t MsToNs(uint64_t ms) {
}
#if defined(__APPLE__)
-// No clocks to specify on OS/X, fake value to pass to routines that require a clock.
+#ifndef CLOCK_REALTIME
+// No clocks to specify on OS X before 10.12; fake value to pass to routines that require a clock.
#define CLOCK_REALTIME 0xebadf00d
#endif
+#endif
// Sleep for the given number of nanoseconds, a bad way to handle contention.
void NanoSleep(uint64_t ns);
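
The extra #ifndef matters because newer Apple SDKs (macOS 10.12 and later, an assumption consistent with the updated comment) define CLOCK_REALTIME in <time.h>, and redefining the macro unconditionally would break the build there. The guarded pattern in isolation:

  #include <time.h>
  #if defined(__APPLE__) && !defined(CLOCK_REALTIME)
  #define CLOCK_REALTIME 0xebadf00d  // Fake id; no real clock to name on old SDKs.
  #endif
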
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index a1ce30b3fc..5399dc5206 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -277,7 +277,7 @@ class ScopedCheck {
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o.Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("field operation on invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_object)),
java_object);
return false;
}
@@ -632,17 +632,17 @@ class ScopedCheck {
bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
- found_kind = GetIndirectRefKind(obj);
+ found_kind = IndirectReferenceTable::GetIndirectRefKind(obj);
if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
found_kind = kLocal;
}
} else {
- found_kind = GetIndirectRefKind(obj);
+ found_kind = IndirectReferenceTable::GetIndirectRefKind(obj);
}
if (obj != nullptr && found_kind != expected_kind) {
AbortF("expected reference of kind %s but found %s: %p",
- ToStr<IndirectRefKind>(expected_kind).c_str(),
- ToStr<IndirectRefKind>(GetIndirectRefKind(obj)).c_str(),
+ GetIndirectRefKindString(expected_kind),
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(obj)),
obj);
return false;
}
@@ -773,7 +773,7 @@ class ScopedCheck {
// Either java_object is invalid or is a cleared weak.
IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
bool okay;
- if (GetIndirectRefKind(ref) != kWeakGlobal) {
+ if (IndirectReferenceTable::GetIndirectRefKind(ref) != kWeakGlobal) {
okay = false;
} else {
obj = soa.Vm()->DecodeWeakGlobal(soa.Self(), ref);
@@ -781,8 +781,10 @@ class ScopedCheck {
}
if (!okay) {
AbortF("%s is an invalid %s: %p (%p)",
- what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
- java_object, obj.Ptr());
+ what,
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_object)),
+ java_object,
+ obj.Ptr());
return false;
}
}
@@ -790,8 +792,10 @@ class ScopedCheck {
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj.Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("%s is an invalid %s: %p (%p)",
- what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
- java_object, obj.Ptr());
+ what,
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_object)),
+ java_object,
+ obj.Ptr());
return false;
}
@@ -1116,8 +1120,9 @@ class ScopedCheck {
if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a.Ptr()))) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("jarray is an invalid %s: %p (%p)",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
- java_array, a.Ptr());
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_array)),
+ java_array,
+ a.Ptr());
return false;
} else if (!a->IsArrayInstance()) {
AbortF("jarray argument has non-array type: %s", a->PrettyTypeOf().c_str());
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cea8377c58..c23b1b1f1a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1180,8 +1180,7 @@ class FixupArtMethodArrayVisitor : public ArtMethodVisitor {
<< resolved_types << " is not in image starting at "
<< reinterpret_cast<void*>(header_.GetImageBegin());
if (!is_copied || in_image_space) {
- // Go through the array so that we don't need to do a slow map lookup.
- method->SetDexCacheResolvedTypes(*reinterpret_cast<GcRoot<mirror::Class>**>(resolved_types),
+ method->SetDexCacheResolvedTypes(method->GetDexCache()->GetResolvedTypes(),
kRuntimePointerSize);
}
}
@@ -1197,8 +1196,7 @@ class FixupArtMethodArrayVisitor : public ArtMethodVisitor {
<< resolved_methods << " is not in image starting at "
<< reinterpret_cast<void*>(header_.GetImageBegin());
if (!is_copied || in_image_space) {
- // Go through the array so that we don't need to do a slow map lookup.
- method->SetDexCacheResolvedMethods(*reinterpret_cast<ArtMethod***>(resolved_methods),
+ method->SetDexCacheResolvedMethods(method->GetDexCache()->GetResolvedMethods(),
kRuntimePointerSize);
}
}
@@ -1241,6 +1239,20 @@ class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
gc::accounting::HeapBitmap* const live_bitmap_;
};
+// Copies data from one array to another at the same position whenever pred
+// returns false. If there is a page of contiguous data in the src array for
+// which pred consistently returns true, the corresponding page in the dst
+// array is left untouched. This should reduce the number of physical pages
+// allocated for the dst array.
+template <class T, class NullPred>
+static void CopyNonNull(const T* src, size_t count, T* dst, const NullPred& pred) {
+ for (size_t i = 0; i < count; ++i) {
+ if (!pred(src[i])) {
+ dst[i] = src[i];
+ }
+ }
+}
+
bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1283,7 +1295,7 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
}
// Only add the classes to the class loader after the points where we can return false.
for (size_t i = 0; i < num_dex_caches; i++) {
- ObjPtr<mirror::DexCache> const dex_cache = dex_caches->Get(i);
+ ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
const DexFile* const dex_file = dex_cache->GetDexFile();
const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) {
@@ -1332,11 +1344,12 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) {
DCHECK(types[j].IsNull());
}
- std::copy_n(image_resolved_types, num_types, types);
- // Store a pointer to the new location for fast ArtMethod patching without requiring map.
- // This leaves random garbage at the start of the dex cache array, but nobody should ever
- // read from it again.
- *reinterpret_cast<GcRoot<mirror::Class>**>(image_resolved_types) = types;
+ CopyNonNull(image_resolved_types,
+ num_types,
+ types,
+ [](const GcRoot<mirror::Class>& elem) {
+ return elem.IsNull();
+ });
dex_cache->SetResolvedTypes(types);
}
if (num_methods != 0u) {
@@ -1346,9 +1359,12 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
for (size_t j = 0; kIsDebugBuild && j < num_methods; ++j) {
DCHECK(methods[j] == nullptr);
}
- std::copy_n(image_resolved_methods, num_methods, methods);
- // Store a pointer to the new location for fast ArtMethod patching without requiring map.
- *reinterpret_cast<ArtMethod***>(image_resolved_methods) = methods;
+ CopyNonNull(image_resolved_methods,
+ num_methods,
+ methods,
+ [] (const ArtMethod* method) {
+ return method == nullptr;
+ });
dex_cache->SetResolvedMethods(methods);
}
if (num_fields != 0u) {
@@ -1357,7 +1373,12 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
for (size_t j = 0; kIsDebugBuild && j < num_fields; ++j) {
DCHECK(fields[j] == nullptr);
}
- std::copy_n(dex_cache->GetResolvedFields(), num_fields, fields);
+ CopyNonNull(dex_cache->GetResolvedFields(),
+ num_fields,
+ fields,
+ [] (const ArtField* field) {
+ return field == nullptr;
+ });
dex_cache->SetResolvedFields(fields);
}
if (num_method_types != 0u) {
@@ -1391,7 +1412,11 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
/*allow_failure*/true);
CHECK(existing_dex_cache == nullptr);
StackHandleScope<1> hs3(self);
- RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache));
+ Handle<mirror::DexCache> h_dex_cache = hs3.NewHandle(dex_cache);
+ RegisterDexFileLocked(*dex_file, h_dex_cache);
+ if (kIsDebugBuild) {
+ dex_cache.Assign(h_dex_cache.Get()); // Update dex_cache, used below in debug build.
+ }
}
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
@@ -1781,6 +1806,12 @@ bool ClassLinker::AddImageSpace(
<< reinterpret_cast<const void*>(section_end);
}
}
+ if (!oat_file->GetBssGcRoots().empty()) {
+ // Insert oat file to class table for visiting .bss GC roots.
+ class_table->InsertOatFile(oat_file);
+ }
+ } else {
+ DCHECK(oat_file->GetBssGcRoots().empty());
}
if (added_class_table) {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -2235,8 +2266,7 @@ mirror::Class* ClassLinker::EnsureResolved(Thread* self,
}
CHECK(h_class->IsRetired());
// Get the updated class from class table.
- klass = LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor),
- h_class.Get()->GetClassLoader());
+ klass = LookupClass(self, descriptor, h_class.Get()->GetClassLoader());
}
// Wait for the class if it has not already been linked.
@@ -3242,6 +3272,10 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file,
WriterMutexLock mu(self, dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
+ // Another thread managed to initialize the dex cache faster, so use that DexCache.
+ // If this thread encountered OOME, ignore it.
+ DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
+ self->ClearException();
return dex_cache.Ptr();
}
if (h_dex_cache.Get() == nullptr) {
@@ -3696,9 +3730,8 @@ bool ClassLinker::AttemptSupertypeVerification(Thread* self,
return false;
}
-void ClassLinker::VerifyClass(Thread* self,
- Handle<mirror::Class> klass,
- verifier::HardFailLogMode log_level) {
+verifier::MethodVerifier::FailureKind ClassLinker::VerifyClass(
+ Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
{
// TODO: assert that the monitor on the Class is held
ObjectLock<mirror::Class> lock(self, klass);
@@ -3719,16 +3752,16 @@ void ClassLinker::VerifyClass(Thread* self,
// this class as a parent to another.
if (klass->IsErroneous()) {
ThrowEarlierClassFailure(klass.Get());
- return;
+ return verifier::MethodVerifier::kHardFailure;
}
// Don't attempt to re-verify if already sufficiently verified.
if (klass->IsVerified()) {
EnsureSkipAccessChecksMethods(klass);
- return;
+ return verifier::MethodVerifier::kNoFailure;
}
if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
- return;
+ return verifier::MethodVerifier::kNoFailure;
}
if (klass->GetStatus() == mirror::Class::kStatusResolved) {
@@ -3744,7 +3777,7 @@ void ClassLinker::VerifyClass(Thread* self,
if (!Runtime::Current()->IsVerificationEnabled()) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
EnsureSkipAccessChecksMethods(klass);
- return;
+ return verifier::MethodVerifier::kNoFailure;
}
}
@@ -3754,7 +3787,7 @@ void ClassLinker::VerifyClass(Thread* self,
// If we have a superclass and we get a hard verification failure we can return immediately.
if (supertype.Get() != nullptr && !AttemptSupertypeVerification(self, klass, supertype)) {
CHECK(self->IsExceptionPending()) << "Verification error should be pending.";
- return;
+ return verifier::MethodVerifier::kHardFailure;
}
// Verify all default super-interfaces.
@@ -3781,7 +3814,7 @@ void ClassLinker::VerifyClass(Thread* self,
} else if (UNLIKELY(!AttemptSupertypeVerification(self, klass, iface))) {
// We had a hard failure while verifying this interface. Just return immediately.
CHECK(self->IsExceptionPending()) << "Verification error should be pending.";
- return;
+ return verifier::MethodVerifier::kHardFailure;
} else if (UNLIKELY(!iface->IsVerified())) {
// We softly failed to verify the iface. Stop checking and clean up.
// Put the iface into the supertype handle so we know what caused us to fail.
@@ -3807,8 +3840,8 @@ void ClassLinker::VerifyClass(Thread* self,
// oat_file_class_status == mirror::Class::kStatusError => !preverified
DCHECK(!(oat_file_class_status == mirror::Class::kStatusError) || !preverified);
- verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
std::string error_msg;
+ verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
if (!preverified) {
Runtime* runtime = Runtime::Current();
verifier_failure = verifier::MethodVerifier::VerifyClass(self,
@@ -3881,6 +3914,7 @@ void ClassLinker::VerifyClass(Thread* self,
EnsureSkipAccessChecksMethods(klass);
}
}
+ return verifier_failure;
}
void ClassLinker::EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass) {
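
The switch from std::copy_n to CopyNonNull in the three dex-cache array copies above is a dirty-page optimization: the destination arrays start out null-filled in untouched pages, so skipping writes of null entries means a page whose corresponding source run is entirely null is never written, and so never backed by a physical page. A standalone restatement (types simplified):

  #include <cstddef>
  template <class T, class NullPred>
  void CopyNonNull(const T* src, size_t count, T* dst, const NullPred& is_null) {
    for (size_t i = 0; i < count; ++i) {
      if (!is_null(src[i])) {
        dst[i] = src[i];  // Null runs leave dst untouched, keeping those pages clean.
      }
    }
  }
  // Usage mirroring the resolved-fields case above:
  //   CopyNonNull(src_fields, n, dst_fields,
  //               [](const ArtField* f) { return f == nullptr; });
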
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 239e973dba..e99dfe3b53 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -36,6 +36,7 @@
#include "jni.h"
#include "mirror/class.h"
#include "object_callbacks.h"
+#include "verifier/method_verifier.h"
#include "verifier/verifier_log_mode.h"
namespace art {
@@ -171,20 +172,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
- // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
- // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
- // class-loader chain could be handled, false otherwise, i.e., a non-supported class-loader
- // was encountered while walking the parent chain (currently only BootClassLoader and
- // PathClassLoader are supported).
- bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self,
- const char* descriptor,
- size_t hash,
- Handle<mirror::ClassLoader> class_loader,
- ObjPtr<mirror::Class>* result)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
-
// Finds a class by its descriptor using the "system" class loader, ie by searching the
// boot_class_path_.
mirror::Class* FindSystemClass(Thread* self, const char* descriptor)
@@ -215,10 +202,11 @@ class ClassLinker {
// by the given 'class_loader'.
mirror::Class* LookupClass(Thread* self,
const char* descriptor,
- size_t hash,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor), class_loader);
+ }
// Finds all the classes with the given descriptor, regardless of ClassLoader.
void LookupClasses(const char* descriptor, std::vector<ObjPtr<mirror::Class>>& classes)
@@ -483,9 +471,10 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- void VerifyClass(Thread* self,
- Handle<mirror::Class> klass,
- verifier::HardFailLogMode log_level = verifier::HardFailLogMode::kLogNone)
+ verifier::MethodVerifier::FailureKind VerifyClass(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ verifier::HardFailLogMode log_level = verifier::HardFailLogMode::kLogNone)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool VerifyClassUsingOatFile(const DexFile& dex_file,
@@ -804,6 +793,29 @@ class ClassLinker {
void FixupStaticTrampolines(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
+ // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
+ // class-loader chain could be handled, false otherwise, i.e., a non-supported class-loader
+ // was encountered while walking the parent chain (currently only BootClassLoader and
+ // PathClassLoader are supported).
+ bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ Thread* self,
+ const char* descriptor,
+ size_t hash,
+ Handle<mirror::ClassLoader> class_loader,
+ ObjPtr<mirror::Class>* result)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_);
+
+ // Finds a class by its descriptor, returning null if it wasn't loaded
+ // by the given 'class_loader'. Uses the provided hash for the descriptor.
+ mirror::Class* LookupClass(Thread* self,
+ const char* descriptor,
+ size_t hash,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1187,6 +1199,7 @@ class ClassLinker {
friend struct CompilationHelper; // For Compile in ImageTest.
friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
+ friend class VMClassLoader; // for LookupClass and FindClassInPathClassLoader.
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
friend class JniInternalTest; // for GetRuntimeQuickGenericJniStub
ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for DexLock, and RegisterDexFileLocked
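
Returning the FailureKind from VerifyClass lets callers branch on the verification outcome directly instead of re-reading the class status afterwards. A hedged caller sketch (the surrounding names are illustrative):

  verifier::MethodVerifier::FailureKind failure =
      class_linker->VerifyClass(self, h_klass);
  if (failure == verifier::MethodVerifier::kHardFailure) {
    DCHECK(self->IsExceptionPending());  // The hard-failure paths above leave one pending.
  }
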
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 97c0abdda3..b44104e299 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -170,14 +170,27 @@ bool ClassTable::InsertStrongRoot(ObjPtr<mirror::Object> obj) {
const DexFile* dex_file = ObjPtr<mirror::DexCache>::DownCast(obj)->GetDexFile();
if (dex_file != nullptr && dex_file->GetOatDexFile() != nullptr) {
const OatFile* oat_file = dex_file->GetOatDexFile()->GetOatFile();
- if (!oat_file->GetBssGcRoots().empty() && !ContainsElement(oat_files_, oat_file)) {
- oat_files_.push_back(oat_file);
+ if (!oat_file->GetBssGcRoots().empty()) {
+ InsertOatFileLocked(oat_file); // Ignore return value.
}
}
}
return true;
}
+bool ClassTable::InsertOatFile(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), lock_);
+ return InsertOatFileLocked(oat_file);
+}
+
+bool ClassTable::InsertOatFileLocked(const OatFile* oat_file) {
+ if (ContainsElement(oat_files_, oat_file)) {
+ return false;
+ }
+ oat_files_.push_back(oat_file);
+ return true;
+}
+
size_t ClassTable::WriteToMemory(uint8_t* ptr) const {
ReaderMutexLock mu(Thread::Current(), lock_);
ClassSet combined;
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 1344990fbf..bc9eaf4c2d 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -141,6 +141,11 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Return true if we inserted the oat file, false if it already exists.
+ bool InsertOatFile(const OatFile* oat_file)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
REQUIRES(!lock_)
@@ -168,6 +173,11 @@ class ClassTable {
private:
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
+ // Return true if we inserted the oat file, false if it already exists.
+ bool InsertOatFileLocked(const OatFile* oat_file)
+ REQUIRES(lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Lock to guard inserting and removing.
mutable ReaderWriterMutex lock_;
// We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
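
InsertOatFile/InsertOatFileLocked is the usual public-wrapper-over-locked-implementation split: external callers such as AddImageSpace take lock_ through the wrapper, while InsertStrongRoot, which already holds the write lock, calls the Locked variant directly. In miniature, with the annotations this codebase uses:

  bool InsertOatFile(const OatFile* oat_file) REQUIRES(!lock_) {
    WriterMutexLock mu(Thread::Current(), lock_);
    return InsertOatFileLocked(oat_file);  // All logic lives in the locked variant.
  }
  bool InsertOatFileLocked(const OatFile* oat_file) REQUIRES(lock_);
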
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index b0aba59b6d..0251776de9 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -686,6 +686,15 @@ void ThrowRuntimeException(const char* fmt, ...) {
va_end(args);
}
+// SecurityException
+
+void ThrowSecurityException(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ThrowException("Ljava/lang/SecurityException;", nullptr, fmt, &args);
+ va_end(args);
+}
+
// Stack overflow.
void ThrowStackOverflowError(Thread* self) {
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 5d0bc1211e..76ea2ae6c8 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -215,6 +215,12 @@ void ThrowRuntimeException(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+// SecurityException
+
+void ThrowSecurityException(const char* fmt, ...)
+ __attribute__((__format__(__printf__, 1, 2)))
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
// Stack overflow.
void ThrowStackOverflowError(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
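
The new thrower follows the same printf-checked shape as its neighbors, so format-string mismatches are diagnosed at compile time. An illustrative call site (the message and arguments are hypothetical):

  ThrowSecurityException("Access to %s denied to %s", resource_name, caller_name);
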
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e2b8f5158e..1da888e4b7 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -551,6 +551,10 @@ void Dbg::SetJdwpAllowed(bool allowed) {
gJdwpAllowed = allowed;
}
+bool Dbg::IsJdwpAllowed() {
+ return gJdwpAllowed;
+}
+
DebugInvokeReq* Dbg::GetInvokeReq() {
return Thread::Current()->GetInvokeReq();
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 5d0315e9a1..3b4a5e16b0 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -202,6 +202,7 @@ std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rh
class Dbg {
public:
static void SetJdwpAllowed(bool allowed);
+ static bool IsJdwpAllowed();
static void StartJdwp();
static void StopJdwp();
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index 108a5af908..621b2c5f21 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -90,6 +90,10 @@ inline const Signature DexFile::GetMethodSignature(const MethodId& method_id) co
return Signature(this, GetProtoId(method_id.proto_idx_));
}
+inline const Signature DexFile::GetProtoSignature(const ProtoId& proto_id) const {
+ return Signature(this, proto_id);
+}
+
inline const char* DexFile::GetMethodName(const MethodId& method_id) const {
return StringDataByIdx(method_id.name_idx_);
}
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 20f3a9caea..da828dc403 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -639,6 +639,9 @@ class DexFile {
// Returns a representation of the signature of a method id.
const Signature GetMethodSignature(const MethodId& method_id) const;
+ // Returns a representation of the signature of a proto id.
+ const Signature GetProtoSignature(const ProtoId& proto_id) const;
+
// Returns the name of a method id.
const char* GetMethodName(const MethodId& method_id) const;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index f437fdef1c..8eb1a79420 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -126,13 +126,14 @@ class Instruction {
enum IndexType {
kIndexUnknown = 0,
- kIndexNone, // has no index
- kIndexTypeRef, // type reference index
- kIndexStringRef, // string reference index
- kIndexMethodRef, // method reference index
- kIndexFieldRef, // field reference index
- kIndexFieldOffset, // field offset (for static linked fields)
- kIndexVtableOffset // vtable offset (for static linked methods)
+ kIndexNone, // has no index
+ kIndexTypeRef, // type reference index
+ kIndexStringRef, // string reference index
+ kIndexMethodRef, // method reference index
+ kIndexFieldRef, // field reference index
+ kIndexFieldOffset, // field offset (for static linked fields)
+ kIndexVtableOffset, // vtable offset (for static linked methods)
+ kIndexMethodAndProtoRef // method and a proto reference index (for invoke-polymorphic)
};
enum Flags {
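
kIndexMethodAndProtoRef records that invoke-polymorphic carries two constant-pool indices at once: in the k45cc/k4rcc formats the method reference occupies the B field and the proto reference the H field. A hedged decoder sketch (accessor names assumed to follow the existing VReg pattern):

  uint16_t method_idx = inst->VRegB_45cc();  // BBBB: method reference index.
  uint16_t proto_idx = inst->VRegH_45cc();   // HHHH: proto reference index.
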
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 3194c1a86b..e537afe313 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -269,8 +269,8 @@
V(0xF7, UNUSED_F7, "unused-f7", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xF8, UNUSED_F8, "unused-f8", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xF9, UNUSED_F9, "unused-f9", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero | kExperimental) \
- V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kExperimental) \
+ V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero | kExperimental) \
+ V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kExperimental) \
V(0xFC, UNUSED_FC, "unused-fc", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFD, UNUSED_FD, "unused-fd", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, kVerifyError) \
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 31811fb96a..ed60f598d1 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -483,15 +483,15 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite);
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- mirror::Object** this_object,
+ ObjPtr<mirror::Object>* this_object,
ArtMethod* referrer,
Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
- mirror::Object* null_this = nullptr;
- HandleWrapper<mirror::Object> h_this(
+ ObjPtr<mirror::Object> null_this = nullptr;
+ HandleWrapperObjPtr<mirror::Object> h_this(
hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
constexpr ClassLinker::ResolveMode resolve_mode =
access_check ? ClassLinker::kForceICCECheck
@@ -560,7 +560,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
// defaults. What we actually need is a GetContainingClass that says which classes virtuals
// this method is coming from.
StackHandleScope<2> hs2(self);
- HandleWrapper<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
+ HandleWrapperObjPtr<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
Handle<mirror::Class> h_referring_class(hs2.NewHandle(referrer->GetDeclaringClass()));
const uint16_t method_type_idx =
h_referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
@@ -652,7 +652,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
- mirror::Object** this_object, \
+ ObjPtr<mirror::Object>* this_object, \
ArtMethod* referrer, \
Thread* self)
#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
@@ -722,8 +722,11 @@ inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFiel
}
// Fast path method resolution that can't throw exceptions.
-inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_object,
- ArtMethod* referrer, bool access_check, InvokeType type) {
+inline ArtMethod* FindMethodFast(uint32_t method_idx,
+ ObjPtr<mirror::Object> this_object,
+ ArtMethod* referrer,
+ bool access_check,
+ InvokeType type) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
if (UNLIKELY(this_object == nullptr && type != kStatic)) {
return nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index cbefbbac5d..1ccb4b004c 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -237,7 +237,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
}
}
-bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) {
+bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPayload* payload) {
DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("null array in FILL_ARRAY_DATA");
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 20c8401111..bcddfb0508 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -156,7 +156,7 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx,
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- mirror::Object** this_object,
+ ObjPtr<mirror::Object>* this_object,
ArtMethod* referrer,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -171,7 +171,7 @@ inline ArtField* FindFieldFast(uint32_t field_idx,
// Fast path method resolution that can't throw exceptions.
inline ArtMethod* FindMethodFast(uint32_t method_idx,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer,
bool access_check,
InvokeType type)
@@ -203,7 +203,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
-bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
+bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPayload* payload)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
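Background on the ObjPtr migration above: ObjPtr is a value-type wrapper around the raw object pointer that can carry extra validity checking in debug builds, which is why garbage values (see the static-invoke trampoline change below) must not be wrapped. A toy illustration of the idea only; ART's real ObjPtr lives in obj_ptr.h and differs in detail:

#include <cassert>
#include <cstdint>

// Toy checked-pointer wrapper: rejects obviously bogus (misaligned) pointers
// at construction, while staying a trivially copyable value type.
template <typename T>
class CheckedPtr {
 public:
  explicit CheckedPtr(T* ptr) : value_(reinterpret_cast<uintptr_t>(ptr)) {
    // Heap objects are aligned, so a garbage pointer usually trips this.
    assert((value_ & (alignof(T) - 1)) == 0);
  }
  T* Ptr() const { return reinterpret_cast<T*>(value_); }
  bool IsNull() const { return value_ == 0u; }
 private:
  uintptr_t value_;
};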
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 4d47b83185..d4384182b2 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -75,6 +75,10 @@ extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread*
!dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
+ DCHECK(
+ !class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
+ << "Oat file with .bss GC roots was not registered in class table: "
+ << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
// Note that we emit the barrier before the compiled code stores the string as GC root.
// This is OK as there is no suspend point in between.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 330c742354..670dadcd4d 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -15,13 +15,18 @@
*/
#include "art_method-inl.h"
+#include "base/casts.h"
#include "entrypoints/entrypoint_utils-inl.h"
+#include "indirect_reference_table.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
namespace art {
+static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
+static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
+
template <bool kDynamicFast>
static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
@@ -45,7 +50,7 @@ extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_o
extern uint32_t JniMethodFastStart(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
DCHECK(env != nullptr);
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
if (kIsDebugBuild) {
@@ -60,7 +65,7 @@ extern uint32_t JniMethodFastStart(Thread* self) {
extern uint32_t JniMethodStart(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
DCHECK(env != nullptr);
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
if (!native_method->IsFastNative()) {
@@ -117,7 +122,7 @@ static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
env->CheckNoHeldMonitors();
}
env->locals.SetSegmentState(env->local_ref_cookie);
- env->local_ref_cookie = saved_local_ref_cookie;
+ env->local_ref_cookie = bit_cast<IRTSegmentState>(saved_local_ref_cookie);
self->PopHandleScope();
}
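The two static_asserts introduced above are exactly what make these bit_cast calls well-defined: IRTSegmentState is a trivial struct the size of a uint32_t, so its bytes can round-trip through the saved JNI cookie unchanged. A self-contained sketch of the same pattern, with a stand-in struct:

#include <cstdint>
#include <cstring>
#include <type_traits>

struct SegmentState { uint32_t top_index; };  // Stand-in for IRTSegmentState.

static_assert(sizeof(SegmentState) == sizeof(uint32_t), "sizes must match for bit_cast");
static_assert(std::is_trivial<SegmentState>::value, "must be trivial to copy bytes");

// Minimal bit_cast in the style of base/casts.h: copy the object representation.
template <typename Dst, typename Src>
Dst BitCastSketch(const Src& src) {
  Dst dst;
  std::memcpy(&dst, &src, sizeof(Dst));
  return dst;
}

uint32_t SaveCookie(SegmentState s) { return BitCastSketch<uint32_t>(s); }
SegmentState RestoreCookie(uint32_t cookie) { return BitCastSketch<SegmentState>(cookie); }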
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 0bb65815bf..fe82878699 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2121,7 +2121,9 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
template<InvokeType type, bool access_check>
-static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
+static TwoWordReturn artInvokeCommon(uint32_t method_idx,
+ ObjPtr<mirror::Object> this_object,
+ Thread* self,
ArtMethod** sp) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
@@ -2136,7 +2138,9 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
+ method = FindMethodFromCode<type, access_check>(method_idx,
+ &this_object,
+ caller_method,
self);
visitor.FixupReferences();
}
@@ -2162,7 +2166,7 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>( \
- uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
+ uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
@@ -2190,9 +2194,13 @@ extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
}
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
- uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
+ uint32_t method_idx,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ Thread* self,
+ ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // For static calls, this_object is not required and may be random garbage; don't pass it down,
+ // so that it cannot trip the ObjPtr alignment check.
+ return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
}
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
@@ -2211,10 +2219,11 @@ extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
// is there for consistency but should not be used, as some architectures overwrite it
// in the assembly trampoline.
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED,
- mirror::Object* this_object,
+ mirror::Object* raw_this_object,
Thread* self,
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Object> this_object(raw_this_object);
ScopedQuickEntrypointChecks sqec(self);
StackHandleScope<1> hs(self);
Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
@@ -2285,7 +2294,9 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
+ method = FindMethodFromCode<kInterface, false>(dex_method_idx,
+ &this_object,
+ caller_method,
self);
visitor.FixupReferences();
}
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index f72f219a0d..6ff53597e4 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -50,13 +50,17 @@ static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* addre
}
template <bool kClearCard, typename Visitor>
-inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
- const Visitor& visitor, const uint8_t minimum_age) const {
+inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap,
+ uint8_t* const scan_begin,
+ uint8_t* const scan_end,
+ const Visitor& visitor,
+ const uint8_t minimum_age) {
DCHECK_GE(scan_begin, reinterpret_cast<uint8_t*>(bitmap->HeapBegin()));
// scan_end is the byte after the last byte we scan.
DCHECK_LE(scan_end, reinterpret_cast<uint8_t*>(bitmap->HeapLimit()));
- uint8_t* card_cur = CardFromAddr(scan_begin);
- uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+ uint8_t* const card_begin = CardFromAddr(scan_begin);
+ uint8_t* const card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
+ uint8_t* card_cur = card_begin;
CheckCardValid(card_cur);
CheckCardValid(card_end);
size_t cards_scanned = 0;
@@ -67,9 +71,6 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin
uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
++cards_scanned;
- if (kClearCard) {
- *card_cur = 0;
- }
}
++card_cur;
}
@@ -99,9 +100,6 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin
<< "card " << static_cast<size_t>(*card) << " intptr_t " << (start_word & 0xFF);
bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
++cards_scanned;
- if (kClearCard) {
- *card = 0;
- }
}
start_word >>= 8;
start += kCardSize;
@@ -116,13 +114,14 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin
uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
++cards_scanned;
- if (kClearCard) {
- *card_cur = 0;
- }
}
++card_cur;
}
+ if (kClearCard) {
+ ClearCardRange(scan_begin, scan_end);
+ }
+
return cards_scanned;
}
@@ -135,7 +134,9 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, uint8_t* scan_begin
* us to know which cards got cleared.
*/
template <typename Visitor, typename ModifiedVisitor>
-inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
+inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
+ uint8_t* scan_end,
+ const Visitor& visitor,
const ModifiedVisitor& modified) {
uint8_t* card_cur = CardFromAddr(scan_begin);
uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 121da37389..450659791d 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -97,36 +97,18 @@ CardTable::~CardTable() {
// Destroys MemMap via std::unique_ptr<>.
}
-void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
- // TODO: clear just the range of the table that has been modified
- uint8_t* card_start = CardFromAddr(space->Begin());
- uint8_t* card_end = CardFromAddr(space->End()); // Make sure to round up.
- memset(reinterpret_cast<void*>(card_start), kCardClean, card_end - card_start);
-}
-
void CardTable::ClearCardTable() {
static_assert(kCardClean == 0, "kCardClean must be 0");
mem_map_->MadviseDontNeedAndZero();
}
void CardTable::ClearCardRange(uint8_t* start, uint8_t* end) {
- if (!kMadviseZeroes) {
- memset(start, 0, end - start);
- return;
- }
CHECK_ALIGNED(reinterpret_cast<uintptr_t>(start), kCardSize);
CHECK_ALIGNED(reinterpret_cast<uintptr_t>(end), kCardSize);
static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* start_card = CardFromAddr(start);
uint8_t* end_card = CardFromAddr(end);
- uint8_t* round_start = AlignUp(start_card, kPageSize);
- uint8_t* round_end = AlignDown(end_card, kPageSize);
- if (round_start < round_end) {
- madvise(round_start, round_end - round_start, MADV_DONTNEED);
- }
- // Handle unaligned regions at start / end.
- memset(start_card, 0, std::min(round_start, end_card) - start_card);
- memset(std::max(round_end, start_card), 0, end_card - std::max(round_end, start_card));
+ ZeroAndReleasePages(start_card, end_card - start_card);
}
bool CardTable::AddrIsInCardTable(const void* addr) const {
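ClearCardRange now delegates to ZeroAndReleasePages, which centralizes the pattern the deleted lines spelled out inline: memset the unaligned edges and madvise(MADV_DONTNEED) the page-aligned middle so the kernel can reclaim it (anonymous pages read back as zero). A sketch of such a helper under those assumptions; the real one lives in ART's utils:

#include <sys/mman.h>
#include <cstdint>
#include <cstring>

static constexpr uintptr_t kPageSizeSketch = 4096;  // Assumed page size for illustration.

// Zero [mem, mem + size), returning whole pages to the kernel where possible.
void ZeroAndReleasePagesSketch(void* mem, size_t size) {
  uint8_t* const begin = static_cast<uint8_t*>(mem);
  uint8_t* const end = begin + size;
  const uintptr_t page_begin =
      (reinterpret_cast<uintptr_t>(begin) + kPageSizeSketch - 1) & ~(kPageSizeSketch - 1);
  const uintptr_t page_end = reinterpret_cast<uintptr_t>(end) & ~(kPageSizeSketch - 1);
  if (page_begin >= page_end) {
    std::memset(begin, 0, size);  // Range spans no whole page: plain memset.
    return;
  }
  std::memset(begin, 0, page_begin - reinterpret_cast<uintptr_t>(begin));       // Head.
  madvise(reinterpret_cast<void*>(page_begin), page_end - page_begin, MADV_DONTNEED);
  std::memset(reinterpret_cast<void*>(page_end), 0,
              reinterpret_cast<uintptr_t>(end) - page_end);                     // Tail.
}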
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 969bfb7182..68ef15d0cf 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -98,15 +98,19 @@ class CardTable {
* us to know which cards got cleared.
*/
template <typename Visitor, typename ModifiedVisitor>
- void ModifyCardsAtomic(uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor,
+ void ModifyCardsAtomic(uint8_t* scan_begin,
+ uint8_t* scan_end,
+ const Visitor& visitor,
const ModifiedVisitor& modified);
// For every dirty card of at least the given minimum age between begin and end, invoke the visitor with the
// specified argument. Returns how many cards the visitor was run on.
template <bool kClearCard, typename Visitor>
- size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end,
+ size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap,
+ uint8_t* scan_begin,
+ uint8_t* scan_end,
const Visitor& visitor,
- const uint8_t minimum_age = kCardDirty) const
+ const uint8_t minimum_age = kCardDirty)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -119,9 +123,6 @@ class CardTable {
// Clear a range of cards that covers start to end, start and end must be aligned to kCardSize.
void ClearCardRange(uint8_t* start, uint8_t* end);
- // Resets all of the bytes in the card table which do not map to the image space.
- void ClearSpaceCards(space::ContinuousSpace* space);
-
// Returns the first address in the heap which maps to this card.
void* AddrFromCard(const uint8_t *card_addr) const ALWAYS_INLINE;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 14f59977d6..0325535a1b 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -168,7 +168,7 @@ class ModUnionScanImageRootVisitor {
bool* const contains_reference_to_other_space_;
};
-void ModUnionTableReferenceCache::ClearCards() {
+void ModUnionTableReferenceCache::ProcessCards() {
CardTable* card_table = GetHeap()->GetCardTable();
ModUnionAddToCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in this space and update the corresponding mod-union bits.
@@ -525,7 +525,7 @@ class CardBitVisitor {
ModUnionTable::CardBitmap* const card_bitmap_;
};
-void ModUnionTableCardCache::ClearCards() {
+void ModUnionTableCardCache::ProcessCards() {
CardTable* const card_table = GetHeap()->GetCardTable();
ModUnionAddToCardBitmapVisitor visitor(card_bitmap_.get(), card_table);
// Clear dirty cards in this space and update the corresponding mod-union bits.
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index b6792c4f1f..591365f33a 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -55,10 +55,10 @@ class ModUnionTable {
virtual ~ModUnionTable() {}
- // Clear cards which map to a memory range of a space. This doesn't immediately update the
- // mod-union table, as updating the mod-union table may have an associated cost, such as
- // determining references to track.
- virtual void ClearCards() = 0;
+ // Process cards for a memory range of a space. This doesn't immediately update the mod-union
+ // table, as updating the mod-union table may have an associated cost, such as determining
+ // references to track.
+ virtual void ProcessCards() = 0;
// Set all the cards.
virtual void SetCards() = 0;
@@ -66,9 +66,9 @@ class ModUnionTable {
// Clear all of the table.
virtual void ClearTable() = 0;
- // Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
- // before a call to update, for example, back-to-back sticky GCs. Also mark references to other
- // spaces which are stored in the mod-union table.
+ // Update the mod-union table using data stored by ProcessCards. There may be multiple
+ // ProcessCards before a call to update, for example, back-to-back sticky GCs. Also mark
+ // references to other spaces which are stored in the mod-union table.
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;
// Visit all of the objects that may contain references to other spaces.
@@ -117,7 +117,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ClearCards() OVERRIDE;
+ void ProcessCards() OVERRIDE;
// Update table based on cleared cards and mark all references to the other spaces.
void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
@@ -164,7 +164,7 @@ class ModUnionTableCardCache : public ModUnionTable {
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- virtual void ClearCards() OVERRIDE;
+ virtual void ProcessCards() OVERRIDE;
// Mark all references to the alloc space(s).
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 2810f58584..cf63b30d43 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -214,7 +214,7 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
ASSERT_TRUE(other_space_ref2 != nullptr);
obj1->Set(1, other_space_ref1);
obj2->Set(3, other_space_ref2);
- table->ClearCards();
+ table->ProcessCards();
std::set<mirror::Object*> visited_before;
CollectVisitedVisitor collector_before(&visited_before);
table->UpdateAndMarkReferences(&collector_before);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index e2f5a1d7fc..f4d0bc7dbf 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -118,31 +118,8 @@ void SpaceBitmap<kAlignment>::ClearRange(const mirror::Object* begin, const mirr
}
const uintptr_t start_index = OffsetToIndex(begin_offset);
const uintptr_t end_index = OffsetToIndex(end_offset);
- Atomic<uintptr_t>* const mem_begin = &bitmap_begin_[start_index];
- Atomic<uintptr_t>* const mem_end = &bitmap_begin_[end_index];
- Atomic<uintptr_t>* const page_begin = AlignUp(mem_begin, kPageSize);
- Atomic<uintptr_t>* const page_end = AlignDown(mem_end, kPageSize);
- if (!kMadviseZeroes || page_begin >= page_end) {
- // No possible area to madvise.
- std::fill(reinterpret_cast<uint8_t*>(mem_begin),
- reinterpret_cast<uint8_t*>(mem_end),
- 0);
- } else {
- // Spans one or more pages.
- DCHECK_LE(mem_begin, page_begin);
- DCHECK_LE(page_begin, page_end);
- DCHECK_LE(page_end, mem_end);
- std::fill(reinterpret_cast<uint8_t*>(mem_begin),
- reinterpret_cast<uint8_t*>(page_begin),
- 0);
- CHECK_NE(madvise(page_begin,
- reinterpret_cast<uint8_t*>(page_end) - reinterpret_cast<uint8_t*>(page_begin),
- MADV_DONTNEED),
- -1) << "madvise failed";
- std::fill(reinterpret_cast<uint8_t*>(page_end),
- reinterpret_cast<uint8_t*>(mem_end),
- 0);
- }
+ ZeroAndReleasePages(reinterpret_cast<uint8_t*>(&bitmap_begin_[start_index]),
+ (end_index - start_index) * sizeof(*bitmap_begin_));
}
template<size_t kAlignment>
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 13af67eb3e..6dfab8b566 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -486,9 +486,14 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
// Table is non null for boot image and zygote spaces. It is only null for application image
// spaces.
if (table != nullptr) {
- // TODO: Add preclean outside the pause.
- table->ClearCards();
+ // TODO: Consider adding precleaning outside the pause.
+ table->ProcessCards();
table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
+ // Since the cards are recorded in the mod-union table and we are inside the pause, we can
+ // clear (madvise away) the cards for the space.
+ TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
+ card_table->ClearCardRange(space->Begin(),
+ AlignDown(space->End(), accounting::CardTable::kCardSize));
} else {
// TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
// pause because app image spaces are all dirty pages anyways.
@@ -1513,8 +1518,9 @@ void ConcurrentCopying::MarkZygoteLargeObjects() {
accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
// Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
- reinterpret_cast<uintptr_t>(los->End()),
+ std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
+ reinterpret_cast<uintptr_t>(range.second),
[mark_bitmap, los, self](mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2325,9 +2331,13 @@ void ConcurrentCopying::FinishPhase() {
MutexLock mu(self, mark_stack_lock_);
CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
}
- region_space_ = nullptr;
{
- MutexLock mu(Thread::Current(), skipped_blocks_lock_);
+ TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
+ // We do not currently use the region space cards at all; madvise them away to save RAM.
+ heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
+ }
+ {
+ MutexLock mu(self, skipped_blocks_lock_);
skipped_blocks_map_.clear();
}
{
@@ -2339,10 +2349,9 @@ void ConcurrentCopying::FinishPhase() {
if (kUseBakerReadBarrier && kFilterModUnionCards) {
TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
- gc::Heap* const heap = Runtime::Current()->GetHeap();
for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
- accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
// Filter out cards that don't need to be set.
if (table != nullptr) {
table->FilterCards();
@@ -2351,7 +2360,7 @@ void ConcurrentCopying::FinishPhase() {
}
if (kUseBakerReadBarrier) {
TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
- DCHECK(rb_mark_bit_stack_.get() != nullptr);
+ DCHECK(rb_mark_bit_stack_ != nullptr);
const auto* limit = rb_mark_bit_stack_->End();
for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 18c4adf608..ed16854d66 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -25,6 +25,8 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "gc/accounting/heap_bitmap.h"
+#include "gc/gc_pause_listener.h"
+#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
@@ -156,12 +158,22 @@ void GarbageCollector::ResetMeasurements() {
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector)
: start_time_(NanoTime()), collector_(collector) {
- Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
}
GarbageCollector::ScopedPause::~ScopedPause() {
collector_->RegisterPause(NanoTime() - start_time_);
- Runtime::Current()->GetThreadList()->ResumeAll();
+ Runtime* runtime = Runtime::Current();
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
+ runtime->GetThreadList()->ResumeAll();
}
// Returns the current GC iteration and associated info.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2cb17671e5..2ff4a3f57a 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -408,8 +408,9 @@ void SemiSpace::MarkReachableObjects() {
// classes (primitive array classes) that could move though they
// don't contain any other references.
accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
- large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
- reinterpret_cast<uintptr_t>(los->End()),
+ std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
+ large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
+ reinterpret_cast<uintptr_t>(range.second),
[this](mirror::Object* obj)
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
ScanObject(obj);
diff --git a/runtime/gc/gc_pause_listener.h b/runtime/gc/gc_pause_listener.h
new file mode 100644
index 0000000000..da35d2a0de
--- /dev/null
+++ b/runtime/gc/gc_pause_listener.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
+#define ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
+
+namespace art {
+namespace gc {
+
+class GcPauseListener {
+ public:
+ virtual ~GcPauseListener() {}
+
+ virtual void StartPause() = 0;
+ virtual void EndPause() = 0;
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_GC_PAUSE_LISTENER_H_
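A hedged sketch of how the new hook is meant to be consumed, through the Heap::SetGcPauseListener()/RemoveGcPauseListener() entry points added later in this patch; the timing logic is illustrative:

// Sketch: report pause durations via the new listener. NanoTime() is the
// base/time_utils.h monotonic clock already used by the collectors.
class PauseTimer : public gc::GcPauseListener {
 public:
  void StartPause() OVERRIDE { start_ns_ = NanoTime(); }
  void EndPause() OVERRIDE {
    LOG(INFO) << "GC pause took " << (NanoTime() - start_ns_) << "ns";
  }
 private:
  uint64_t start_ns_ = 0u;
};

// Installation (the listener must outlive its registration, see heap.h):
//   Runtime::Current()->GetHeap()->SetGcPauseListener(&timer);
//   ...
//   Runtime::Current()->GetHeap()->RemoveGcPauseListener();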
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 918b8db301..5de004b7a3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -58,6 +58,7 @@
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "gc_pause_listener.h"
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
@@ -611,6 +612,8 @@ Heap::Heap(size_t initial_size,
concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
"",
measure_gc_performance);
+ DCHECK(region_space_ != nullptr);
+ concurrent_copying_collector_->SetRegionSpace(region_space_);
garbage_collectors_.push_back(concurrent_copying_collector_);
}
if (MayUseCollector(kCollectorTypeMC)) {
@@ -2707,7 +2710,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
collector = semi_space_collector_;
break;
case kCollectorTypeCC:
- concurrent_copying_collector_->SetRegionSpace(region_space_);
collector = concurrent_copying_collector_;
break;
case kCollectorTypeMC:
@@ -3326,7 +3328,7 @@ void Heap::ProcessCards(TimingLogger* timings,
const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
"ImageModUnionClearCards";
TimingLogger::ScopedTiming t2(name, timings);
- table->ClearCards();
+ table->ProcessCards();
} else if (use_rem_sets && rem_set != nullptr) {
DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
<< static_cast<int>(collector_type_);
@@ -4216,6 +4218,13 @@ void Heap::RemoveAllocationListener() {
}
}
+void Heap::SetGcPauseListener(GcPauseListener* l) {
+ gc_pause_listener_.StoreRelaxed(l);
+}
+
+void Heap::RemoveGcPauseListener() {
+ gc_pause_listener_.StoreRelaxed(nullptr);
+}
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6d37140e81..e8eb69e35c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -61,6 +61,7 @@ namespace gc {
class AllocationListener;
class AllocRecordObjectMap;
+class GcPauseListener;
class ReferenceProcessor;
class TaskProcessor;
@@ -811,6 +812,16 @@ class Heap {
// reasons, we assume it stays valid when we read it (so that we don't require a lock).
void RemoveAllocationListener();
+ // Install a gc pause listener.
+ void SetGcPauseListener(GcPauseListener* l);
+ // Get the currently installed gc pause listener, or null.
+ GcPauseListener* GetGcPauseListener() {
+ return gc_pause_listener_.LoadAcquire();
+ }
+ // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
+ // reasons, we assume it stays valid when we read it (so that we don't require a lock).
+ void RemoveGcPauseListener();
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -1377,6 +1388,8 @@ class Heap {
// An installed allocation listener.
Atomic<AllocationListener*> alloc_listener_;
+ // An installed GC pause listener.
+ Atomic<GcPauseListener*> gc_pause_listener_;
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
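The listener is published through an Atomic<> field precisely so the pause path can read it without taking a lock; the cost is the lifetime contract stated in the comment above. The same publish/consume shape with std::atomic (the patch itself uses StoreRelaxed for the writer and LoadAcquire for the reader):

#include <atomic>

struct PauseListenerSketch { virtual ~PauseListenerSketch() {} virtual void StartPause() = 0; };

std::atomic<PauseListenerSketch*> g_listener{nullptr};

void Install(PauseListenerSketch* l) { g_listener.store(l, std::memory_order_release); }
void Remove() { g_listener.store(nullptr, std::memory_order_relaxed); }

void OnPauseStart() {
  // Lock-free read on the hot path; the listener must stay valid while installed.
  PauseListenerSketch* l = g_listener.load(std::memory_order_acquire);
  if (l != nullptr) {
    l->StartPause();
  }
}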
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2d5d7cbfa6..e71a397039 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -606,9 +606,12 @@ collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
+ std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
- reinterpret_cast<uintptr_t>(Begin()),
- reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
+ reinterpret_cast<uintptr_t>(range.first),
+ reinterpret_cast<uintptr_t>(range.second),
+ SweepCallback,
+ &scc);
return scc.freed;
}
@@ -617,6 +620,16 @@ void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
UNIMPLEMENTED(FATAL);
}
+std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
+ MutexLock mu(Thread::Current(), lock_);
+ return std::make_pair(Begin(), End());
+}
+
+std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
+ MutexLock mu(Thread::Current(), lock_);
+ return std::make_pair(Begin(), End());
+}
+
} // namespace space
} // namespace gc
} // namespace art
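The point of GetBeginEndAtomic() is that Begin() and End() can both move when the space grows, so reading them back-to-back without the lock can observe a torn pair taken from two different allocations. Both implementations above snapshot the pair under lock_; a generic sketch of that pattern:

#include <cstdint>
#include <mutex>
#include <utility>

// Sketch: a growable range whose bounds must be read as one consistent pair.
class AtomicRangeSketch {
 public:
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const {
    std::lock_guard<std::mutex> lock(lock_);  // One lock hold covers both reads.
    return std::make_pair(begin_, end_);
  }
  void Grow(uint8_t* new_begin, uint8_t* new_end) {
    std::lock_guard<std::mutex> lock(lock_);
    begin_ = new_begin;
    end_ = new_end;
  }
 private:
  mutable std::mutex lock_;
  uint8_t* begin_ = nullptr;
  uint8_t* end_ = nullptr;
};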
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 0320e79f38..38e28b131d 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -104,6 +104,10 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
// objects.
virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;
+ // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
+ // End() from different allocations.
+ virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
+
protected:
explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
@@ -139,6 +143,8 @@ class LargeObjectMapSpace : public LargeObjectSpace {
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+
protected:
struct LargeObject {
MemMap* mem_map;
@@ -172,6 +178,8 @@ class FreeListSpace FINAL : public LargeObjectSpace {
void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
void Dump(std::ostream& os) const REQUIRES(!lock_);
+ std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+
protected:
FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
size_t GetSlotIndexForAddress(uintptr_t address) const {
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e357fa66a4..9c634fa861 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -43,15 +43,15 @@ inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
iref));
return false;
}
- const int topIndex = segment_state_.parts.topIndex;
- int idx = ExtractIndex(iref);
- if (UNLIKELY(idx >= topIndex)) {
+ const uint32_t top_index = segment_state_.top_index;
+ uint32_t idx = ExtractIndex(iref);
+ if (UNLIKELY(idx >= top_index)) {
std::string msg = StringPrintf(
"JNI ERROR (app bug): accessed stale %s %p (index %d in a table of size %d)",
GetIndirectRefKindString(kind_),
iref,
idx,
- topIndex);
+ top_index);
AbortIfNoCheckJNI(msg);
return false;
}
@@ -68,7 +68,9 @@ inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
}
// Make sure that the entry at "idx" is correctly paired with "iref".
-inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
+inline bool IndirectReferenceTable::CheckEntry(const char* what,
+ IndirectRef iref,
+ uint32_t idx) const {
IndirectRef checkRef = ToIndirectRef(idx);
if (UNLIKELY(checkRef != iref)) {
std::string msg = StringPrintf(
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 7389c73096..c7371191b7 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -32,6 +32,7 @@
namespace art {
static constexpr bool kDumpStackOnNonLocalReference = false;
+static constexpr bool kDebugIRT = false;
const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
switch (kind) {
@@ -60,9 +61,13 @@ void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
IndirectRefKind desired_kind,
+ ResizableCapacity resizable,
std::string* error_msg)
- : kind_(desired_kind),
- max_entries_(max_count) {
+ : segment_state_(kIRTFirstSegment),
+ kind_(desired_kind),
+ max_entries_(max_count),
+ current_num_holes_(0),
+ resizable_(resizable) {
CHECK(error_msg != nullptr);
CHECK_NE(desired_kind, kHandleScopeOrInvalid);
@@ -78,60 +83,210 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
} else {
table_ = nullptr;
}
- segment_state_.all = IRT_FIRST_SEGMENT;
+ segment_state_ = kIRTFirstSegment;
+ last_known_previous_state_ = kIRTFirstSegment;
}
IndirectReferenceTable::~IndirectReferenceTable() {
}
+void IndirectReferenceTable::ConstexprChecks() {
+ // Use this for some assertions. They can't be put into the header as C++ wants the class
+ // to be complete.
+
+ // Check kind.
+ static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
+ "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
+ "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
+ "Kind encoding error");
+
+ // Check serial.
+ static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");
+
+ // Table index.
+ static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
+}
+
bool IndirectReferenceTable::IsValid() const {
return table_mem_map_.get() != nullptr;
}
-IndirectRef IndirectReferenceTable::Add(uint32_t cookie, ObjPtr<mirror::Object> obj) {
- IRTSegmentState prevState;
- prevState.all = cookie;
- size_t topIndex = segment_state_.parts.topIndex;
+// Holes:
+//
+// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
+// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
+// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
+// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
+// scans when there are no holes, the number of known holes should be tracked.
+//
+// A previous implementation stored the top index and the number of holes as the segment state.
+// This constrains the maximum number of references to 16 bits. We want to relax this, as it
+// is easy to require more references (e.g., to list all classes in large applications). Thus,
+// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
+//
+// Thus, hole count is a local property of the current segment, and needs to be recovered when
+// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
+// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
+// hole count is correct.
+//
+// To be able to detect segment changes, we require an additional local field that can describe
+// the known segment. This is last_known_previous_state_. The requirement will become clear with
+// the following (some non-trivial) cases that have to be supported:
+//
+// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+// reference
+// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+// reference
+//
+// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
+// segment changes above. The condition is simply that the last known state is greater than or
+// equal to the current previous state, and smaller than the current state (top index). The
+// condition is conservative as it adds O(1) overhead to operations on an empty segment.
+
+static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
+ size_t count = 0;
+ for (size_t index = from; index != to; ++index) {
+ if (table[index].GetReference()->IsNull()) {
+ count++;
+ }
+ }
+ return count;
+}
+
+void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
+ if (last_known_previous_state_.top_index >= segment_state_.top_index ||
+ last_known_previous_state_.top_index < prev_state.top_index) {
+ const size_t top_index = segment_state_.top_index;
+ size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
+
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ Recovered holes: "
+ << " Current prev=" << prev_state.top_index
+ << " Current top_index=" << top_index
+ << " Old num_holes=" << current_num_holes_
+ << " New num_holes=" << count;
+ }
+
+ current_num_holes_ = count;
+ last_known_previous_state_ = prev_state;
+ } else if (kDebugIRT) {
+ LOG(INFO) << "No need to recover holes";
+ }
+}
+
+ALWAYS_INLINE
+static inline void CheckHoleCount(IrtEntry* table,
+ size_t exp_num_holes,
+ IRTSegmentState prev_state,
+ IRTSegmentState cur_state) {
+ if (kIsDebugBuild) {
+ size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
+ CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
+ << " topIndex=" << cur_state.top_index;
+ }
+}
+
+bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
+ CHECK_GT(new_size, max_entries_);
+
+ const size_t table_bytes = new_size * sizeof(IrtEntry);
+ std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
+ nullptr,
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ error_msg));
+ if (new_map == nullptr) {
+ return false;
+ }
+
+ memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+ table_mem_map_ = std::move(new_map);
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ max_entries_ = new_size;
+
+ return true;
+}
+
+IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
+ ObjPtr<mirror::Object> obj) {
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
+ << " top_index=" << segment_state_.top_index
+ << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ size_t top_index = segment_state_.top_index;
CHECK(obj != nullptr);
VerifyObject(obj);
DCHECK(table_ != nullptr);
- DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
- if (topIndex == max_entries_) {
- LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
- << "(max=" << max_entries_ << ")\n"
- << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+ if (top_index == max_entries_) {
+ if (resizable_ == ResizableCapacity::kNo) {
+ LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+ << "(max=" << max_entries_ << ")\n"
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+ UNREACHABLE();
+ }
+
+ // Try to double space.
+ std::string error_msg;
+ if (!Resize(max_entries_ * 2, &error_msg)) {
+ LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+ << "(max=" << max_entries_ << ")" << std::endl
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this)
+ << " Resizing failed: " << error_msg;
+ UNREACHABLE();
+ }
}
+ RecoverHoles(previous_state);
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
// We know there's enough room in the table. Now we just need to find
// the right spot. If there's a hole, find it and fill it; otherwise,
// add to the end of the list.
IndirectRef result;
- int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
size_t index;
- if (numHoles > 0) {
- DCHECK_GT(topIndex, 1U);
+ if (current_num_holes_ > 0) {
+ DCHECK_GT(top_index, 1U);
// Find the first hole; likely to be near the end of the list.
- IrtEntry* pScan = &table_[topIndex - 1];
- DCHECK(!pScan->GetReference()->IsNull());
- --pScan;
- while (!pScan->GetReference()->IsNull()) {
- DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
- --pScan;
+ IrtEntry* p_scan = &table_[top_index - 1];
+ DCHECK(!p_scan->GetReference()->IsNull());
+ --p_scan;
+ while (!p_scan->GetReference()->IsNull()) {
+ DCHECK_GE(p_scan, table_ + previous_state.top_index);
+ --p_scan;
}
- index = pScan - table_;
- segment_state_.parts.numHoles--;
+ index = p_scan - table_;
+ current_num_holes_--;
} else {
// Add to the end.
- index = topIndex++;
- segment_state_.parts.topIndex = topIndex;
+ index = top_index++;
+ segment_state_.top_index = top_index;
}
table_[index].Add(obj);
result = ToIndirectRef(index);
- if ((false)) {
- LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
- << " holes=" << segment_state_.parts.numHoles;
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+ << " holes=" << current_num_holes_;
}
DCHECK(result != nullptr);
@@ -156,14 +311,18 @@ void IndirectReferenceTable::AssertEmpty() {
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
-bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
- IRTSegmentState prevState;
- prevState.all = cookie;
- int topIndex = segment_state_.parts.topIndex;
- int bottomIndex = prevState.parts.topIndex;
+bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
+ << " top_index=" << segment_state_.top_index
+ << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ const uint32_t top_index = segment_state_.top_index;
+ const uint32_t bottom_index = previous_state.top_index;
DCHECK(table_ != nullptr);
- DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
auto* self = Thread::Current();
@@ -180,21 +339,24 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
return true;
}
}
- const int idx = ExtractIndex(iref);
- if (idx < bottomIndex) {
+ const uint32_t idx = ExtractIndex(iref);
+ if (idx < bottom_index) {
// Wrong segment.
LOG(WARNING) << "Attempt to remove index outside index area (" << idx
- << " vs " << bottomIndex << "-" << topIndex << ")";
+ << " vs " << bottom_index << "-" << top_index << ")";
return false;
}
- if (idx >= topIndex) {
+ if (idx >= top_index) {
// Bad --- stale reference?
LOG(WARNING) << "Attempt to remove invalid index " << idx
- << " (bottom=" << bottomIndex << " top=" << topIndex << ")";
+ << " (bottom=" << bottom_index << " top=" << top_index << ")";
return false;
}
- if (idx == topIndex - 1) {
+ RecoverHoles(previous_state);
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+ if (idx == top_index - 1) {
// Top-most entry. Scan up and consume holes.
if (!CheckEntry("remove", iref, idx)) {
@@ -202,28 +364,30 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
}
*table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
- int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
- if (numHoles != 0) {
- while (--topIndex > bottomIndex && numHoles != 0) {
- if ((false)) {
- LOG(INFO) << "+++ checking for hole at " << topIndex - 1
- << " (cookie=" << cookie << ") val="
- << table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
+ if (current_num_holes_ != 0) {
+ uint32_t collapse_top_index = top_index;
+ while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+ if (kDebugIRT) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
+ << " (previous_state=" << bottom_index << ") val="
+ << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
}
- if (!table_[topIndex - 1].GetReference()->IsNull()) {
+ if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
break;
}
- if ((false)) {
- LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
}
- numHoles--;
+ current_num_holes_--;
}
- segment_state_.parts.numHoles = numHoles + prevState.parts.numHoles;
- segment_state_.parts.topIndex = topIndex;
+ segment_state_.top_index = collapse_top_index;
+
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
} else {
- segment_state_.parts.topIndex = topIndex-1;
- if ((false)) {
- LOG(INFO) << "+++ ate last entry " << topIndex - 1;
+ segment_state_.top_index = top_index - 1;
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ ate last entry " << top_index - 1;
}
}
} else {
@@ -238,9 +402,10 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
}
*table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
- segment_state_.parts.numHoles++;
- if ((false)) {
- LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
+ current_num_holes_++;
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
}
}
@@ -278,4 +443,14 @@ void IndirectReferenceTable::Dump(std::ostream& os) const {
ReferenceTable::Dump(os, entries);
}
+void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
+ if (kDebugIRT) {
+ LOG(INFO) << "Setting segment state: "
+ << segment_state_.top_index
+ << " -> "
+ << new_state.top_index;
+ }
+ segment_state_ = new_state;
+}
+
} // namespace art
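A worked example of the hole-recovery rule from the long comment above, with the table reduced to an occupancy vector and segment states to plain top indices; a sketch of the bookkeeping only, not the ART types:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: a removed entry becomes "null"; nulls below the top index are holes.
struct MiniIrt {
  std::vector<bool> occupied;              // occupied[i] == false models a null entry.
  uint32_t top_index = 0;                  // The only state saved across JNI frames.
  uint32_t last_known_previous_state = 0;  // Cached bottom of the segment last seen.
  size_t current_num_holes = 0;

  size_t CountHoles(uint32_t from, uint32_t to) const {
    size_t count = 0;
    for (uint32_t i = from; i != to; ++i) {
      if (!occupied[i]) {
        count++;
      }
    }
    return count;
  }

  // Mirrors RecoverHoles(): if the cached bottom cannot describe the current
  // segment, rescan [previous_top, top_index) for nulls and re-cache.
  void RecoverHoles(uint32_t previous_top) {
    if (last_known_previous_state >= top_index || last_known_previous_state < previous_top) {
      current_num_holes = CountHoles(previous_top, top_index);
      last_known_previous_state = previous_top;
    }
  }
};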
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 363280a87c..7e452a270a 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -20,8 +20,10 @@
#include <stdint.h>
#include <iosfwd>
+#include <limits>
#include <string>
+#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
@@ -40,165 +42,118 @@ class Object;
class MemMap;
-/*
- * Maintain a table of indirect references. Used for local/global JNI
- * references.
- *
- * The table contains object references that are part of the GC root set.
- * When an object is added we return an IndirectRef that is not a valid
- * pointer but can be used to find the original value in O(1) time.
- * Conversions to and from indirect references are performed on upcalls
- * and downcalls, so they need to be very fast.
- *
- * To be efficient for JNI local variable storage, we need to provide
- * operations that allow us to operate on segments of the table, where
- * segments are pushed and popped as if on a stack. For example, deletion
- * of an entry should only succeed if it appears in the current segment,
- * and we want to be able to strip off the current segment quickly when
- * a method returns. Additions to the table must be made in the current
- * segment even if space is available in an earlier area.
- *
- * A new segment is created when we call into native code from interpreted
- * code, or when we handle the JNI PushLocalFrame function.
- *
- * The GC must be able to scan the entire table quickly.
- *
- * In summary, these must be very fast:
- * - adding or removing a segment
- * - adding references to a new segment
- * - converting an indirect reference back to an Object
- * These can be a little slower, but must still be pretty quick:
- * - adding references to a "mature" segment
- * - removing individual references
- * - scanning the entire table straight through
- *
- * If there's more than one segment, we don't guarantee that the table
- * will fill completely before we fail due to lack of space. We do ensure
- * that the current segment will pack tightly, which should satisfy JNI
- * requirements (e.g. EnsureLocalCapacity).
- *
- * To make everything fit nicely in 32-bit integers, the maximum size of
- * the table is capped at 64K.
- *
- * Only SynchronizedGet is synchronized.
- */
-
-/*
- * Indirect reference definition. This must be interchangeable with JNI's
- * jobject, and it's convenient to let null be null, so we use void*.
- *
- * We need a 16-bit table index and a 2-bit reference type (global, local,
- * weak global). Real object pointers will have zeroes in the low 2 or 3
- * bits (4- or 8-byte alignment), so it's useful to put the ref type
- * in the low bits and reserve zero as an invalid value.
- *
- * The remaining 14 bits can be used to detect stale indirect references.
- * For example, if objects don't move, we can use a hash of the original
- * Object* to make sure the entry hasn't been re-used. (If the Object*
- * we find there doesn't match because of heap movement, we could do a
- * secondary check on the preserved hash value; this implies that creating
- * a global/local ref queries the hash value and forces it to be saved.)
- *
- * A more rigorous approach would be to put a serial number in the extra
- * bits, and keep a copy of the serial number in a parallel table. This is
- * easier when objects can move, but requires 2x the memory and additional
- * memory accesses on add/get. It will catch additional problems, e.g.:
- * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
- * iref1. A pattern based on object bits will miss this.
- */
+// Maintain a table of indirect references. Used for local/global JNI references.
+//
+// The table contains object references; the strong (local/global) references are part of the
+// GC root set, while the weak global references are not. When an object is added we return an
+// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
+// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
+// to be very fast.
+//
+// To be efficient for JNI local variable storage, we need to provide operations that allow us to
+// operate on segments of the table, where segments are pushed and popped as if on a stack. For
+// example, deletion of an entry should only succeed if it appears in the current segment, and we
+// want to be able to strip off the current segment quickly when a method returns. Additions to the
+// table must be made in the current segment even if space is available in an earlier area.
+//
+// A new segment is created when we call into native code from interpreted code, or when we handle
+// the JNI PushLocalFrame function.
+//
+// The GC must be able to scan the entire table quickly.
+//
+// In summary, these must be very fast:
+// - adding or removing a segment
+// - adding references to a new segment
+// - converting an indirect reference back to an Object
+// These can be a little slower, but must still be pretty quick:
+// - adding references to a "mature" segment
+// - removing individual references
+// - scanning the entire table straight through
+//
+// If there's more than one segment, we don't guarantee that the table will fill completely before
+// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
+// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
+//
+// Only SynchronizedGet is synchronized.
+
+// Indirect reference definition. This must be interchangeable with JNI's jobject, and it's
+// convenient to let null be null, so we use void*.
+//
+// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
+// global). We also reserve some bits to be used to detect stale indirect references: we put a
+// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
+// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
+// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
+// lookup iref1. A pattern based on object bits will miss this.
typedef void* IndirectRef;
-/*
- * Indirect reference kind, used as the two low bits of IndirectRef.
- *
- * For convenience these match up with enum jobjectRefType from jni.h.
- */
+// Indirect reference kind, used as the two low bits of IndirectRef.
+//
+// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
- kHandleScopeOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
- kLocal = 1, // <<local reference>>
- kGlobal = 2, // <<global reference>>
- kWeakGlobal = 3 // <<weak global reference>>
+ kHandleScopeOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
+ kLocal = 1, // <<local reference>>
+ kGlobal = 2, // <<global reference>>
+ kWeakGlobal = 3, // <<weak global reference>>
+ kLastKind = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);
-/*
- * Determine what kind of indirect reference this is.
- */
-static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
- return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
-}
-
-/* use as initial value for "cookie", and when table has only one segment */
-static const uint32_t IRT_FIRST_SEGMENT = 0;
-
-/*
- * Table definition.
- *
- * For the global reference table, the expected common operations are
- * adding a new entry and removing a recently-added entry (usually the
- * most-recently-added entry). For JNI local references, the common
- * operations are adding a new entry and removing an entire table segment.
- *
- * If "alloc_entries_" is not equal to "max_entries_", the table may expand
- * when entries are added, which means the memory may move. If you want
- * to keep pointers into "table" rather than offsets, you must use a
- * fixed-size table.
- *
- * If we delete entries from the middle of the list, we will be left with
- * "holes". We track the number of holes so that, when adding new elements,
- * we can quickly decide to do a trivial append or go slot-hunting.
- *
- * When the top-most entry is removed, any holes immediately below it are
- * also removed. Thus, deletion of an entry may reduce "topIndex" by more
- * than one.
- *
- * To get the desired behavior for JNI locals, we need to know the bottom
- * and top of the current "segment". The top is managed internally, and
- * the bottom is passed in as a function argument. When we call a native method or
- * push a local frame, the current top index gets pushed on, and serves
- * as the new bottom. When we pop a frame off, the value from the stack
- * becomes the new top index, and the value stored in the previous frame
- * becomes the new bottom.
- *
- * To avoid having to re-scan the table after a pop, we want to push the
- * number of holes in the table onto the stack. Because of our 64K-entry
- * cap, we can combine the two into a single unsigned 32-bit value.
- * Instead of a "bottom" argument we take a "cookie", which includes the
- * bottom index and the count of holes below the bottom.
- *
- * Common alternative implementation: make IndirectRef a pointer to the
- * actual reference slot. Instead of getting a table and doing a lookup,
- * the lookup can be done instantly. Operations like determining the
- * type and deleting the reference are more expensive because the table
- * must be hunted for (i.e. you have to do a pointer comparison to see
- * which table it's in), you can't move the table when expanding it (so
- * realloc() is out), and tricks like serial number checking to detect
- * stale references aren't possible (though we may be able to get similar
- * benefits with other approaches).
- *
- * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
- * add immediately follows a delete; must invalidate after segment pop
- * (which could increase the cost/complexity of method call/return).
- * Might be worth only using it for JNI globals.
- *
- * TODO: may want completely different add/remove algorithms for global
- * and local refs to improve performance. A large circular buffer might
- * reduce the amortized cost of adding global references.
- *
- */
-union IRTSegmentState {
- uint32_t all;
- struct {
- uint32_t topIndex:16; /* index of first unused entry */
- uint32_t numHoles:16; /* #of holes in entire table */
- } parts;
+// Table definition.
+//
+// For the global reference table, the expected common operations are adding a new entry and
+// removing a recently-added entry (usually the most-recently-added entry). For JNI local
+// references, the common operations are adding a new entry and removing an entire table segment.
+//
+// If we delete entries from the middle of the list, we will be left with "holes". We track the
+// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
+// or go slot-hunting.
+//
+// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
+// deletion of an entry may reduce "top_index" by more than one.
+//
+// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
+// "segment". The top is managed internally, and the bottom is passed in as a function argument.
+// When we call a native method or push a local frame, the current top index gets pushed on, and
+// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
+// index, and the value stored in the previous frame becomes the new bottom.
+//
+// Holes are cached locally for the current segment. Otherwise we'd have to pass both the bottom
+// index and the number of holes, which would restrict us to 16 bits for the top index. The hole
+// count is cached within the table. To avoid extra code in generated JNI transitions, which
+// implicitly form segments, the code for adding and removing references needs to detect the
+// change of a segment. Helper fields are used for this detection.
+//
+// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
+// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
+// determining the type and deleting the reference are more expensive because the table must be
+// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
+// the table when expanding it (so realloc() is out), and tricks like serial number checking to
+// detect stale references aren't possible (though we may be able to get similar benefits with other
+// approaches).
+//
+// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
+// delete; must invalidate after segment pop (which could increase the cost/complexity of method
+// call/return). Might be worth only using it for JNI globals.
+//
+// TODO: may want completely different add/remove algorithms for global and local refs to improve
+// performance. A large circular buffer might reduce the amortized cost of adding global
+// references.
+
+// The state of the current segment. We only store the index. Splitting it for index and hole
+// count restricts the range too much.
+struct IRTSegmentState {
+ uint32_t top_index;
};
+// Use as initial value for "cookie", and when table has only one segment.
+static constexpr IRTSegmentState kIRTFirstSegment = { 0 };
+
// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// Contains multiple entries but only one active one. This helps us detect use-after-free errors
// since the serial stored in the indirect ref won't match.
-static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
+static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
+
class IrtEntry {
public:
void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -208,6 +163,11 @@ class IrtEntry {
return &references_[serial_];
}
+ const GcRoot<mirror::Object>* GetReference() const {
+ DCHECK_LT(serial_, kIRTPrevCount);
+ return &references_[serial_];
+ }
+
uint32_t GetSerial() const {
return serial_;
}
@@ -220,6 +180,7 @@ class IrtEntry {
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
"Unexpected sizeof(IrtEntry)");
+static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
class IrtIterator {
public:
@@ -257,14 +218,20 @@ bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
class IndirectReferenceTable {
public:
- /*
- * WARNING: Construction of the IndirectReferenceTable may fail.
- * error_msg must not be null. If error_msg is set by the constructor, then
- * construction has failed and the IndirectReferenceTable will be in an
- * invalid state. Use IsValid to check whether the object is in an invalid
- * state.
- */
- IndirectReferenceTable(size_t max_count, IndirectRefKind kind, std::string* error_msg);
+ enum class ResizableCapacity {
+ kNo,
+ kYes
+ };
+
+ // WARNING: Construction of the IndirectReferenceTable may fail.
+ // error_msg must not be null. If error_msg is set by the constructor, then
+ // construction has failed and the IndirectReferenceTable will be in an
+  // invalid state. Use IsValid to check whether construction succeeded.
+ IndirectReferenceTable(size_t max_count,
+ IndirectRefKind kind,
+ ResizableCapacity resizable,
+ std::string* error_msg);
~IndirectReferenceTable();
@@ -277,20 +244,14 @@ class IndirectReferenceTable {
*/
bool IsValid() const;
- /*
- * Add a new entry. "obj" must be a valid non-nullptr object reference.
- *
- * Returns nullptr if the table is full (max entries reached, or alloc
- * failed during expansion).
- */
- IndirectRef Add(uint32_t cookie, ObjPtr<mirror::Object> obj)
+ // Add a new entry. "obj" must be a valid non-null object reference. This function will
+ // abort if the table is full (max entries reached, or expansion failed).
+ IndirectRef Add(IRTSegmentState previous_state, ObjPtr<mirror::Object> obj)
REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Given an IndirectRef in the table, return the Object it refers to.
- *
- * Returns kInvalidIndirectRefObject if iref is invalid.
- */
+ // Given an IndirectRef in the table, return the Object it refers to.
+ //
+ // This function may abort under error conditions.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
ALWAYS_INLINE;
@@ -302,34 +263,26 @@ class IndirectReferenceTable {
return Get<kReadBarrierOption>(iref);
}
- /*
- * Update an existing entry.
- *
- * Updates an existing indirect reference to point to a new object.
- */
+ // Updates an existing indirect reference to point to a new object.
void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Remove an existing entry.
- *
- * If the entry is not between the current top index and the bottom index
- * specified by the cookie, we don't remove anything. This is the behavior
- * required by JNI's DeleteLocalRef function.
- *
- * Returns "false" if nothing was removed.
- */
- bool Remove(uint32_t cookie, IndirectRef iref);
+ // Remove an existing entry.
+ //
+ // If the entry is not between the current top index and the bottom index
+ // specified by the cookie, we don't remove anything. This is the behavior
+ // required by JNI's DeleteLocalRef function.
+ //
+ // Returns "false" if nothing was removed.
+ bool Remove(IRTSegmentState previous_state, IndirectRef iref);
void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Return the #of entries in the entire table. This includes holes, and
- * so may be larger than the actual number of "live" entries.
- */
+ // Return the #of entries in the entire table. This includes holes, and
+ // so may be larger than the actual number of "live" entries.
size_t Capacity() const {
- return segment_state_.parts.topIndex;
+ return segment_state_.top_index;
}
// Note IrtIterator does not have a read barrier as it's used to visit roots.
@@ -344,13 +297,11 @@ class IndirectReferenceTable {
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t GetSegmentState() const {
- return segment_state_.all;
+ IRTSegmentState GetSegmentState() const {
+ return segment_state_;
}
- void SetSegmentState(uint32_t new_state) {
- segment_state_.all = new_state;
- }
+ void SetSegmentState(IRTSegmentState new_state);
static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
// Note: Currently segment_state_ is at offset 0. We're testing the expected value in
@@ -362,32 +313,74 @@ class IndirectReferenceTable {
// Release pages past the end of the table that may have previously held references.
void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
+ ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
+ return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
+ }
+
private:
+ static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount);
+ static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1;
+
+ static constexpr size_t kKindBits = MinimumBitsToStore(
+ static_cast<uint32_t>(IndirectRefKind::kLastKind));
+ static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;
+
+ static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
+ static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
+ DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits);
+ return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits);
+ }
+ static constexpr uint32_t DecodeIndex(uintptr_t uref) {
+ return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits);
+ }
+
+ static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
+ return static_cast<uintptr_t>(kind);
+ }
+ static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
+ return static_cast<IndirectRefKind>(uref & kKindMask);
+ }
+
+ static constexpr uintptr_t EncodeSerial(uint32_t serial) {
+ DCHECK_LE(MinimumBitsToStore(serial), kSerialBits);
+ return serial << kKindBits;
+ }
+ static constexpr uint32_t DecodeSerial(uintptr_t uref) {
+ return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
+ }
+
+ constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
+ DCHECK_LT(table_index, max_entries_);
+ return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_);
+ }
+
+ static void ConstexprChecks();
+
// Extract the table index from an indirect reference.
- static uint32_t ExtractIndex(IndirectRef iref) {
- uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
- return (uref >> 2) & 0xffff;
+ ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
+ return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
}
- /*
- * The object pointer itself is subject to relocation in some GC
- * implementations, so we shouldn't really be using it here.
- */
- IndirectRef ToIndirectRef(uint32_t tableIndex) const {
- DCHECK_LT(tableIndex, 65536U);
- uint32_t serialChunk = table_[tableIndex].GetSerial();
- uintptr_t uref = (serialChunk << 20) | (tableIndex << 2) | kind_;
- return reinterpret_cast<IndirectRef>(uref);
+ IndirectRef ToIndirectRef(uint32_t table_index) const {
+ DCHECK_LT(table_index, max_entries_);
+ uint32_t serial = table_[table_index].GetSerial();
+ return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
}
+ // Resize the backing table. Currently must be larger than the current size.
+ bool Resize(size_t new_size, std::string* error_msg);
+
+ void RecoverHoles(IRTSegmentState from);
+
// Abort if check_jni is not enabled. Otherwise, just log as an error.
static void AbortIfNoCheckJNI(const std::string& msg);
/* extra debugging checks */
bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
- bool CheckEntry(const char*, IndirectRef, int) const;
+ bool CheckEntry(const char*, IndirectRef, uint32_t) const;
- /* semi-public - read/write by jni down calls */
+  // semi-public - read/write by jni down calls.
IRTSegmentState segment_state_;
// Mem map where we store the indirect refs.
@@ -395,10 +388,21 @@ class IndirectReferenceTable {
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
IrtEntry* table_;
- /* bit mask, ORed into all irefs */
+  // Bit mask, ORed into all irefs.
const IndirectRefKind kind_;
- /* max #of entries allowed */
- const size_t max_entries_;
+
+  // Max #of entries allowed (modulo resizing).
+ size_t max_entries_;
+
+ // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
+ // file.
+ // TODO: Consider other data structures for compact tables, e.g., free lists.
+ size_t current_num_holes_;
+ IRTSegmentState last_known_previous_state_;
+
+ // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
+ // responsibility to ensure thread-safety.
+ ResizableCapacity resizable_;
};
} // namespace art
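
To make the new bit layout concrete: with the release-build constants above (kIRTPrevCount == 3,
so MinimumBitsToStore yields 2 serial bits, plus 2 kind bits covering the four IndirectRefKind
values), an indirect reference packs as [ index | serial | kind ] from high to low bits. A minimal
standalone sketch of that scheme -- illustrative only, not part of this patch, with the constants
hard-coded instead of derived:

    #include <cstdint>
    #include <cstdio>

    // Assumed release-build widths: 2 serial bits (kIRTPrevCount == 3) and
    // 2 kind bits (kHandleScopeOrInvalid..kWeakGlobal).
    constexpr unsigned kKindBits = 2;
    constexpr unsigned kSerialBits = 2;

    constexpr uintptr_t Encode(uint32_t index, uint32_t serial, uint32_t kind) {
      return (static_cast<uintptr_t>(index) << (kSerialBits + kKindBits)) |
             (static_cast<uintptr_t>(serial) << kKindBits) |
             kind;
    }

    int main() {
      uintptr_t iref = Encode(/*index=*/5, /*serial=*/1, /*kind=*/2);  // kGlobal
      std::printf("index=%u serial=%u kind=%u\n",
                  static_cast<uint32_t>(iref >> (kSerialBits + kKindBits)),
                  static_cast<uint32_t>((iref >> kKindBits) & 0x3u),
                  static_cast<uint32_t>(iref & 0x3u));  // prints 5, 1, 2
      return 0;
    }

Because the kind bits are non-zero for any live local/global/weak-global reference, an aligned
object pointer (low 2 or 3 bits zero) can never collide with an encoded iref.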
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index d7026de559..722b411f97 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -50,7 +50,10 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
ScopedObjectAccess soa(Thread::Current());
static const size_t kTableMax = 20;
std::string error_msg;
- IndirectReferenceTable irt(kTableMax, kGlobal, &error_msg);
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
ASSERT_TRUE(irt.IsValid()) << error_msg;
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
@@ -65,7 +68,7 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
ASSERT_TRUE(obj3.Get() != nullptr);
- const uint32_t cookie = IRT_FIRST_SEGMENT;
+ const IRTSegmentState cookie = kIRTFirstSegment;
CheckDump(&irt, 0, 0);
@@ -257,4 +260,250 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
CheckDump(&irt, 0, 0);
}
+TEST_F(IndirectReferenceTableTest, Holes) {
+ // Test the explicitly named cases from the IRT implementation:
+ //
+ // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+ // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+ // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+ // reference
+ // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+ // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+ // reference
+
+ ScopedObjectAccess soa(Thread::Current());
+ static const size_t kTableMax = 10;
+
+ mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ StackHandleScope<5> hs(soa.Self());
+ ASSERT_TRUE(c != nullptr);
+ Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj0.Get() != nullptr);
+ Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj1.Get() != nullptr);
+ Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj2.Get() != nullptr);
+ Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj3.Get() != nullptr);
+ Handle<mirror::Object> obj4 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj4.Get() != nullptr);
+
+ std::string error_msg;
+
+ // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+ IndirectRef iref1 = irt.Add(cookie0, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie0, obj2.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie0, iref1));
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref3 = irt.Add(cookie1, obj3.Get());
+
+ // Must not have filled the previous hole.
+ EXPECT_EQ(irt.Capacity(), 4u);
+ EXPECT_TRUE(irt.Get(iref1) == nullptr);
+ CheckDump(&irt, 3, 3);
+
+ UNUSED(iref0, iref1, iref2, iref3);
+ }
+
+ // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie1, obj2.Get());
+ IndirectRef iref3 = irt.Add(cookie1, obj3.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref2));
+
+ // Pop segment.
+ irt.SetSegmentState(cookie1);
+
+ IndirectRef iref4 = irt.Add(cookie1, obj4.Get());
+
+ EXPECT_EQ(irt.Capacity(), 2u);
+ EXPECT_TRUE(irt.Get(iref2) == nullptr);
+ CheckDump(&irt, 2, 2);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4);
+ }
+
+ // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+ // reference.
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie1, obj2.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref1));
+
+ // New segment.
+ const IRTSegmentState cookie2 = irt.GetSegmentState();
+
+ IndirectRef iref3 = irt.Add(cookie2, obj3.Get());
+
+ // Pop segment.
+ irt.SetSegmentState(cookie2);
+
+ IndirectRef iref4 = irt.Add(cookie1, obj4.Get());
+
+ EXPECT_EQ(irt.Capacity(), 3u);
+ EXPECT_TRUE(irt.Get(iref1) == nullptr);
+ CheckDump(&irt, 3, 3);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4);
+ }
+
+ // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ EXPECT_TRUE(irt.Remove(cookie1, iref1));
+
+ // Emptied segment, push new one.
+ const IRTSegmentState cookie2 = irt.GetSegmentState();
+
+ IndirectRef iref2 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref3 = irt.Add(cookie1, obj2.Get());
+ IndirectRef iref4 = irt.Add(cookie1, obj3.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref3));
+
+ // Pop segment.
+ UNUSED(cookie2);
+ irt.SetSegmentState(cookie1);
+
+ IndirectRef iref5 = irt.Add(cookie1, obj4.Get());
+
+ EXPECT_EQ(irt.Capacity(), 2u);
+ EXPECT_TRUE(irt.Get(iref3) == nullptr);
+ CheckDump(&irt, 2, 2);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4, iref5);
+ }
+
+ // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+ // reference
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref3 = irt.Add(cookie1, obj2.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref2));
+
+ // Pop segment.
+ irt.SetSegmentState(cookie1);
+
+ // Push segment.
+ const IRTSegmentState cookie1_second = irt.GetSegmentState();
+ UNUSED(cookie1_second);
+
+ IndirectRef iref4 = irt.Add(cookie1, obj3.Get());
+
+ EXPECT_EQ(irt.Capacity(), 2u);
+ EXPECT_TRUE(irt.Get(iref3) == nullptr);
+ CheckDump(&irt, 2, 2);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4);
+ }
+}
+
+TEST_F(IndirectReferenceTableTest, Resize) {
+ ScopedObjectAccess soa(Thread::Current());
+ static const size_t kTableMax = 512;
+
+ mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ StackHandleScope<1> hs(soa.Self());
+ ASSERT_TRUE(c != nullptr);
+ Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj0.Get() != nullptr);
+
+ std::string error_msg;
+ IndirectReferenceTable irt(kTableMax,
+ kLocal,
+ IndirectReferenceTable::ResizableCapacity::kYes,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ CheckDump(&irt, 0, 0);
+ const IRTSegmentState cookie = kIRTFirstSegment;
+
+ for (size_t i = 0; i != kTableMax + 1; ++i) {
+ irt.Add(cookie, obj0.Get());
+ }
+
+ EXPECT_EQ(irt.Capacity(), kTableMax + 1);
+}
+
} // namespace art
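
The Holes cases above are easier to follow with a toy model of the bookkeeping: a cookie is just
the remembered top index, removing a non-top entry leaves a hole, and removing the top entry (or
popping a segment) also strips any holes immediately below it. A deliberately tiny sketch for
intuition only, not the ART implementation -- cookie validation and in-segment hole-filling on Add
are omitted; it reproduces the Capacity() == 2 expectation from case 2:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyIrt {
      std::vector<int> slots;  // the table; 0 marks a hole
      uint32_t top = 0;        // analogue of IRTSegmentState::top_index

      uint32_t PushSegment() { return top; }  // the cookie is the old top
      void PopSegment(uint32_t cookie) { top = cookie; TrimTrailingHoles(); }

      uint32_t Add(int value) {
        if (top == slots.size()) slots.push_back(value); else slots[top] = value;
        return top++;
      }
      void Remove(uint32_t index) {
        slots[index] = 0;      // removing mid-segment leaves a hole...
        TrimTrailingHoles();   // ...removing the top strips holes below it
      }
      void TrimTrailingHoles() {
        while (top > 0 && slots[top - 1] == 0) --top;
      }
    };

    int main() {
      ToyIrt irt;
      irt.Add(1);                           // base segment: one live entry
      uint32_t cookie = irt.PushSegment();  // "native call" pushes a segment
      irt.Add(2);
      uint32_t mid = irt.Add(3);
      irt.Add(4);
      irt.Remove(mid);                      // hole in the middle of the segment
      irt.PopSegment(cookie);               // holes vanish with the segment
      irt.Add(5);
      assert(irt.top == 2);                 // matches EXPECT_EQ(irt.Capacity(), 2u)
      return 0;
    }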
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2e0077056e..a32c800491 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -35,8 +35,17 @@
namespace art {
namespace interpreter {
-static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
- Object* receiver, uint32_t* args, JValue* result)
+ALWAYS_INLINE static ObjPtr<mirror::Object> ObjArg(uint32_t arg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return ObjPtr<mirror::Object>(reinterpret_cast<mirror::Object*>(arg));
+}
+
+static void InterpreterJni(Thread* self,
+ ArtMethod* method,
+ const StringPiece& shorty,
+ ObjPtr<mirror::Object> receiver,
+ uint32_t* args,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
// TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler;
// it should be removed and JNI compiled stubs used instead.
@@ -52,7 +61,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), klass.get());
}
- result->SetL(soa.Decode<Object>(jresult));
+ result->SetL(soa.Decode<mirror::Object>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jclass);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -87,14 +96,13 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(ObjArg(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), klass.get(), arg0.get());
}
- result->SetL(soa.Decode<Object>(jresult));
+ result->SetL(soa.Decode<mirror::Object>(jresult));
} else if (shorty == "IIZ") {
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -109,8 +117,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(ObjArg(args[0])));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
@@ -134,11 +141,9 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(ObjArg(args[0])));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(ObjArg(args[1])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
@@ -147,11 +152,9 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(ObjArg(args[1])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(ObjArg(args[2])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
@@ -160,8 +163,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(ObjArg(args[1])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
@@ -170,11 +172,9 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(ObjArg(args[0])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(ObjArg(args[2])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
} else {
@@ -192,7 +192,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get());
}
- result->SetL(soa.Decode<Object>(jresult));
+ result->SetL(soa.Decode<mirror::Object>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -206,14 +206,13 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(
- reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(ObjArg(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get(), arg0.get());
}
- result->SetL(soa.Decode<Object>(jresult));
+ result->SetL(soa.Decode<mirror::Object>(jresult));
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
@@ -312,7 +311,7 @@ static inline JValue Execute(
} else {
// Mterp didn't like that instruction. Single-step it with the reference interpreter.
result_register = ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame,
- result_register, true);
+ result_register, true);
if (shadow_frame.GetDexPC() == DexFile::kDexNoIndex) {
// Single-stepped a return or an exception not handled locally. Return to caller.
return result_register;
@@ -354,8 +353,11 @@ static inline JValue Execute(
}
}
-void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver,
- uint32_t* args, JValue* result,
+void EnterInterpreterFromInvoke(Thread* self,
+ ArtMethod* method,
+ ObjPtr<mirror::Object> receiver,
+ uint32_t* args,
+ JValue* result,
bool stay_in_interpreter) {
DCHECK_EQ(self, Thread::Current());
bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
@@ -393,7 +395,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
size_t cur_reg = num_regs - num_ins;
if (!method->IsStatic()) {
CHECK(receiver != nullptr);
- shadow_frame->SetVRegReference(cur_reg, receiver);
+ shadow_frame->SetVRegReference(cur_reg, receiver.Ptr());
++cur_reg;
}
uint32_t shorty_len = 0;
@@ -402,8 +404,9 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
DCHECK_LT(shorty_pos + 1, shorty_len);
switch (shorty[shorty_pos + 1]) {
case 'L': {
- Object* o = reinterpret_cast<StackReference<Object>*>(&args[arg_pos])->AsMirrorPtr();
- shadow_frame->SetVRegReference(cur_reg, o);
+ ObjPtr<mirror::Object> o =
+ reinterpret_cast<StackReference<mirror::Object>*>(&args[arg_pos])->AsMirrorPtr();
+ shadow_frame->SetVRegReference(cur_reg, o.Ptr());
break;
}
case 'J': case 'D': {
@@ -442,7 +445,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
// references pointers due to moving GC.
args = shadow_frame->GetVRegArgs(method->IsStatic() ? 0 : 1);
if (!Runtime::Current()->IsStarted()) {
- UnstartedRuntime::Jni(self, method, receiver, args, result);
+ UnstartedRuntime::Jni(self, method, receiver.Ptr(), args, result);
} else {
InterpreterJni(self, method, shorty, receiver, args, result);
}
@@ -539,7 +542,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
if (kIsDebugBuild) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// This is a suspend point. But it's ok since value has been set into shadow_frame.
- mirror::Class* klass = class_linker->ResolveType(
+ ObjPtr<mirror::Class> klass = class_linker->ResolveType(
instr->VRegB_21c(), shadow_frame->GetMethod());
DCHECK(klass->IsStringClass());
}
@@ -582,8 +585,10 @@ JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* cod
return Execute(self, code_item, *shadow_frame, JValue());
}
-void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result) {
+void ArtInterpreterToInterpreterBridge(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result) {
bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
@@ -595,10 +600,10 @@ void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* co
// Ensure static methods are initialized.
const bool is_static = method->IsStatic();
if (is_static) {
- mirror::Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
StackHandleScope<1> hs(self);
- HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
+ HandleWrapperObjPtr<mirror::Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
self, h_declaring_class, true, true))) {
DCHECK(self->IsExceptionPending());
@@ -615,9 +620,9 @@ void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* co
// We don't expect to be asked to interpret native code (which is entered via a JNI compiler
// generated stub) except during testing and image writing.
CHECK(!Runtime::Current()->IsStarted());
- Object* receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
+ ObjPtr<mirror::Object> receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
- UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver, args, result);
+ UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver.Ptr(), args, result);
}
self->PopShadowFrame();
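
For readers unfamiliar with shorties: the strings matched above ("L", "V", "LL", "ZL", ...) are
method shorties, with the return type first and the argument types after it; each case casts the
raw JNI entrypoint to the matching function-pointer type before calling it. A hedged standalone
sketch of that dispatch idea, with stand-in types and made-up names, covering only two cases:

    #include <cstdint>
    #include <string>

    using Env = void*;    // stand-in for JNIEnv*
    using Klass = void*;  // stand-in for jclass

    static int32_t CallStaticByShorty(const std::string& shorty, void* entrypoint,
                                      Env env, Klass klass, const uint32_t* args) {
      if (shorty == "I") {          // jint f(JNIEnv*, jclass)
        using FnType = int32_t(Env, Klass);
        return reinterpret_cast<FnType*>(entrypoint)(env, klass);
      } else if (shorty == "II") {  // jint f(JNIEnv*, jclass, jint)
        using FnType = int32_t(Env, Klass, int32_t);
        return reinterpret_cast<FnType*>(entrypoint)(env, klass, args[0]);
      }
      return 0;  // unsupported shorty; the real code logs a fatal error here
    }

    static int32_t AddOne(Env, Klass, int32_t x) { return x + 1; }

    int main() {
      uint32_t args[] = {41};
      return CallStaticByShorty("II", reinterpret_cast<void*>(&AddOne),
                                nullptr, nullptr, args) == 42 ? 0 : 1;
    }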
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 38ce851e8e..65cfade09a 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -19,6 +19,7 @@
#include "base/mutex.h"
#include "dex_file.h"
+#include "obj_ptr.h"
namespace art {
namespace mirror {
@@ -36,7 +37,9 @@ namespace interpreter {
// The optional stay_in_interpreter parameter (false by default) can be used by clients to
// explicitly force interpretation in the remaining path that implements method invocation.
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
- mirror::Object* receiver, uint32_t* args, JValue* result,
+ ObjPtr<mirror::Object> receiver,
+ uint32_t* args,
+ JValue* result,
bool stay_in_interpreter = false)
REQUIRES_SHARED(Locks::mutator_lock_);
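
The receiver parameter change above is part of the wider Object* -> ObjPtr<mirror::Object>
migration running through this patch. Roughly, ObjPtr is a by-value wrapper that must be unwrapped
explicitly with Ptr(), which is why call sites gain receiver.Ptr(). A greatly simplified stand-in
to show the shape (the real class also packs a per-thread cookie into spare pointer bits in debug
builds to catch stale references across suspend points; that machinery is omitted here):

    #include <cstddef>

    template <typename T>
    class ToyObjPtr {
     public:
      ToyObjPtr(T* ptr = nullptr) : ptr_(ptr) {}  // implicit, like ObjPtr
      T* Ptr() const { return ptr_; }             // explicit unwrap
      T* operator->() const { return ptr_; }
      bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }
     private:
      T* ptr_;
    };

    struct Obj { int x = 7; };

    int main() {
      Obj o;
      ToyObjPtr<Obj> p(&o);  // APIs take the wrapper by value
      Obj* raw = p.Ptr();    // as in shadow_frame->SetVRegReference(..., receiver.Ptr())
      return (p != nullptr && raw->x == 7) ? 0 : 1;
    }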
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index b71236b511..1ed3d550b9 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -33,6 +33,7 @@
#include "stack.h"
#include "unstarted_runtime.h"
#include "verifier/method_verifier.h"
+#include "well_known_classes.h"
namespace art {
namespace interpreter {
@@ -53,7 +54,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
CHECK(self->IsExceptionPending());
return false;
}
- ObjPtr<Object> obj;
+ ObjPtr<mirror::Object> obj;
if (is_static) {
obj = f->GetDeclaringClass();
} else {
@@ -70,7 +71,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
StackHandleScope<1> hs(self);
// Wrap in handle wrapper in case the listener does thread suspension.
HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
- ObjPtr<Object> this_object;
+ ObjPtr<mirror::Object> this_object;
if (!f->IsStatic()) {
this_object = obj;
}
@@ -146,7 +147,7 @@ EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticObjectRead, Primitive::kPrimNot)
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type>
bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
if (UNLIKELY(obj == nullptr)) {
// We lost the reference to the field index so we cannot get a more
// precise exception message.
@@ -162,8 +163,14 @@ bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t in
field_offset.Uint32Value());
DCHECK(f != nullptr);
DCHECK(!f->IsStatic());
- instrumentation->FieldReadEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
- shadow_frame.GetDexPC(), f);
+ StackHandleScope<1> hs(Thread::Current());
+ // Save obj in case the instrumentation event has thread suspension.
+ HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
+ instrumentation->FieldReadEvent(Thread::Current(),
+ obj.Ptr(),
+ shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(),
+ f);
}
// Note: iget-x-quick instructions are only for non-volatile fields.
const uint32_t vregA = inst->VRegA_22c(inst_data);
@@ -257,7 +264,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
CHECK(self->IsExceptionPending());
return false;
}
- ObjPtr<Object> obj;
+ ObjPtr<mirror::Object> obj;
if (is_static) {
obj = f->GetDeclaringClass();
} else {
@@ -277,7 +284,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
// Wrap in handle wrapper in case the listener does thread suspension.
HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
- ObjPtr<Object> this_object = f->IsStatic() ? nullptr : obj;
+ ObjPtr<mirror::Object> this_object = f->IsStatic() ? nullptr : obj;
instrumentation->FieldWriteEvent(self, this_object.Ptr(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC(),
@@ -304,14 +311,14 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
f->SetLong<transaction_active>(obj, shadow_frame.GetVRegLong(vregA));
break;
case Primitive::kPrimNot: {
- Object* reg = shadow_frame.GetVRegReference(vregA);
+ ObjPtr<mirror::Object> reg = shadow_frame.GetVRegReference(vregA);
if (do_assignability_check && reg != nullptr) {
// FieldHelper::GetType can resolve classes, use a handle wrapper which will restore the
// object in the destructor.
- ObjPtr<Class> field_class;
+ ObjPtr<mirror::Class> field_class;
{
StackHandleScope<2> hs(self);
- HandleWrapper<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
+ HandleWrapperObjPtr<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
field_class = f->GetType<true>();
}
@@ -370,7 +377,7 @@ EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticObjectWrite, Primitive::kPrimNot)
template<Primitive::Type field_type, bool transaction_active>
bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
if (UNLIKELY(obj == nullptr)) {
// We lost the reference to the field index so we cannot get a more
// precise exception message.
@@ -388,8 +395,15 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
DCHECK(f != nullptr);
DCHECK(!f->IsStatic());
JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
- instrumentation->FieldWriteEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
- shadow_frame.GetDexPC(), f, field_value);
+ StackHandleScope<1> hs(Thread::Current());
+ // Save obj in case the instrumentation event has thread suspension.
+ HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
+ instrumentation->FieldWriteEvent(Thread::Current(),
+ obj.Ptr(),
+ shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(),
+ f,
+ field_value);
}
// Note: iput-x-quick instructions are only for non-volatile fields.
switch (field_type) {
@@ -491,7 +505,12 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) {
Runtime::Current()->AbortTransactionAndThrowAbortError(self, abort_msg);
}
-// Separate declaration is required solely for the attributes.
+// START DECLARATIONS:
+//
+// These additional declarations are required because clang complains
+// about ALWAYS_INLINE (-Werror, -Wgcc-compat) in definitions.
+//
+
template <bool is_range, bool do_assignability_check>
REQUIRES_SHARED(Locks::mutator_lock_)
static inline bool DoCallCommon(ArtMethod* called_method,
@@ -502,7 +521,6 @@ static inline bool DoCallCommon(ArtMethod* called_method,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
uint32_t vregC) ALWAYS_INLINE;
-// Separate declaration is required solely for the attributes.
template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
static inline bool DoCallPolymorphic(ArtMethod* called_method,
Handle<mirror::MethodType> callsite_type,
@@ -513,6 +531,33 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
uint32_t vregC) ALWAYS_INLINE;
+REQUIRES_SHARED(Locks::mutator_lock_)
+static inline bool DoCallTransform(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandleImpl> receiver,
+ JValue* result) ALWAYS_INLINE;
+
+REQUIRES_SHARED(Locks::mutator_lock_)
+inline void PerformCall(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ArtMethod* caller_method,
+ const size_t first_dest_reg,
+ ShadowFrame* callee_frame,
+ JValue* result) ALWAYS_INLINE;
+
+template <bool is_range>
+REQUIRES_SHARED(Locks::mutator_lock_)
+inline void CopyRegisters(ShadowFrame& caller_frame,
+ ShadowFrame* callee_frame,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ const size_t first_src_reg,
+ const size_t first_dest_reg,
+ const size_t num_regs) ALWAYS_INLINE;
+
+// END DECLARATIONS.
+
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
const DexFile::CodeItem* code_item,
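
The START/END DECLARATIONS block above exists because of where clang accepts the always_inline
attribute: under -Werror with -Wgcc-compat, a trailing ALWAYS_INLINE on a function definition is
rejected, so the attribute rides on a separate forward declaration and the definition stays bare.
A minimal sketch of the pattern, with a made-up function name:

    #define ALWAYS_INLINE __attribute__((always_inline))

    static inline int Twice(int x) ALWAYS_INLINE;  // attribute on the declaration

    static inline int Twice(int x) {               // definition carries no attribute
      return 2 * x;
    }

    int main() { return Twice(21) == 42 ? 0 : 1; }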
@@ -522,7 +567,7 @@ void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
- mirror::Class* declaringClass = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaringClass = method->GetDeclaringClass();
if (UNLIKELY(!declaringClass->IsInitialized())) {
self->PushShadowFrame(shadow_frame);
StackHandleScope<1> hs(self);
@@ -555,7 +600,7 @@ void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
uint16_t this_obj_vreg,
JValue result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- Object* existing = shadow_frame->GetVRegReference(this_obj_vreg);
+ ObjPtr<mirror::Object> existing = shadow_frame->GetVRegReference(this_obj_vreg);
if (existing == nullptr) {
// If it's null, we come from compiled code that was deoptimized. Nothing to do,
// as the compiler verified there was no alias.
@@ -576,10 +621,11 @@ void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
}
template<bool is_range, bool do_access_check>
- REQUIRES_SHARED(Locks::mutator_lock_)
-inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data,
- JValue* result) {
+inline bool DoInvokePolymorphic(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
// Invoke-polymorphic instructions always take a receiver. i.e, they are never static.
const uint32_t vRegC = (is_range) ? inst->VRegC_4rcc() : inst->VRegC_45cc();
@@ -593,7 +639,8 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
// that vRegC really is a reference type.
StackHandleScope<6> hs(self);
Handle<mirror::MethodHandleImpl> method_handle(hs.NewHandle(
- reinterpret_cast<mirror::MethodHandleImpl*>(shadow_frame.GetVRegReference(vRegC))));
+ ObjPtr<mirror::MethodHandleImpl>::DownCast(
+ MakeObjPtr(shadow_frame.GetVRegReference(vRegC)))));
if (UNLIKELY(method_handle.Get() == nullptr)) {
const int method_idx = (is_range) ? inst->VRegB_4rcc() : inst->VRegB_45cc();
// Note that the invoke type is kVirtual here because a call to a signature
@@ -635,12 +682,6 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
CHECK(called_method != nullptr);
CHECK(handle_type.Get() != nullptr);
- // We now have to massage the number of inputs to the target function.
- // It's always one less than the number of inputs to the signature polymorphic
- // invoke, the first input being a reference to the MethodHandle itself.
- const uint16_t number_of_inputs =
- ((is_range) ? inst->VRegA_4rcc(inst_data) : inst->VRegA_45cc(inst_data)) - 1;
-
uint32_t arg[Instruction::kMaxVarArgRegs] = {};
uint32_t receiver_vregC = 0;
if (is_range) {
@@ -657,8 +698,8 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
if (IsInvoke(handle_kind)) {
if (handle_kind == kInvokeVirtual || handle_kind == kInvokeInterface) {
- mirror::Object* receiver = shadow_frame.GetVRegReference(receiver_vregC);
- mirror::Class* declaring_class = called_method->GetDeclaringClass();
+ ObjPtr<mirror::Object> receiver = shadow_frame.GetVRegReference(receiver_vregC);
+ ObjPtr<mirror::Class> declaring_class = called_method->GetDeclaringClass();
// Verify that _vRegC is an object reference and of the type expected by
// the receiver.
called_method = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
@@ -678,15 +719,15 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
// constructor. It's a private method, and we've already access checked at
// the point of creating the handle.
} else if (handle_kind == kInvokeSuper) {
- mirror::Class* declaring_class = called_method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = called_method->GetDeclaringClass();
// Note that we're not dynamically dispatching on the type of the receiver
// here. We use the static type of the "receiver" object that we've
// recorded in the method handle's type, which will be the same as the
// special caller that was specified at the point of lookup.
- mirror::Class* referrer_class = handle_type->GetPTypes()->Get(0);
+ ObjPtr<mirror::Class> referrer_class = handle_type->GetPTypes()->Get(0);
if (!declaring_class->IsInterface()) {
- mirror::Class* super_class = referrer_class->GetSuperClass();
+ ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
uint16_t vtable_index = called_method->GetMethodIndex();
DCHECK(super_class != nullptr);
DCHECK(super_class->HasVTable());
@@ -702,18 +743,22 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
CHECK(called_method != nullptr);
}
- // NOTE: handle_kind == kInvokeStatic needs no special treatment here. We
- // can directly make the call. handle_kind == kInvokeSuper doesn't have any
- // particular use and can probably be dropped.
-
- if (callsite_type->IsExactMatch(handle_type.Get())) {
- return DoCallCommon<is_range, do_access_check>(
- called_method, self, shadow_frame, result, number_of_inputs,
- arg, receiver_vregC);
+ if (handle_kind == kInvokeTransform) {
+ return DoCallTransform(called_method,
+ callsite_type,
+ self,
+ shadow_frame,
+ method_handle /* receiver */,
+ result);
} else {
- return DoCallPolymorphic<is_range>(
- called_method, callsite_type, handle_type, self, shadow_frame,
- result, arg, receiver_vregC);
+ return DoCallPolymorphic<is_range>(called_method,
+ callsite_type,
+ handle_type,
+ self,
+ shadow_frame,
+ result,
+ arg,
+ receiver_vregC);
}
} else {
// TODO(narayan): Implement field getters and setters.
@@ -749,6 +794,64 @@ static inline size_t GetInsForProxyOrNativeMethod(ArtMethod* method)
return num_ins;
}
+
+inline void PerformCall(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ArtMethod* caller_method,
+ const size_t first_dest_reg,
+ ShadowFrame* callee_frame,
+ JValue* result) {
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ ArtMethod* target = callee_frame->GetMethod();
+ if (ClassLinker::ShouldUseInterpreterEntrypoint(
+ target,
+ target->GetEntryPointFromQuickCompiledCode())) {
+ ArtInterpreterToInterpreterBridge(self, code_item, callee_frame, result);
+ } else {
+ ArtInterpreterToCompiledCodeBridge(
+ self, caller_method, code_item, callee_frame, result);
+ }
+ } else {
+ UnstartedRuntime::Invoke(self, code_item, callee_frame, result, first_dest_reg);
+ }
+}
+
+template <bool is_range>
+inline void CopyRegisters(ShadowFrame& caller_frame,
+ ShadowFrame* callee_frame,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ const size_t first_src_reg,
+ const size_t first_dest_reg,
+ const size_t num_regs) {
+ if (is_range) {
+ const size_t dest_reg_bound = first_dest_reg + num_regs;
+ for (size_t src_reg = first_src_reg, dest_reg = first_dest_reg; dest_reg < dest_reg_bound;
+ ++dest_reg, ++src_reg) {
+ AssignRegister(callee_frame, caller_frame, dest_reg, src_reg);
+ }
+ } else {
+ DCHECK_LE(num_regs, arraysize(arg));
+
+ for (size_t arg_index = 0; arg_index < num_regs; ++arg_index) {
+ AssignRegister(callee_frame, caller_frame, first_dest_reg + arg_index, arg[arg_index]);
+ }
+ }
+}
+
+// Returns true iff the callsite type for a polymorphic invoke is
+// transformer-like, i.e., it has a single input argument whose type is
+// dalvik.system.EmulatedStackFrame.
+static inline bool IsCallerTransformer(Handle<mirror::MethodType> callsite_type)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::ObjectArray<mirror::Class>> param_types(callsite_type->GetPTypes());
+ if (param_types->GetLength() == 1) {
+ ObjPtr<mirror::Class> param(param_types->GetWithoutChecks(0));
+ return param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame);
+ }
+
+ return false;
+}
+
template <bool is_range>
static inline bool DoCallPolymorphic(ArtMethod* called_method,
Handle<mirror::MethodType> callsite_type,
@@ -757,7 +860,7 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
ShadowFrame& shadow_frame,
JValue* result,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) {
+ uint32_t first_src_reg) {
// TODO(narayan): Wire in the String.init hacks.
// Compute method information.
@@ -770,16 +873,18 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
// some transformations (such as boxing a long -> Long or widening an
// int -> long) will change that number.
uint16_t num_regs;
+ size_t num_input_regs;
size_t first_dest_reg;
if (LIKELY(code_item != nullptr)) {
num_regs = code_item->registers_size_;
first_dest_reg = num_regs - code_item->ins_size_;
+ num_input_regs = code_item->ins_size_;
// Parameter registers go at the end of the shadow frame.
DCHECK_NE(first_dest_reg, (size_t)-1);
} else {
// No local regs for proxy and native methods.
DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
- num_regs = GetInsForProxyOrNativeMethod(called_method);
+ num_regs = num_input_regs = GetInsForProxyOrNativeMethod(called_method);
first_dest_reg = 0;
}
@@ -793,35 +898,109 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
{
ScopedStackedShadowFramePusher pusher(
self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
- if (!PerformArgumentConversions<is_range>(self, callsite_type, target_type,
- shadow_frame, vregC, first_dest_reg,
- arg, new_shadow_frame, result)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
- }
-
- // Do the call now.
- if (LIKELY(Runtime::Current()->IsStarted())) {
- ArtMethod* target = new_shadow_frame->GetMethod();
- if (ClassLinker::ShouldUseInterpreterEntrypoint(
- target,
- target->GetEntryPointFromQuickCompiledCode())) {
- ArtInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
+ if (callsite_type->IsExactMatch(target_type.Get())) {
+ // This is an exact invoke, so we can take the fast path of just copying
+ // all registers without performing any argument conversions.
+ CopyRegisters<is_range>(shadow_frame,
+ new_shadow_frame,
+ arg,
+ first_src_reg,
+ first_dest_reg,
+ num_input_regs);
} else {
- ArtInterpreterToCompiledCodeBridge(
- self, shadow_frame.GetMethod(), code_item, new_shadow_frame, result);
+ // This includes the case where we're entering this invoke-polymorphic
+ // from a transformer method. The callsite_type will then contain a single
+ // argument of type dalvik.system.EmulatedStackFrame, which we must
+ // unmarshal into the new_shadow_frame before performing argument
+ // conversions on it.
+ if (IsCallerTransformer(callsite_type)) {
+ // The emulated stack frame will be the first and only argument
+ // when we're coming through from a transformer.
+ //
+ // TODO(narayan): This should be a mirror::EmulatedStackFrame after that
+ // type is introduced.
+ ObjPtr<mirror::Object> emulated_stack_frame(
+ shadow_frame.GetVRegReference(first_src_reg));
+ if (!ConvertAndCopyArgumentsFromEmulatedStackFrame<is_range>(self,
+ emulated_stack_frame,
+ target_type,
+ first_dest_reg,
+ new_shadow_frame)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
+ } else if (!ConvertAndCopyArgumentsFromCallerFrame<is_range>(self,
+ callsite_type,
+ target_type,
+ shadow_frame,
+ first_src_reg,
+ first_dest_reg,
+ arg,
+ new_shadow_frame)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
}
- } else {
- UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg);
}
+ PerformCall(self, code_item, shadow_frame.GetMethod(), first_dest_reg, new_shadow_frame, result);
+
// TODO(narayan): Perform return value conversions.
return !self->IsExceptionPending();
}
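
The fast path above is gated on MethodType::IsExactMatch. A self-contained sketch of what "exact match" plausibly means for that gate (toy types; assume the real test compares the return type and every parameter type for identity):

#include <cstddef>
#include <vector>

struct Type { int id; };
inline bool SameType(Type a, Type b) { return a.id == b.id; }

bool IsExactMatch(const std::vector<Type>& callsite_params, Type callsite_ret,
                  const std::vector<Type>& target_params, Type target_ret) {
  if (!SameType(callsite_ret, target_ret)) {
    return false;  // a return-type mismatch needs a conversion
  }
  if (callsite_params.size() != target_params.size()) {
    return false;
  }
  for (size_t i = 0; i < callsite_params.size(); ++i) {
    if (!SameType(callsite_params[i], target_params[i])) {
      return false;  // e.g. int vs. Integer would require boxing
    }
  }
  return true;  // identical slot for slot: a raw register copy is safe
}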
+static inline bool DoCallTransform(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandleImpl> receiver,
+ JValue* result) {
+ // The number of registers can be fixed here, because the method we're
+ // calling (MethodHandle.transformInternal) doesn't have any locals and its
+ // signature is known:
+ //
+ // private MethodHandle.transformInternal(EmulatedStackFrame sf);
+ //
+ // This means we need only two vregs:
+ // - One for the receiver object.
+ // - One for the only method argument (an EmulatedStackFrame).
+ static constexpr size_t kNumRegsForTransform = 2;
+
+ const DexFile::CodeItem* code_item = called_method->GetCodeItem();
+ DCHECK(code_item != nullptr);
+ DCHECK_EQ(kNumRegsForTransform, code_item->registers_size_);
+ DCHECK_EQ(kNumRegsForTransform, code_item->ins_size_);
+
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(kNumRegsForTransform, &shadow_frame, called_method, /* dex pc */ 0);
+ ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
+
+ // TODO(narayan): Perform argument conversions first (if this is an inexact invoke), and
+ // then construct an argument list object that's passed through to the
+ // method. Note that the ArgumentList reference is currently a nullptr.
+ //
+ // NOTE(narayan): If the caller is a transformer method (i.e, there is only
+ // one argument and its type is EmulatedStackFrame), we can directly pass that
+ // through without having to do any additional work.
+ UNUSED(callsite_type);
+
+ new_shadow_frame->SetVRegReference(0, receiver.Get());
+ // TODO(narayan): This is the EmulatedStackFrame, currently nullptr.
+ new_shadow_frame->SetVRegReference(1, nullptr);
+
+ PerformCall(self,
+ code_item,
+ shadow_frame.GetMethod(),
+ 0 /* first dest reg */,
+ new_shadow_frame,
+ result);
+
+ return !self->IsExceptionPending();
+}
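+
Since transformInternal has no locals, registers_size_ == ins_size_ == 2 and the callee frame layout is fully determined. A sketch of that fixed layout (stand-in types only):

// vreg 0: the receiver (the MethodHandle being invoked)
// vreg 1: the single EmulatedStackFrame argument (still nullptr in this CL)
void FillTransformFrame(void* vregs[2], void* receiver, void* emulated_frame) {
  vregs[0] = receiver;
  vregs[1] = emulated_frame;
}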
+
template <bool is_range,
bool do_assignability_check>
static inline bool DoCallCommon(ArtMethod* called_method,
@@ -940,15 +1119,22 @@ static inline bool DoCallCommon(ArtMethod* called_method,
switch (shorty[shorty_pos + 1]) {
// Handle Object references. 1 virtual register slot.
case 'L': {
- Object* o = shadow_frame.GetVRegReference(src_reg);
+ ObjPtr<mirror::Object> o = shadow_frame.GetVRegReference(src_reg);
if (do_assignability_check && o != nullptr) {
PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- Class* arg_type =
- method->GetClassFromTypeIndex(
- params->GetTypeItem(shorty_pos).type_idx_, true /* resolve */, pointer_size);
+ const uint32_t type_idx = params->GetTypeItem(shorty_pos).type_idx_;
+ ObjPtr<mirror::Class> arg_type = method->GetDexCacheResolvedType(type_idx,
+ pointer_size);
if (arg_type == nullptr) {
- CHECK(self->IsExceptionPending());
- return false;
+ StackHandleScope<1> hs(self);
+ // Preserve o since it is used below and GetClassFromTypeIndex may cause thread
+ // suspension.
+ HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&o);
+ arg_type = method->GetClassFromTypeIndex(type_idx, true /* resolve */, pointer_size);
+ if (arg_type == nullptr) {
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
}
if (!o->VerifierInstanceOf(arg_type)) {
// This should never happen.
@@ -961,7 +1147,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
return false;
}
}
- new_shadow_frame->SetVRegReference(dest_reg, o);
+ new_shadow_frame->SetVRegReference(dest_reg, o.Ptr());
break;
}
// Handle doubles and longs. 2 consecutive virtual register slots.
@@ -982,40 +1168,20 @@ static inline bool DoCallCommon(ArtMethod* called_method,
}
}
} else {
- size_t arg_index = 0;
-
- // Fast path: no extra checks.
if (is_range) {
- uint16_t first_src_reg = vregC;
-
- for (size_t src_reg = first_src_reg, dest_reg = first_dest_reg; dest_reg < num_regs;
- ++dest_reg, ++src_reg) {
- AssignRegister(new_shadow_frame, shadow_frame, dest_reg, src_reg);
- }
- } else {
- DCHECK_LE(number_of_inputs, arraysize(arg));
-
- for (; arg_index < number_of_inputs; ++arg_index) {
- AssignRegister(new_shadow_frame, shadow_frame, first_dest_reg + arg_index, arg[arg_index]);
- }
+ DCHECK_EQ(num_regs, first_dest_reg + number_of_inputs);
}
+
+ CopyRegisters<is_range>(shadow_frame,
+ new_shadow_frame,
+ arg,
+ vregC,
+ first_dest_reg,
+ number_of_inputs);
self->EndAssertNoThreadSuspension(old_cause);
}
- // Do the call now.
- if (LIKELY(Runtime::Current()->IsStarted())) {
- ArtMethod* target = new_shadow_frame->GetMethod();
- if (ClassLinker::ShouldUseInterpreterEntrypoint(
- target,
- target->GetEntryPointFromQuickCompiledCode())) {
- ArtInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
- } else {
- ArtInterpreterToCompiledCodeBridge(
- self, shadow_frame.GetMethod(), code_item, new_shadow_frame, result);
- }
- } else {
- UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg);
- }
+ PerformCall(self, code_item, shadow_frame.GetMethod(), first_dest_reg, new_shadow_frame, result);
if (string_init && !self->IsExceptionPending()) {
SetStringInitValueToAllAliases(&shadow_frame, string_init_vreg_this, *result);
@@ -1048,8 +1214,10 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
}
template <bool is_range, bool do_access_check, bool transaction_active>
-bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
- Thread* self, JValue* result) {
+bool DoFilledNewArray(const Instruction* inst,
+ const ShadowFrame& shadow_frame,
+ Thread* self,
+ JValue* result) {
DCHECK(inst->Opcode() == Instruction::FILLED_NEW_ARRAY ||
inst->Opcode() == Instruction::FILLED_NEW_ARRAY_RANGE);
const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
@@ -1062,14 +1230,17 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
return false;
}
uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- Class* array_class = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
- self, false, do_access_check);
+ ObjPtr<mirror::Class> array_class = ResolveVerifyAndClinit(type_idx,
+ shadow_frame.GetMethod(),
+ self,
+ false,
+ do_access_check);
if (UNLIKELY(array_class == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
CHECK(array_class->IsArrayClass());
- Class* component_class = array_class->GetComponentType();
+ ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
const bool is_primitive_int_component = component_class->IsPrimitiveInt();
if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
@@ -1082,9 +1253,12 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
}
return false;
}
- Object* new_array = Array::Alloc<true>(self, array_class, length,
- array_class->GetComponentSizeShift(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ ObjPtr<mirror::Object> new_array = mirror::Array::Alloc<true>(
+ self,
+ array_class,
+ length,
+ array_class->GetComponentSizeShift(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
if (UNLIKELY(new_array == nullptr)) {
self->AssertPendingOOMException();
return false;
@@ -1102,7 +1276,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
new_array->AsIntArray()->SetWithoutChecks<transaction_active>(
i, shadow_frame.GetVReg(src_reg));
} else {
- new_array->AsObjectArray<Object>()->SetWithoutChecks<transaction_active>(
+ new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks<transaction_active>(
i, shadow_frame.GetVRegReference(src_reg));
}
}
@@ -1111,17 +1285,18 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
return true;
}
-// TODO fix thread analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
+// TODO: Use ObjPtr here.
template<typename T>
-static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
- NO_THREAD_SAFETY_ANALYSIS {
+static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array,
+ int32_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
for (int32_t i = 0; i < count; ++i) {
runtime->RecordWriteArray(array, i, array->GetWithoutChecks(i));
}
}
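
RecordArrayElementsInTransactionImpl snapshots each element before a bulk write so an aborted transaction can be rolled back. A standalone sketch of the same idea with a hypothetical write log:

#include <cstdint>
#include <utility>
#include <vector>

using WriteLog = std::vector<std::pair<int32_t, int32_t>>;  // (index, old value)

// Record the current value of each element that is about to be overwritten;
// on abort, the log is replayed to restore the array.
void RecordArrayElements(const int32_t* array, int32_t count, WriteLog* log) {
  for (int32_t i = 0; i < count; ++i) {
    log->emplace_back(i, array[i]);
  }
}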
-void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
+void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsActiveTransaction());
DCHECK(array != nullptr);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 40d6f036a2..9c26d24ab1 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -43,25 +43,11 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "obj_ptr.h"
#include "stack.h"
#include "thread.h"
#include "well_known_classes.h"
-using ::art::ArtMethod;
-using ::art::mirror::Array;
-using ::art::mirror::BooleanArray;
-using ::art::mirror::ByteArray;
-using ::art::mirror::CharArray;
-using ::art::mirror::Class;
-using ::art::mirror::ClassLoader;
-using ::art::mirror::IntArray;
-using ::art::mirror::LongArray;
-using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
-using ::art::mirror::ShortArray;
-using ::art::mirror::String;
-using ::art::mirror::Throwable;
-
namespace art {
namespace interpreter {
@@ -69,13 +55,11 @@ void ThrowNullPointerExceptionFromInterpreter()
REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kMonitorCounting>
-static inline void DoMonitorEnter(Thread* self,
- ShadowFrame* frame,
- Object* ref)
+static inline void DoMonitorEnter(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_) {
StackHandleScope<1> hs(self);
- Handle<Object> h_ref(hs.NewHandle(ref));
+ Handle<mirror::Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorEnter(self);
if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
frame->GetLockCountData().AddMonitor(self, h_ref.Get());
@@ -83,13 +67,11 @@ static inline void DoMonitorEnter(Thread* self,
}
template <bool kMonitorCounting>
-static inline void DoMonitorExit(Thread* self,
- ShadowFrame* frame,
- Object* ref)
+static inline void DoMonitorExit(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_) {
StackHandleScope<1> hs(self);
- Handle<Object> h_ref(hs.NewHandle(ref));
+ Handle<mirror::Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorExit(self);
if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
@@ -113,7 +95,7 @@ void AbortTransactionF(Thread* self, const char* fmt, ...)
void AbortTransactionV(Thread* self, const char* fmt, va_list args)
REQUIRES_SHARED(Locks::mutator_lock_);
-void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
+void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
@@ -126,11 +108,14 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check>
-static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data, JValue* result) {
+static inline bool DoInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) {
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
+ ObjPtr<mirror::Object> receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
ArtMethod* sf_method = shadow_frame.GetMethod();
ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
method_idx, &receiver, sf_method, self);
@@ -156,7 +141,7 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
instrumentation->InvokeVirtualOrInterface(
- self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ self, receiver.Ptr(), sf_method, shadow_frame.GetDexPC(), called_method);
}
}
return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
@@ -177,7 +162,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data,
JValue* result) {
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* const receiver = shadow_frame.GetVRegReference(vregC);
+ ObjPtr<mirror::Object> const receiver = shadow_frame.GetVRegReference(vregC);
if (UNLIKELY(receiver == nullptr)) {
// We lost the reference to the method index so we cannot get a more
// precise exception message.
@@ -190,7 +175,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
CHECK(receiver->GetClass() != nullptr)
<< "Null class found in object " << receiver << " in region type "
<< Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
- RegionSpace()->GetRegionType(receiver);
+ RegionSpace()->GetRegionType(receiver.Ptr());
}
CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
@@ -214,7 +199,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
// TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
instrumentation->InvokeVirtualOrInterface(
- self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
+ self, receiver.Ptr(), shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
}
// No need to check since we've been quickened.
return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
@@ -249,9 +234,11 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
-static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
+static inline ObjPtr<mirror::String> ResolveString(Thread* self,
+ ShadowFrame& shadow_frame,
+ uint32_t string_idx)
REQUIRES_SHARED(Locks::mutator_lock_) {
- Class* java_lang_string_class = String::GetJavaLangString();
+ ObjPtr<mirror::Class> java_lang_string_class = mirror::String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
StackHandleScope<1> hs(self);
@@ -262,11 +249,11 @@ static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uin
}
}
ArtMethod* method = shadow_frame.GetMethod();
- mirror::Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
DCHECK_LT(string_idx % mirror::DexCache::kDexCacheStringCacheSize,
declaring_class->GetDexFile().NumStringIds());
- mirror::String* string_ptr =
+ ObjPtr<mirror::String> string_ptr =
mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
string_idx,
mirror::DexCache::kDexCacheStringCacheSize).Read();
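
The dex cache consulted here behaves like a direct-mapped table of {index, value} pairs, which is why the lookup keys on string_idx % kDexCacheStringCacheSize. A simplified sketch of that lookup discipline (plain pointers instead of GC roots and atomics):

#include <cstdint>

struct StringCachePair {
  uint32_t string_idx;
  const char* value;  // nullptr when the slot is empty
};

const char* Lookup(const StringCachePair* cache,
                   uint32_t string_idx,
                   uint32_t cache_size) {
  const StringCachePair& slot = cache[string_idx % cache_size];
  // A different string whose index maps to the same slot may be cached here,
  // so the stored index must match before the value can be trusted.
  return (slot.string_idx == string_idx) ? slot.value : nullptr;
}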
@@ -318,8 +305,10 @@ static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
// Handles div-long and div-long-2addr instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
-static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
- int64_t dividend, int64_t divisor)
+static inline bool DoLongDivide(ShadowFrame& shadow_frame,
+ size_t result_reg,
+ int64_t dividend,
+ int64_t divisor)
REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
@@ -336,8 +325,10 @@ static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
// Handles rem-long and rem-long-2addr instructions.
// Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false.
-static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
- int64_t dividend, int64_t divisor)
+static inline bool DoLongRemainder(ShadowFrame& shadow_frame,
+ size_t result_reg,
+ int64_t dividend,
+ int64_t divisor)
REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
@@ -443,7 +434,7 @@ static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruc
<< inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\n";
for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
uint32_t raw_value = shadow_frame.GetVReg(i);
- Object* ref_value = shadow_frame.GetVRegReference(i);
+ ObjPtr<mirror::Object> ref_value = shadow_frame.GetVRegReference(i);
oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
if (ref_value != nullptr) {
if (ref_value->GetClass()->IsStringClass() &&
@@ -469,13 +460,13 @@ static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFra
REQUIRES_SHARED(Locks::mutator_lock_) {
// Unsigned type required so that sign extension does not make this wrong on 64-bit systems
uint32_t src_value = shadow_frame.GetVReg(src_reg);
- mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
+ ObjPtr<mirror::Object> o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
// If both register locations contain the same value, the register probably holds a reference.
// Note: As an optimization, non-moving collectors leave a stale reference value
// in the references array even after the original vreg was overwritten to a non-reference.
- if (src_value == reinterpret_cast<uintptr_t>(o)) {
- new_shadow_frame->SetVRegReference(dest_reg, o);
+ if (src_value == reinterpret_cast<uintptr_t>(o.Ptr())) {
+ new_shadow_frame->SetVRegReference(dest_reg, o.Ptr());
} else {
new_shadow_frame->SetVReg(dest_reg, src_value);
}
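
AssignRegister depends on the shadow frame storing every vreg twice: once as raw bits and once in a reference array that the GC scans. A standalone sketch of that disambiguation (toy frame; the real SetVRegReference updates both arrays):

#include <cstddef>
#include <cstdint>

struct MiniFrame {
  uint32_t raw[16];  // primitive view of the vregs
  void* refs[16];    // reference view, visited by the GC
};

void Assign(MiniFrame* dst, const MiniFrame& src, size_t dest_reg, size_t src_reg) {
  uint32_t src_value = src.raw[src_reg];
  void* ref = src.refs[src_reg];
  if (src_value == reinterpret_cast<uintptr_t>(ref)) {
    // Both views agree: treat the vreg as a reference so the GC keeps seeing it.
    dst->refs[dest_reg] = ref;
    dst->raw[dest_reg] = src_value;
  } else {
    // The raw bits were overwritten with a non-reference; copy only the value.
    dst->raw[dest_reg] = src_value;
    dst->refs[dest_reg] = nullptr;
  }
}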
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 90d9f89d67..1be20fab25 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -21,6 +21,7 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "jvalue.h"
+#include "obj_ptr.h"
namespace art {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 78afe56104..43bc9bd162 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -192,9 +192,9 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
case Instruction::MOVE_EXCEPTION: {
PREAMBLE();
- Throwable* exception = self->GetException();
+ ObjPtr<mirror::Throwable> exception = self->GetException();
DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
- shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception.Ptr());
self->ClearException();
inst = inst->Next_1xx();
break;
@@ -273,11 +273,11 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
const size_t ref_idx = inst->VRegA_11x(inst_data);
- Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
+ ObjPtr<mirror::Object> obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- Class* return_type = shadow_frame.GetMethod()->GetReturnType(true /* resolve */,
- pointer_size);
+ ObjPtr<mirror::Class> return_type = method->GetReturnType(true /* resolve */,
+ pointer_size);
// Re-load since it might have moved.
obj_result = shadow_frame.GetVRegReference(ref_idx);
if (return_type == nullptr) {
@@ -373,41 +373,44 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
case Instruction::CONST_STRING: {
PREAMBLE();
- String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
+ ObjPtr<mirror::String> s = ResolveString(self, shadow_frame, inst->VRegB_21c());
if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s.Ptr());
inst = inst->Next_2xx();
}
break;
}
case Instruction::CONST_STRING_JUMBO: {
PREAMBLE();
- String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
+ ObjPtr<mirror::String> s = ResolveString(self, shadow_frame, inst->VRegB_31c());
if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
- shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+ shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s.Ptr());
inst = inst->Next_3xx();
}
break;
}
case Instruction::CONST_CLASS: {
PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+ shadow_frame.GetMethod(),
+ self,
+ false,
+ do_access_check);
if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c.Ptr());
inst = inst->Next_2xx();
}
break;
}
case Instruction::MONITOR_ENTER: {
PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -419,7 +422,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::MONITOR_EXIT: {
PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -431,12 +434,15 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::CHECK_CAST: {
PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+ shadow_frame.GetMethod(),
+ self,
+ false,
+ do_access_check);
if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
@@ -448,12 +454,15 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::INSTANCE_OF: {
PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegC_22c(),
+ shadow_frame.GetMethod(),
+ self,
+ false,
+ do_access_check);
if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
(obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
inst = inst->Next_2xx();
@@ -462,7 +471,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::ARRAY_LENGTH: {
PREAMBLE();
- Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+ ObjPtr<mirror::Object> array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -474,9 +483,12 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::NEW_INSTANCE: {
PREAMBLE();
- Object* obj = nullptr;
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
+ ObjPtr<mirror::Object> obj = nullptr;
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+ shadow_frame.GetMethod(),
+ self,
+ false,
+ do_access_check);
if (LIKELY(c != nullptr)) {
if (UNLIKELY(c->IsStringClass())) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -499,7 +511,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
HANDLE_PENDING_EXCEPTION();
break;
}
- shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj.Ptr());
inst = inst->Next_2xx();
}
break;
@@ -507,13 +519,13 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::NEW_ARRAY: {
PREAMBLE();
int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
- Object* obj = AllocArrayFromCode<do_access_check, true>(
+ ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
- shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj.Ptr());
inst = inst->Next_2xx();
}
break;
@@ -539,7 +551,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+ ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
bool success = FillArrayData(obj, payload);
if (!success) {
HANDLE_PENDING_EXCEPTION();
@@ -553,7 +565,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::THROW: {
PREAMBLE();
- Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ ObjPtr<mirror::Object> exception =
+ shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
@@ -911,14 +924,14 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET_BOOLEAN: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
+ ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -929,14 +942,14 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET_BYTE: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
+ ObjPtr<mirror::ByteArray> array = a->AsByteArray();
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -947,14 +960,14 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET_CHAR: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
+ ObjPtr<mirror::CharArray> array = a->AsCharArray();
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -965,14 +978,14 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET_SHORT: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
+ ObjPtr<mirror::ShortArray> array = a->AsShortArray();
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -983,7 +996,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -991,7 +1004,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
- auto* array = down_cast<IntArray*>(a);
+ ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -1002,7 +1015,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET_WIDE: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1010,7 +1023,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
- auto* array = down_cast<LongArray*>(a);
+ ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -1021,14 +1034,14 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::AGET_OBJECT: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -1039,7 +1052,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT_BOOLEAN: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1047,7 +1060,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
+ ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1058,7 +1071,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT_BYTE: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1066,7 +1079,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
+ ObjPtr<mirror::ByteArray> array = a->AsByteArray();
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1077,7 +1090,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT_CHAR: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1085,7 +1098,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
+ ObjPtr<mirror::CharArray> array = a->AsCharArray();
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1096,7 +1109,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT_SHORT: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1104,7 +1117,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
+ ObjPtr<mirror::ShortArray> array = a->AsShortArray();
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1115,7 +1128,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1124,7 +1137,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
- auto* array = down_cast<IntArray*>(a);
+ ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1135,7 +1148,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT_WIDE: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
@@ -1144,7 +1157,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
- LongArray* array = down_cast<LongArray*>(a);
+ ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1155,15 +1168,15 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::APUT_OBJECT: {
PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ ObjPtr<mirror::Object> val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
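
APUT_OBJECT is the one array store that needs a type check on top of the bounds check. A toy model of the two guards (hypothetical single-inheritance classes; the real CheckAssignable walks mirror::Class hierarchies):

#include <cstdint>

struct Klass { const Klass* super; };
struct Object { const Klass* klass; };

bool IsAssignableFrom(const Klass* dest, const Klass* src) {
  for (const Klass* k = src; k != nullptr; k = k->super) {
    if (k == dest) return true;
  }
  return false;
}

bool CanStore(int32_t index, int32_t length,
              const Klass* component_type, const Object* value) {
  if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(length)) {
    return false;  // would throw ArrayIndexOutOfBoundsException
  }
  if (value == nullptr) {
    return true;   // null is assignable to any component type
  }
  return IsAssignableFrom(component_type, value->klass);  // else ArrayStoreException
}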
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index d0c9386bd9..267df2e219 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -21,6 +21,7 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "jvalue.h"
+#include "obj_ptr.h"
namespace art {
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index cf8d4bd1b5..46d5af179f 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -291,11 +291,11 @@ extern "C" size_t MterpConstString(uint32_t index,
ShadowFrame* shadow_frame,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- String* s = ResolveString(self, *shadow_frame, index);
+ ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, index);
if (UNLIKELY(s == nullptr)) {
return true;
}
- shadow_frame->SetVRegReference(tgt_vreg, s);
+ shadow_frame->SetVRegReference(tgt_vreg, s.Ptr());
return false;
}
@@ -304,7 +304,7 @@ extern "C" size_t MterpConstClass(uint32_t index,
ShadowFrame* shadow_frame,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
+ mirror::Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
}
@@ -317,12 +317,12 @@ extern "C" size_t MterpCheckCast(uint32_t index,
art::ArtMethod* method,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
}
// Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
- Object* obj = vreg_addr->AsMirrorPtr();
+ mirror::Object* obj = vreg_addr->AsMirrorPtr();
if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
return true;
@@ -335,16 +335,16 @@ extern "C" size_t MterpInstanceOf(uint32_t index,
art::ArtMethod* method,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return false; // Caller will check for pending exception. Return value unimportant.
}
// Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
- Object* obj = vreg_addr->AsMirrorPtr();
+ mirror::Object* obj = vreg_addr->AsMirrorPtr();
return (obj != nullptr) && obj->InstanceOf(c);
}
-extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
+extern "C" size_t MterpFillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
REQUIRES_SHARED(Locks::mutator_lock_) {
return FillArrayData(obj, payload);
}
@@ -352,9 +352,12 @@ extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPa
extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- Object* obj = nullptr;
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
- self, false, false);
+ mirror::Object* obj = nullptr;
+ mirror::Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+ shadow_frame->GetMethod(),
+ self,
+ false,
+ false);
if (LIKELY(c != nullptr)) {
if (UNLIKELY(c->IsStringClass())) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -404,13 +407,13 @@ extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
uint32_t inst_data)
REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
- Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
+ mirror::Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
return false;
}
int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
- Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ mirror::Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
+ mirror::ObjectArray<mirror::Object>* array = a->AsObjectArray<mirror::Object>();
if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
array->SetWithoutChecks<false>(index, val);
return true;
@@ -442,7 +445,7 @@ extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
- Object* obj = AllocArrayFromCode<false, true>(
+ mirror::Object* obj = AllocArrayFromCode<false, true>(
inst->VRegC_22c(), length, shadow_frame->GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
if (UNLIKELY(obj == nullptr)) {
@@ -678,7 +681,7 @@ extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t i
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
}
- ObjectArray<Object>* array = arr->AsObjectArray<Object>();
+ mirror::ObjectArray<mirror::Object>* array = arr->AsObjectArray<mirror::Object>();
if (LIKELY(array->CheckIsValidIndex(index))) {
return array->GetWithoutChecks(index);
} else {
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 5a62bd77d5..75b91b1885 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -564,7 +564,7 @@ void UnstartedRuntime::UnstartedClassLoaderGetResourceAsStream(
this_classloader_class.Get()) {
AbortTransactionOrFail(self,
"Unsupported classloader type %s for getResourceAsStream",
- Class::PrettyClass(this_classloader_class.Get()).c_str());
+ mirror::Class::PrettyClass(this_classloader_class.Get()).c_str());
return;
}
}
@@ -608,10 +608,11 @@ static void PrimitiveArrayCopy(Thread* self,
int32_t length)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
- AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
- Class::PrettyDescriptor(
+ AbortTransactionOrFail(self,
+ "Types mismatched in arraycopy: %s vs %s.",
+ mirror::Class::PrettyDescriptor(
src_array->GetClass()->GetComponentType()).c_str(),
- Class::PrettyDescriptor(
+ mirror::Class::PrettyDescriptor(
dst_array->GetClass()->GetComponentType()).c_str());
return;
}
@@ -677,9 +678,9 @@ void UnstartedRuntime::UnstartedSystemArraycopy(
GetComponentType();
if (trg_type->IsPrimitiveInt()) {
AbortTransactionOrFail(self, "Type mismatch in arraycopy: %s vs %s",
- Class::PrettyDescriptor(
+ mirror::Class::PrettyDescriptor(
src_array->GetClass()->GetComponentType()).c_str(),
- Class::PrettyDescriptor(
+ mirror::Class::PrettyDescriptor(
dst_array->GetClass()->GetComponentType()).c_str());
return;
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 9b4327f137..8e76aeb7cd 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -54,10 +54,10 @@ bool JavaVMExt::IsBadJniVersion(int version) {
class SharedLibrary {
public:
SharedLibrary(JNIEnv* env, Thread* self, const std::string& path, void* handle,
- jobject class_loader, void* class_loader_allocator)
+ bool needs_native_bridge, jobject class_loader, void* class_loader_allocator)
: path_(path),
handle_(handle),
- needs_native_bridge_(false),
+ needs_native_bridge_(needs_native_bridge),
class_loader_(env->NewWeakGlobalRef(class_loader)),
class_loader_allocator_(class_loader_allocator),
jni_on_load_lock_("JNI_OnLoad lock"),
@@ -73,9 +73,7 @@ class SharedLibrary {
self->GetJniEnv()->DeleteWeakGlobalRef(class_loader_);
}
- if (!needs_native_bridge_) {
- android::CloseNativeLibrary(handle_);
- }
+ android::CloseNativeLibrary(handle_, needs_native_bridge_);
}
jweak GetClassLoader() const {
@@ -131,8 +129,8 @@ class SharedLibrary {
jni_on_load_cond_.Broadcast(self);
}
- void SetNeedsNativeBridge() {
- needs_native_bridge_ = true;
+ void SetNeedsNativeBridge(bool needs) {
+ needs_native_bridge_ = needs;
}
bool NeedsNativeBridge() const {
@@ -422,10 +420,13 @@ JavaVMExt::JavaVMExt(Runtime* runtime,
tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
|| VLOG_IS_ON(third_party_jni)),
trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
- globals_(kGlobalsMax, kGlobal, error_msg),
+ globals_(kGlobalsMax, kGlobal, IndirectReferenceTable::ResizableCapacity::kNo, error_msg),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_(kWeakGlobalsMax, kWeakGlobal, error_msg),
+ weak_globals_(kWeakGlobalsMax,
+ kWeakGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ error_msg),
allow_accessing_weak_globals_(true),
weak_globals_add_condition_("weak globals add condition",
(CHECK(Locks::jni_weak_globals_lock_ != nullptr),
@@ -551,7 +552,7 @@ jobject JavaVMExt::AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
return nullptr;
}
WriterMutexLock mu(self, *Locks::jni_globals_lock_);
- IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
+ IndirectRef ref = globals_.Add(kIRTFirstSegment, obj);
return reinterpret_cast<jobject>(ref);
}
@@ -563,7 +564,7 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
- IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+ IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
return reinterpret_cast<jweak>(ref);
}
@@ -572,7 +573,7 @@ void JavaVMExt::DeleteGlobalRef(Thread* self, jobject obj) {
return;
}
WriterMutexLock mu(self, *Locks::jni_globals_lock_);
- if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ if (!globals_.Remove(kIRTFirstSegment, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
<< "failed to find entry";
}
@@ -583,7 +584,7 @@ void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
return;
}
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
- if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ if (!weak_globals_.Remove(kIRTFirstSegment, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
<< "failed to find entry";
}
@@ -680,7 +681,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref
// This only applies in the case where MayAccessWeakGlobals goes from false to true. In the other
// case, it may be racy; this is benign since DecodeWeakGlobalLocked does the correct behavior
// if MayAccessWeakGlobals is false.
- DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
return weak_globals_.SynchronizedGet(ref);
}
@@ -699,7 +700,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectR
}
ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref) {
- DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
DCHECK(Runtime::Current()->IsShuttingDown(self));
if (self != nullptr) {
return DecodeWeakGlobal(self, ref);
@@ -712,7 +713,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, I
}
bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
- DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
@@ -814,24 +815,18 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
Locks::mutator_lock_->AssertNotHeld(self);
const char* path_str = path.empty() ? nullptr : path.c_str();
+ bool needs_native_bridge = false;
void* handle = android::OpenNativeLibrary(env,
runtime_->GetTargetSdkVersion(),
path_str,
class_loader,
- library_path);
-
- bool needs_native_bridge = false;
- if (handle == nullptr) {
- if (android::NativeBridgeIsSupported(path_str)) {
- handle = android::NativeBridgeLoadLibrary(path_str, RTLD_NOW);
- needs_native_bridge = true;
- }
- }
+ library_path,
+ &needs_native_bridge,
+ error_msg);
VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_NOW) returned " << handle << "]";
if (handle == nullptr) {
- *error_msg = dlerror();
VLOG(jni) << "dlopen(\"" << path << "\", RTLD_NOW) failed: " << *error_msg;
return false;
}
@@ -847,7 +842,14 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
{
// Create SharedLibrary ahead of taking the libraries lock to maintain lock ordering.
std::unique_ptr<SharedLibrary> new_library(
- new SharedLibrary(env, self, path, handle, class_loader, class_loader_allocator));
+ new SharedLibrary(env,
+ self,
+ path,
+ handle,
+ needs_native_bridge,
+ class_loader,
+ class_loader_allocator));
+
MutexLock mu(self, *Locks::jni_libraries_lock_);
library = libraries_->Get(path);
if (library == nullptr) { // We won race to get libraries_lock.
@@ -864,11 +866,7 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
bool was_successful = false;
- void* sym;
- if (needs_native_bridge) {
- library->SetNeedsNativeBridge();
- }
- sym = library->FindSymbol("JNI_OnLoad", nullptr);
+ void* sym = library->FindSymbol("JNI_OnLoad", nullptr);
if (sym == nullptr) {
VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
was_successful = true;
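
With this change the native-bridge decision is made once, inside android::OpenNativeLibrary, and then travels with the handle instead of being patched in later through SetNeedsNativeBridge. A sketch of the resulting ownership shape (hypothetical close function, not the ART class):

// The flag is fixed at construction and consulted exactly once, at close.
class LibraryHandle {
 public:
  LibraryHandle(void* handle, bool needs_native_bridge)
      : handle_(handle), needs_native_bridge_(needs_native_bridge) {}
  ~LibraryHandle() { CloseLibrary(handle_, needs_native_bridge_); }

 private:
  static void CloseLibrary(void* handle, bool via_bridge) {
    // Stand-in for android::CloseNativeLibrary(handle, via_bridge).
    (void)handle; (void)via_bridge;
  }
  void* handle_;
  bool needs_native_bridge_;
};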
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 953b1c0540..4c1006360b 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -683,7 +683,7 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
}
}
-void Jit::InvokeVirtualOrInterface(mirror::Object* this_object,
+void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee ATTRIBUTE_UNUSED) {
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index d3178b0b37..a7824378c2 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -22,9 +22,10 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
+#include "jit/profile_saver_options.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offline_profiling_info.h"
-#include "jit/profile_saver_options.h"
#include "thread_pool.h"
namespace art {
@@ -114,7 +115,7 @@ class Jit {
void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
REQUIRES_SHARED(Locks::mutator_lock_);
- void InvokeVirtualOrInterface(mirror::Object* this_object,
+ void InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 8eca8fcba9..342e0d2a53 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -68,8 +68,8 @@ JNIEnvExt* JNIEnvExt::Create(Thread* self_in, JavaVMExt* vm_in, std::string* err
JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in, std::string* error_msg)
: self(self_in),
vm(vm_in),
- local_ref_cookie(IRT_FIRST_SEGMENT),
- locals(kLocalsInitial, kLocal, error_msg),
+ local_ref_cookie(kIRTFirstSegment),
+ locals(kLocalsInitial, kLocal, IndirectReferenceTable::ResizableCapacity::kYes, error_msg),
check_jni(false),
runtime_deleted(false),
critical(0),
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index e89debbf90..5cca0aef9b 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -64,7 +64,7 @@ struct JNIEnvExt : public JNIEnv {
JavaVMExt* const vm;
// Cookie used when using the local indirect reference table.
- uint32_t local_ref_cookie;
+ IRTSegmentState local_ref_cookie;
// JNI local references.
IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
@@ -72,7 +72,7 @@ struct JNIEnvExt : public JNIEnv {
// Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
// TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
// to a native method.
- std::vector<uint32_t> stacked_local_ref_cookies;
+ std::vector<IRTSegmentState> stacked_local_ref_cookies;
// Frequently-accessed fields cached from JavaVM.
bool check_jni;
@@ -131,7 +131,7 @@ class ScopedJniEnvLocalRefState {
private:
JNIEnvExt* const env_;
- uint32_t saved_local_ref_cookie_;
+ IRTSegmentState saved_local_ref_cookie_;
DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
};
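
Note: local_ref_cookie now records an IRTSegmentState (the IRT's top index) rather than a bare uint32_t, so saving and restoring it around PushLocalFrame/PopLocalFrame stays type-safe. A minimal sketch of that save/restore shape, with IRTSegmentState reduced to a single top_index field (simplified; not ART's full definition):

    struct IRTSegmentState { uint32_t top_index; };

    class ScopedLocalRefState {  // mirrors ScopedJniEnvLocalRefState above
     public:
      explicit ScopedLocalRefState(IRTSegmentState* cookie)
          : cookie_(cookie), saved_(*cookie) {}      // save on entry
      ~ScopedLocalRefState() { *cookie_ = saved_; }  // restore on exit
     private:
      IRTSegmentState* const cookie_;
      const IRTSegmentState saved_;
    };
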
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 3839e087dc..0217a67559 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2374,7 +2374,7 @@ class JNI {
// Do we definitely know what kind of reference this is?
IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
switch (kind) {
case kLocal:
return JNILocalRefType;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 9479a181c6..e9909352ec 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -2308,21 +2308,25 @@ TEST_F(JniInternalTest, IndirectReferenceTableOffsets) {
// by modifying memory.
// The parameters don't really matter here.
std::string error_msg;
- IndirectReferenceTable irt(5, IndirectRefKind::kGlobal, &error_msg);
+ IndirectReferenceTable irt(5,
+ IndirectRefKind::kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
ASSERT_TRUE(irt.IsValid()) << error_msg;
- uint32_t old_state = irt.GetSegmentState();
+ IRTSegmentState old_state = irt.GetSegmentState();
// Write some new state directly. We invert parts of old_state to ensure a new value.
- uint32_t new_state = old_state ^ 0x07705005;
- ASSERT_NE(old_state, new_state);
+ IRTSegmentState new_state;
+ new_state.top_index = old_state.top_index ^ 0x07705005;
+ ASSERT_NE(old_state.top_index, new_state.top_index);
uint8_t* base = reinterpret_cast<uint8_t*>(&irt);
int32_t segment_state_offset =
IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
- *reinterpret_cast<uint32_t*>(base + segment_state_offset) = new_state;
+ *reinterpret_cast<IRTSegmentState*>(base + segment_state_offset) = new_state;
// Read and compare.
- EXPECT_EQ(new_state, irt.GetSegmentState());
+ EXPECT_EQ(new_state.top_index, irt.GetSegmentState().top_index);
}
// Test the offset computation of JNIEnvExt offsets. b/26071368.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index bb07fcbf37..1ec59b3cc7 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -318,11 +318,18 @@ MemMap* MemMap::MapAnonymous(const char* name,
debug_friendly_name += name;
fd.Reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count),
/* check_usage */ false);
+
if (fd.Fd() == -1) {
- *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
- return nullptr;
+ // We failed to create the ashmem region. Print a warning, but continue
+ // anyway by creating a true anonymous mmap with an fd of -1. It is
+ // better to use an unlabelled anonymous map than to fail to create a
+ // map at all.
+ PLOG(WARNING) << "ashmem_create_region failed for '" << name << "'";
+ } else {
+ // We succeeded in creating the ashmem region. Use the created ashmem
+ // region as backing for the mmap.
+ flags &= ~MAP_ANONYMOUS;
}
- flags &= ~MAP_ANONYMOUS;
}
// We need to store and potentially set an error number for pretty printing of errors
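
Note: the error path now degrades instead of failing: when ashmem_create_region fails, the map is created as a plain (unlabelled) anonymous mapping with fd -1; only on success is MAP_ANONYMOUS cleared so the ashmem fd backs the map. The same fallback in isolation (a sketch against the POSIX mmap API, with ashmem_create_region as in the code above):

    int fd = ashmem_create_region(name, length);  // may fail
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    if (fd >= 0) {
      flags &= ~MAP_ANONYMOUS;  // back the mapping with the ashmem fd
    }                           // else: keep MAP_ANONYMOUS and fd == -1
    void* addr = mmap(nullptr, length, PROT_READ | PROT_WRITE, flags, fd, 0);
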
@@ -354,7 +361,6 @@ MemMap* MemMap::MapAnonymous(const char* name,
}
return nullptr;
}
- std::ostringstream check_map_request_error_msg;
if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
@@ -441,7 +447,6 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
}
return nullptr;
}
- std::ostringstream check_map_request_error_msg;
if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
@@ -918,4 +923,23 @@ void MemMap::TryReadable() {
}
}
+void ZeroAndReleasePages(void* address, size_t length) {
+ uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
+ uint8_t* const mem_end = mem_begin + length;
+ uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
+ uint8_t* const page_end = AlignDown(mem_end, kPageSize);
+ if (!kMadviseZeroes || page_begin >= page_end) {
+ // No possible area to madvise.
+ std::fill(mem_begin, mem_end, 0);
+ } else {
+ // Spans one or more pages.
+ DCHECK_LE(mem_begin, page_begin);
+ DCHECK_LE(page_begin, page_end);
+ DCHECK_LE(page_end, mem_end);
+ std::fill(mem_begin, page_begin, 0);
+ CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
+ std::fill(page_end, mem_end, 0);
+ }
+}
+
} // namespace art
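
Note: ZeroAndReleasePages memsets only the unaligned head and tail, and returns the whole pages in between to the kernel. A worked example, assuming kPageSize == 4096:

    // address = 0x1000F00, length = 0x3000
    // mem_begin  = 0x1000F00            mem_end  = 0x1003F00
    // page_begin = AlignUp   -> 0x1001000
    // page_end   = AlignDown -> 0x1003000
    // => std::fill zeroes [0x1000F00, 0x1001000) and [0x1003000, 0x1003F00);
    //    madvise(MADV_DONTNEED) releases the two whole pages in between.
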
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 597f0d46e1..049ae12acf 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -241,9 +241,13 @@ class MemMap {
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
+
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps);
+// Zero and release pages if possible, no requirements on alignments.
+void ZeroAndReleasePages(void* address, size_t length);
+
} // namespace art
#endif // ART_RUNTIME_MEM_MAP_H_
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
index 5f9824c079..b488133259 100644
--- a/runtime/method_handles-inl.h
+++ b/runtime/method_handles-inl.h
@@ -97,16 +97,79 @@ template <bool is_range> class ArgIterator {
size_t arg_index_;
};
+REQUIRES_SHARED(Locks::mutator_lock_)
+inline bool ConvertJValue(Handle<mirror::Class> from,
+                          Handle<mirror::Class> to,
+                          const JValue& from_value,
+                          JValue* to_value) {
+ const Primitive::Type from_type = from->GetPrimitiveType();
+ const Primitive::Type to_type = to->GetPrimitiveType();
+
+ // This method must be called only when the types don't match.
+ DCHECK(from.Get() != to.Get());
+
+ if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
+ // Throws a ClassCastException if we're unable to convert a primitive value.
+ return ConvertPrimitiveValue(false, from_type, to_type, from_value, to_value);
+ } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
+ // They're both reference types. If "from" is null, we can pass it
+ // through unchanged. If not, we must generate a cast exception if
+ // |to| is not assignable from the dynamic type of |ref|.
+ mirror::Object* const ref = from_value.GetL();
+ if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
+ to_value->SetL(ref);
+ return true;
+ } else {
+ ThrowClassCastException(to.Get(), ref->GetClass());
+ return false;
+ }
+ } else {
+ // Precisely one of the source or the destination are reference types.
+ // We must box or unbox.
+ if (to_type == Primitive::kPrimNot) {
+ // The target type is a reference, we must box.
+ Primitive::Type type;
+ // TODO(narayan): This is a CHECK for now. There might be a few corner cases
+      // here that we might not have handled yet. For example, if |to| is java/lang/Number;,
+ // we will need to box this "naturally".
+ CHECK(GetPrimitiveType(to.Get(), &type));
+ // First perform a primitive conversion to the unboxed equivalent of the target,
+ // if necessary. This should be for the rarer cases like (int->Long) etc.
+ if (UNLIKELY(from_type != type)) {
+ if (!ConvertPrimitiveValue(false, from_type, type, from_value, to_value)) {
+ return false;
+ }
+ } else {
+ *to_value = from_value;
+ }
+
+ // Then perform the actual boxing, and then set the reference.
+ ObjPtr<mirror::Object> boxed = BoxPrimitive(type, from_value);
+ to_value->SetL(boxed.Ptr());
+ return true;
+ } else {
+ // The target type is a primitive, we must unbox.
+ ObjPtr<mirror::Object> ref(from_value.GetL());
+
+ // Note that UnboxPrimitiveForResult already performs all of the type
+ // conversions that we want, based on |to|.
+        return UnboxPrimitiveForResult(ref, to.Get(), to_value);
+ }
+ }
+
+ return true;
+}
+
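
Note: ConvertJValue dispatches on whether each side is a primitive or a reference. A compact, self-contained sketch of the same decision table (hypothetical names, not ART API):

    enum class Kind { kPrimitive, kReference };

    const char* ConversionPath(Kind from, Kind to) {
      if (from == Kind::kPrimitive && to == Kind::kPrimitive) return "widen/narrow";
      if (from == Kind::kReference && to == Kind::kReference) return "assignability check";
      if (to == Kind::kReference) return "convert to unboxed target type, then box";
      return "unbox via UnboxPrimitiveForResult";
    }

So an (int -> java/lang/Long) conversion first widens int to long and only then boxes, matching the UNLIKELY(from_type != type) branch above.
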
template <bool is_range>
-bool PerformArgumentConversions(Thread* self,
- Handle<mirror::MethodType> callsite_type,
- Handle<mirror::MethodType> callee_type,
- const ShadowFrame& caller_frame,
- uint32_t first_src_reg,
- uint32_t first_dest_reg,
- const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- ShadowFrame* callee_frame,
- JValue* result) {
+bool ConvertAndCopyArgumentsFromCallerFrame(Thread* self,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> callee_type,
+ const ShadowFrame& caller_frame,
+ uint32_t first_src_reg,
+ uint32_t first_dest_reg,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ ShadowFrame* callee_frame) {
StackHandleScope<4> hs(self);
Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(callsite_type->GetPTypes()));
Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
@@ -114,7 +177,6 @@ bool PerformArgumentConversions(Thread* self,
const int32_t num_method_params = from_types->GetLength();
if (to_types->GetLength() != num_method_params) {
ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
- result->SetJ(0);
return false;
}
@@ -149,105 +211,32 @@ bool PerformArgumentConversions(Thread* self,
}
continue;
- } else if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
- // They are both primitive types - we should perform any widening or
- // narrowing conversions as applicable.
+ } else {
JValue from_value;
JValue to_value;
if (Primitive::Is64BitType(from_type)) {
from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
+ } else if (from_type == Primitive::kPrimNot) {
+ from_value.SetL(caller_frame.GetVRegReference(input_args.Next()));
} else {
from_value.SetI(caller_frame.GetVReg(input_args.Next()));
}
- // Throws a ClassCastException if we're unable to convert a primitive value.
- if (!ConvertPrimitiveValue(false, from_type, to_type, from_value, &to_value)) {
+ if (!ConvertJValue(from, to, from_value, &to_value)) {
DCHECK(self->IsExceptionPending());
- result->SetL(0);
return false;
}
if (Primitive::Is64BitType(to_type)) {
callee_frame->SetVRegLong(first_dest_reg + to_arg_index, to_value.GetJ());
to_arg_index += 2;
- } else {
- callee_frame->SetVReg(first_dest_reg + to_arg_index, to_value.GetI());
- ++to_arg_index;
- }
- } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
- // They're both reference types. If "from" is null, we can pass it
- // through unchanged. If not, we must generate a cast exception if
- // |to| is not assignable from the dynamic type of |ref|.
- const size_t next_arg_reg = input_args.Next();
- mirror::Object* const ref = caller_frame.GetVRegReference(next_arg_reg);
- if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
- interpreter::AssignRegister(callee_frame,
- caller_frame,
- first_dest_reg + to_arg_index,
- next_arg_reg);
+ } else if (to_type == Primitive::kPrimNot) {
+ callee_frame->SetVRegReference(first_dest_reg + to_arg_index, to_value.GetL());
++to_arg_index;
} else {
- ThrowClassCastException(to.Get(), ref->GetClass());
- result->SetL(0);
- return false;
- }
- } else {
- // Precisely one of the source or the destination are reference types.
- // We must box or unbox.
- if (to_type == Primitive::kPrimNot) {
- // The target type is a reference, we must box.
- Primitive::Type type;
- // TODO(narayan): This is a CHECK for now. There might be a few corner cases
- // here that we might not have handled yet. For exmple, if |to| is java/lang/Number;,
- // we will need to box this "naturally".
- CHECK(GetPrimitiveType(to.Get(), &type));
-
- JValue from_value;
- JValue to_value;
-
- if (Primitive::Is64BitType(from_type)) {
- from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
- } else {
- from_value.SetI(caller_frame.GetVReg(input_args.Next()));
- }
-
- // First perform a primitive conversion to the unboxed equivalent of the target,
- // if necessary. This should be for the rarer cases like (int->Long) etc.
- if (UNLIKELY(from_type != type)) {
- if (!ConvertPrimitiveValue(false, from_type, type, from_value, &to_value)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
- } else {
- to_value = from_value;
- }
-
- // Then perform the actual boxing, and then set the reference.
- ObjPtr<mirror::Object> boxed = BoxPrimitive(type, to_value);
- callee_frame->SetVRegReference(first_dest_reg + to_arg_index, boxed.Ptr());
+ callee_frame->SetVReg(first_dest_reg + to_arg_index, to_value.GetI());
++to_arg_index;
- } else {
- // The target type is a primitive, we must unbox.
- ObjPtr<mirror::Object> ref(caller_frame.GetVRegReference(input_args.Next()));
-
- // Note that UnboxPrimitiveForResult already performs all of the type
- // conversions that we want, based on |to|.
- JValue unboxed_value;
- if (!UnboxPrimitiveForResult(ref, to.Get(), &unboxed_value)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
-
- if (Primitive::Is64BitType(to_type)) {
- callee_frame->SetVRegLong(first_dest_reg + to_arg_index, unboxed_value.GetJ());
- to_arg_index += 2;
- } else {
- callee_frame->SetVReg(first_dest_reg + to_arg_index, unboxed_value.GetI());
- ++to_arg_index;
- }
}
}
}
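
Note: the rewritten loop advances the destination index by register width: a long/double occupies a vreg pair, everything else a single vreg. The bookkeeping in isolation (hypothetical names; a sketch, not the ART types):

    size_t dst = first_dest_reg;
    for (const Arg& a : args) {
      if (Is64Bit(a.type)) {
        frame->SetVRegLong(dst, a.value.GetJ());
        dst += 2;  // a long/double spans a vreg pair
      } else if (a.type == kReference) {
        frame->SetVRegReference(dst, a.value.GetL());
        dst += 1;
      } else {
        frame->SetVReg(dst, a.value.GetI());
        dst += 1;
      }
    }
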
@@ -255,6 +244,25 @@ bool PerformArgumentConversions(Thread* self,
return true;
}
+// Similar to |ConvertAndCopyArgumentsFromCallerFrame|, except that the
+// arguments are copied from an |EmulatedStackFrame|.
+template <bool is_range>
+bool ConvertAndCopyArgumentsFromEmulatedStackFrame(Thread* self,
+ ObjPtr<mirror::Object> emulated_stack_frame,
+ Handle<mirror::MethodType> callee_type,
+ const uint32_t first_dest_reg,
+ ShadowFrame* callee_frame) {
+ UNUSED(self);
+ UNUSED(emulated_stack_frame);
+ UNUSED(callee_type);
+ UNUSED(first_dest_reg);
+ UNUSED(callee_frame);
+
+ UNIMPLEMENTED(FATAL) << "ConvertAndCopyArgumentsFromEmulatedStackFrame is unimplemented";
+ return false;
+}
+
} // namespace art
#endif // ART_RUNTIME_METHOD_HANDLES_INL_H_
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index a36b66db9a..5175dceed3 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -42,12 +42,13 @@ enum MethodHandleKind {
kInvokeDirect,
kInvokeStatic,
kInvokeInterface,
+ kInvokeTransform,
kInstanceGet,
kInstancePut,
kStaticGet,
kStaticPut,
kLastValidKind = kStaticPut,
- kLastInvokeKind = kInvokeInterface
+ kLastInvokeKind = kInvokeTransform
};
// Whether the given method handle kind is some variant of an invoke.
@@ -55,21 +56,38 @@ inline bool IsInvoke(const MethodHandleKind handle_kind) {
return handle_kind <= kLastInvokeKind;
}
+// Performs a single argument conversion from type |from| to a distinct
+// type |to|. Returns true on success, false otherwise.
+REQUIRES_SHARED(Locks::mutator_lock_)
+bool ConvertJValue(Handle<mirror::Class> from,
+ Handle<mirror::Class> to,
+ const JValue& from_value,
+ JValue* to_value) ALWAYS_INLINE;
+
// Perform argument conversions between |callsite_type| (the type of the
// incoming arguments) and |callee_type| (the type of the method being
// invoked). These include widening and narrowing conversions as well as
// boxing and unboxing. Returns true on success, false on failure. A
// pending exception will always be set on failure.
template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
-bool PerformArgumentConversions(Thread* self,
- Handle<mirror::MethodType> callsite_type,
- Handle<mirror::MethodType> callee_type,
- const ShadowFrame& caller_frame,
- uint32_t first_src_reg,
- uint32_t first_dest_reg,
- const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- ShadowFrame* callee_frame,
- JValue* result);
+bool ConvertAndCopyArgumentsFromCallerFrame(Thread* self,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> callee_type,
+ const ShadowFrame& caller_frame,
+ uint32_t first_src_reg,
+ uint32_t first_dest_reg,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ ShadowFrame* callee_frame);
+
+// Similar to |ConvertAndCopyArgumentsFromCallerFrame|, except that the
+// arguments are copied from an |EmulatedStackFrame|.
+template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
+bool ConvertAndCopyArgumentsFromEmulatedStackFrame(Thread* self,
+ ObjPtr<mirror::Object> emulated_stack_frame,
+ Handle<mirror::MethodType> callee_type,
+ const uint32_t first_dest_reg,
+ ShadowFrame* callee_frame);
+
} // namespace art
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 2e70c9b43f..3bf9d94410 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -275,7 +275,7 @@ inline void Object::AssertReadBarrierPointer() const {
}
template<VerifyObjectFlags kVerifyFlags>
-inline bool Object::VerifierInstanceOf(Class* klass) {
+inline bool Object::VerifierInstanceOf(ObjPtr<Class> klass) {
DCHECK(klass != nullptr);
DCHECK(GetClass<kVerifyFlags>() != nullptr);
return klass->IsInterface() || InstanceOf(klass);
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f1ab72a989..886637be5c 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -119,7 +119,7 @@ class MANAGED LOCKABLE Object {
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool VerifierInstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool VerifierInstanceOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool InstanceOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 18529561cf..8d85425c10 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -482,6 +482,31 @@ static jobjectArray VMDebug_getRuntimeStatsInternal(JNIEnv* env, jclass) {
return result;
}
+static void VMDebug_attachAgent(JNIEnv* env, jclass, jstring agent) {
+ if (agent == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowNullPointerException("agent is null");
+ return;
+ }
+
+ if (!Dbg::IsJdwpAllowed()) {
+ ScopedObjectAccess soa(env);
+ ThrowSecurityException("Can't attach agent, process is not debuggable.");
+ return;
+ }
+
+ std::string filename;
+ {
+ ScopedUtfChars chars(env, agent);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ filename = chars.c_str();
+ }
+
+ Runtime::Current()->AttachAgent(filename);
+}
+
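
Note: the filename is copied out of the ScopedUtfChars block on purpose: the UTF buffer it borrows from the JNI string is released when the scope ends. The lifetime pattern in isolation (a sketch):

    std::string filename;
    {
      ScopedUtfChars chars(env, java_string);  // borrows a JNI-owned buffer
      if (env->ExceptionCheck()) {
        return;                                // chars may hold nullptr
      }
      filename = chars.c_str();                // deep copy while still valid
    }                                          // JNI buffer released here
    // 'filename' stays valid for the AttachAgent call.
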
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"),
@@ -514,7 +539,8 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, stopMethodTracing, "()V"),
NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "!()J"),
NATIVE_METHOD(VMDebug, getRuntimeStatInternal, "(I)Ljava/lang/String;"),
- NATIVE_METHOD(VMDebug, getRuntimeStatsInternal, "()[Ljava/lang/String;")
+ NATIVE_METHOD(VMDebug, getRuntimeStatsInternal, "()[Ljava/lang/String;"),
+ NATIVE_METHOD(VMDebug, attachAgent, "(Ljava/lang/String;)V"),
};
void register_dalvik_system_VMDebug(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 54214109ea..866dc7f73f 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -317,7 +317,7 @@ static void PreloadDexCachesResolveType(Thread* self,
if (class_name[1] == '\0') {
klass = linker->FindPrimitiveClass(class_name[0]);
} else {
- klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), nullptr);
+ klass = linker->LookupClass(self, class_name, nullptr);
}
if (klass == nullptr) {
return;
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index ff082845e1..e5bab36870 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -20,12 +20,41 @@
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
+#include "obj_ptr.h"
#include "scoped_fast_native_object_access-inl.h"
#include "ScopedUtfChars.h"
#include "zip_archive.h"
namespace art {
+// A class so we can be friends with ClassLinker and access internal methods.
+class VMClassLoader {
+ public:
+ static mirror::Class* LookupClass(ClassLinker* cl,
+ Thread* self,
+ const char* descriptor,
+ size_t hash,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return cl->LookupClass(self, descriptor, hash, class_loader);
+ }
+
+ static ObjPtr<mirror::Class> FindClassInPathClassLoader(ClassLinker* cl,
+ ScopedObjectAccessAlreadyRunnable& soa,
+ Thread* self,
+ const char* descriptor,
+ size_t hash,
+ Handle<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> result;
+ if (cl->FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &result)) {
+ return result;
+ }
+ return nullptr;
+ }
+};
+
static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader,
jstring javaName) {
ScopedFastNativeObjectAccess soa(env);
@@ -35,12 +64,16 @@ static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoa
return nullptr;
}
ClassLinker* cl = Runtime::Current()->GetClassLinker();
+
+ // Compute hash once.
std::string descriptor(DotToDescriptor(name.c_str()));
const size_t descriptor_hash = ComputeModifiedUtf8Hash(descriptor.c_str());
- ObjPtr<mirror::Class> c = cl->LookupClass(soa.Self(),
- descriptor.c_str(),
- descriptor_hash,
- loader.Ptr());
+
+ ObjPtr<mirror::Class> c = VMClassLoader::LookupClass(cl,
+ soa.Self(),
+ descriptor.c_str(),
+ descriptor_hash,
+ loader);
if (c != nullptr && c->IsResolved()) {
return soa.AddLocalReference<jclass>(c);
}
@@ -61,17 +94,26 @@ static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoa
}
return nullptr;
}
+
+  // Hard-coded performance optimization: we know that all failed libcore calls to
+  // findLoadedClass are followed by a call to the classloader to actually load the class.
if (loader != nullptr) {
// Try the common case.
StackHandleScope<1> hs(soa.Self());
- cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
- hs.NewHandle(loader), &c);
+ c = VMClassLoader::FindClassInPathClassLoader(cl,
+ soa,
+ soa.Self(),
+ descriptor.c_str(),
+ descriptor_hash,
+ hs.NewHandle(loader));
if (c != nullptr) {
return soa.AddLocalReference<jclass>(c);
}
}
- // Class wasn't resolved so it may be erroneous or not yet ready, force the caller to go into
- // the regular loadClass code.
+
+  // The class wasn't loaded yet, and our fast path did not apply (e.g., we didn't
+  // understand the classloader chain).
return nullptr;
}
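
Note: the VMClassLoader wrapper above exists only to be a friend of ClassLinker, so this JNI glue can reach private lookup methods without widening ClassLinker's public surface. The general shape of that pattern (hypothetical names):

    class Engine {
      friend class EnginePeer;  // the one sanctioned outside caller
     private:
      int InternalLookup(int key);
    };

    class EnginePeer {
     public:
      static int Lookup(Engine* e, int key) {
        return e->InternalLookup(key);  // allowed via friendship
      }
    };
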
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b99bcb5f7d..63a0e14bf3 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -301,6 +301,10 @@ class OatFile {
// error and sets found to false.
static OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found);
+ VdexFile* GetVdexFile() const {
+ return vdex_.get();
+ }
+
protected:
OatFile(const std::string& filename, bool executable);
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 59e01eafa0..12692a168d 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -34,6 +34,8 @@
#include "art_jvmti.h"
#include "base/logging.h"
#include "gc/allocation_listener.h"
+#include "gc/gc_pause_listener.h"
+#include "gc/heap.h"
#include "instrumentation.h"
#include "jni_env_ext-inl.h"
#include "mirror/class.h"
@@ -131,7 +133,7 @@ class JvmtiAllocationListener : public art::gc::AllocationListener {
explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_EQ(self, art::Thread::Current());
if (handler_->IsEventEnabledAnywhere(JVMTI_EVENT_VM_OBJECT_ALLOC)) {
@@ -185,11 +187,74 @@ static void SetupObjectAllocationTracking(art::gc::AllocationListener* listener,
}
}
+// Report GC pauses (see spec) as GARBAGE_COLLECTION_START and GARBAGE_COLLECTION_FINISH.
+class JvmtiGcPauseListener : public art::gc::GcPauseListener {
+ public:
+ explicit JvmtiGcPauseListener(EventHandler* handler)
+ : handler_(handler),
+ start_enabled_(false),
+ finish_enabled_(false) {}
+
+ void StartPause() OVERRIDE {
+ handler_->DispatchEvent(nullptr, JVMTI_EVENT_GARBAGE_COLLECTION_START);
+ }
+
+ void EndPause() OVERRIDE {
+ handler_->DispatchEvent(nullptr, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH);
+ }
+
+ bool IsEnabled() {
+ return start_enabled_ || finish_enabled_;
+ }
+
+ void SetStartEnabled(bool e) {
+ start_enabled_ = e;
+ }
+
+ void SetFinishEnabled(bool e) {
+ finish_enabled_ = e;
+ }
+
+ private:
+ EventHandler* handler_;
+ bool start_enabled_;
+ bool finish_enabled_;
+};
+
+static void SetupGcPauseTracking(JvmtiGcPauseListener* listener, jvmtiEvent event, bool enable) {
+ bool old_state = listener->IsEnabled();
+
+ if (event == JVMTI_EVENT_GARBAGE_COLLECTION_START) {
+ listener->SetStartEnabled(enable);
+ } else {
+ listener->SetFinishEnabled(enable);
+ }
+
+ bool new_state = listener->IsEnabled();
+
+ if (old_state != new_state) {
+ if (new_state) {
+ art::Runtime::Current()->GetHeap()->SetGcPauseListener(listener);
+ } else {
+ art::Runtime::Current()->GetHeap()->RemoveGcPauseListener();
+ }
+ }
+}
+
// Handle special work for the given event type, if necessary.
void EventHandler::HandleEventType(jvmtiEvent event, bool enable) {
- if (event == JVMTI_EVENT_VM_OBJECT_ALLOC) {
- SetupObjectAllocationTracking(alloc_listener_.get(), enable);
- return;
+ switch (event) {
+ case JVMTI_EVENT_VM_OBJECT_ALLOC:
+ SetupObjectAllocationTracking(alloc_listener_.get(), enable);
+ return;
+
+ case JVMTI_EVENT_GARBAGE_COLLECTION_START:
+ case JVMTI_EVENT_GARBAGE_COLLECTION_FINISH:
+ SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
+ return;
+
+ default:
+ break;
}
}
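
Note: on the agent side these pauses surface as the standard JVMTI GC events. A minimal consumer sketch using the stock JVMTI API (not ART-specific code):

    static void JNICALL OnGcStart(jvmtiEnv*) { /* pause began */ }
    static void JNICALL OnGcFinish(jvmtiEnv*) { /* pause ended */ }

    void RegisterGcCallbacks(jvmtiEnv* jvmti) {
      jvmtiEventCallbacks callbacks = {};
      callbacks.GarbageCollectionStart = OnGcStart;
      callbacks.GarbageCollectionFinish = OnGcFinish;
      jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
      jvmti->SetEventNotificationMode(
          JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_START, nullptr);
      jvmti->SetEventNotificationMode(
          JVMTI_ENABLE, JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, nullptr);
    }
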
@@ -253,6 +318,7 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
EventHandler::EventHandler() {
alloc_listener_.reset(new JvmtiAllocationListener(this));
+ gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
}
EventHandler::~EventHandler() {
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
index 3212b12a54..07d6bfd4c2 100644
--- a/runtime/openjdkjvmti/events.h
+++ b/runtime/openjdkjvmti/events.h
@@ -28,6 +28,7 @@ namespace openjdkjvmti {
struct ArtJvmTiEnv;
class JvmtiAllocationListener;
+class JvmtiGcPauseListener;
struct EventMask {
static constexpr size_t kEventsSize = JVMTI_MAX_EVENT_TYPE_VAL - JVMTI_MIN_EVENT_TYPE_VAL + 1;
@@ -103,6 +104,7 @@ class EventHandler {
EventMask global_mask;
std::unique_ptr<JvmtiAllocationListener> alloc_listener_;
+ std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/heap.cc b/runtime/openjdkjvmti/heap.cc
index decbfc934b..1799e1957d 100644
--- a/runtime/openjdkjvmti/heap.cc
+++ b/runtime/openjdkjvmti/heap.cc
@@ -21,7 +21,6 @@
#include "base/mutex.h"
#include "class_linker.h"
#include "gc/heap.h"
-#include "java_vm_ext.h"
#include "jni_env_ext.h"
#include "mirror/class.h"
#include "object_callbacks.h"
@@ -179,8 +178,7 @@ jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
bool operator()(art::ObjPtr<art::mirror::Class> klass)
OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- art::JNIEnvExt* jni_env = self_->GetJniEnv();
- classes_.push_back(reinterpret_cast<jclass>(jni_env->vm->AddGlobalRef(self_, klass)));
+ classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
return true;
}
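
Note: GetLoadedClasses now hands back local references instead of global ones, so the refs die with the caller's local frame instead of needing explicit cleanup. The difference in plain JNI terms (a sketch):

    jclass local = env->GetObjectClass(obj);           // freed when the frame pops
    jclass global = (jclass)env->NewGlobalRef(local);  // outlives the frame...
    env->DeleteGlobalRef(global);                      // ...until deleted by hand
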
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index 9ea14a20a4..f16b023d1a 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -46,7 +46,9 @@
namespace openjdkjvmti {
-void ObjectTagTable::UpdateTable() {
+void ObjectTagTable::UpdateTableWithReadBarrier() {
+ update_since_last_sweep_ = true;
+
auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -57,7 +59,11 @@ void ObjectTagTable::UpdateTable() {
}
bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
- UpdateTable();
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+ UpdateTableWithReadBarrier();
return GetTagLocked(self, obj, result);
}
@@ -84,9 +90,14 @@ bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, j
return true;
}
- if (art::kUseReadBarrier && self->GetIsGcMarking()) {
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
// Update the table.
- UpdateTable();
+ UpdateTableWithReadBarrier();
// And try again.
return RemoveLocked(self, obj, tag);
@@ -111,9 +122,14 @@ bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlon
return true;
}
- if (art::kUseReadBarrier && self->GetIsGcMarking()) {
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
// Update the table.
- UpdateTable();
+ UpdateTableWithReadBarrier();
// And try again.
return SetLocked(self, obj, new_tag);
@@ -131,6 +147,14 @@ void ObjectTagTable::Sweep(art::IsMarkedVisitor* visitor) {
} else {
SweepImpl<false>(visitor);
}
+
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. We explicitly update the table then
+ // to ensure we compare against to-space pointers. But we want to do this only once. Once
+ // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
+ // so we re-enable the explicit update for the next marking.
+ update_since_last_sweep_ = false;
}
template <bool kHandleNull>
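
Note: the fix is a repair-at-most-once-per-GC-cycle scheme: the first from-space lookup miss during marking triggers a full read-barrier pass over the table, and Sweep re-arms the flag. Reduced to its core (hypothetical names, a sketch):

    bool repaired_this_cycle = false;

    void OnLookupMiss() {
      if (kUseReadBarrier && gc_is_marking && !repaired_this_cycle) {
        repaired_this_cycle = true;    // don't rescan again this cycle
        RepairTableWithReadBarrier();  // rewrite entries as to-space pointers
      }
    }

    void OnSweepDone() {
      repaired_this_cycle = false;     // next marking may need a repair again
    }
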
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 90c40f6010..579dc22985 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -34,6 +34,7 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
public:
explicit ObjectTagTable(EventHandler* event_handler)
: art::gc::SystemWeakHolder(art::LockLevel::kAllocTrackerLock),
+ update_since_last_sweep_(false),
event_handler_(event_handler) {
}
@@ -83,7 +84,8 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
if (art::kUseReadBarrier &&
self != nullptr &&
- self->GetIsGcMarking()) {
+ self->GetIsGcMarking() &&
+ !update_since_last_sweep_) {
return GetTagSlowPath(self, obj, result);
}
@@ -96,7 +98,9 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
- void UpdateTable()
+ // Update the table by doing read barriers on each element, ensuring that to-space pointers
+ // are stored.
+ void UpdateTableWithReadBarrier()
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
@@ -138,6 +142,8 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
EqGcRoot> tagged_objects_
GUARDED_BY(allow_disallow_lock_)
GUARDED_BY(art::Locks::mutator_lock_);
+ // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
+ bool update_since_last_sweep_;
EventHandler* event_handler_;
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index f937ca7603..56eab5e181 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -601,7 +601,7 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
<< "runtime plugins.";
} else if (!args.GetOrDefault(M::Plugins).empty()) {
LOG(WARNING) << "Experimental runtime plugin support has not been enabled. Ignored options: ";
- for (auto& op : args.GetOrDefault(M::Plugins)) {
+ for (const auto& op : args.GetOrDefault(M::Plugins)) {
LOG(WARNING) << " -plugin:" << op.GetLibrary();
}
}
@@ -614,14 +614,14 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
} else if (!args.GetOrDefault(M::AgentLib).empty() || !args.GetOrDefault(M::AgentPath).empty()) {
LOG(WARNING) << "agent support has not been enabled. Enable experimental agent "
<< " support with '-XExperimental:agent'. Ignored options are:";
- for (auto op : args.GetOrDefault(M::AgentLib)) {
+ for (const auto& op : args.GetOrDefault(M::AgentLib)) {
if (op.HasArgs()) {
LOG(WARNING) << " -agentlib:" << op.GetName() << "=" << op.GetArgs();
} else {
LOG(WARNING) << " -agentlib:" << op.GetName();
}
}
- for (auto op : args.GetOrDefault(M::AgentPath)) {
+ for (const auto& op : args.GetOrDefault(M::AgentPath)) {
if (op.HasArgs()) {
LOG(WARNING) << " -agentpath:" << op.GetName() << "=" << op.GetArgs();
} else {
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 661012c5d4..f88309baa1 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -911,7 +911,7 @@ void InvalidReceiverError(ObjPtr<mirror::Object> o, ObjPtr<mirror::Class> c) {
// Will need to be fixed if there's cases where it's not.
void UpdateReference(Thread* self, jobject obj, ObjPtr<mirror::Object> result) {
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
if (kind == kLocal) {
self->GetJniEnv()->locals.Update(obj, result);
} else if (kind == kHandleScopeOrInvalid) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e8f41d4a55..4e600ae46d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1311,6 +1311,28 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
return true;
}
+// Attach a new agent and add it to the list of runtime agents.
+//
+// TODO: Once we decide on the threading model for agents, revisit this and
+// make sure we're doing this on the right thread (and that we synchronize
+// access to any shared data structures such as "agents_").
+//
+void Runtime::AttachAgent(const std::string& agent_arg) {
+ ti::Agent agent(agent_arg);
+
+ int res = 0;
+ std::string err;
+ ti::Agent::LoadError result = agent.Attach(&res, &err);
+
+ if (result == ti::Agent::kNoError) {
+ agents_.push_back(std::move(agent));
+ } else {
+ LOG(ERROR) << "Agent attach failed (result=" << result << ") : " << err;
+ ScopedObjectAccess soa(Thread::Current());
+ ThrowWrappedIOException("%s", err.c_str());
+ }
+}
+
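
Note: the agent argument is a single "path[=args]" string; the Agent constructor (see ti/agent.cc below) splits it on the first '='. Worked examples (the library path is made up):

    // "/data/local/libti.so"          -> name_ = "/data/local/libti.so", args_ = ""
    // "/data/local/libti.so=a=1,b=2"  -> name_ = "/data/local/libti.so", args_ = "a=1,b=2"
    //                                    (only the first '=' splits; the rest stays in args_)
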
void Runtime::InitNativeMethods() {
VLOG(startup) << "Runtime::InitNativeMethods entering";
Thread* self = Thread::Current();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 7cb87abe30..b25ec2351b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -665,6 +665,8 @@ class Runtime {
NO_RETURN
static void Aborter(const char* abort_message);
+ void AttachAgent(const std::string& agent_arg);
+
private:
static void InitPlatformSignalHandlers();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e47ccc062b..ace5e679b3 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1860,7 +1860,7 @@ ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
return nullptr;
}
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
ObjPtr<mirror::Object> result;
bool expect_null = false;
// The "kinds" below are sorted by the frequency we expect to encounter them.
@@ -1902,7 +1902,7 @@ ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
bool Thread::IsJWeakCleared(jweak obj) const {
CHECK(obj != nullptr);
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
CHECK_EQ(kind, kWeakGlobal);
return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
}
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 7c0ea64228..d21ff77849 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -25,17 +25,10 @@ const char* AGENT_ON_LOAD_FUNCTION_NAME = "Agent_OnLoad";
const char* AGENT_ON_ATTACH_FUNCTION_NAME = "Agent_OnAttach";
const char* AGENT_ON_UNLOAD_FUNCTION_NAME = "Agent_OnUnload";
-Agent Agent::Create(std::string arg) {
- size_t eq = arg.find_first_of('=');
- if (eq == std::string::npos) {
- return Agent(arg, "");
- } else {
- return Agent(arg.substr(0, eq), arg.substr(eq + 1, arg.length()));
- }
-}
-
// TODO We need to acquire some locks probably.
-Agent::LoadError Agent::Load(/*out*/jint* call_res, /*out*/ std::string* error_msg) {
+Agent::LoadError Agent::DoLoadHelper(bool attaching,
+ /*out*/jint* call_res,
+ /*out*/std::string* error_msg) {
DCHECK(call_res != nullptr);
DCHECK(error_msg != nullptr);
@@ -49,8 +42,10 @@ Agent::LoadError Agent::Load(/*out*/jint* call_res, /*out*/ std::string* error_m
VLOG(agents) << "err: " << *error_msg;
return err;
}
- if (onload_ == nullptr) {
- *error_msg = StringPrintf("Unable to start agent %s: No Agent_OnLoad function found",
+ AgentOnLoadFunction callback = attaching ? onattach_ : onload_;
+ if (callback == nullptr) {
+    *error_msg = StringPrintf("Unable to start agent %s: No %s callback found",
+                              name_.c_str(),
+                              (attaching ? "attach" : "load"));
VLOG(agents) << "err: " << *error_msg;
return kLoadingError;
@@ -59,9 +54,9 @@ Agent::LoadError Agent::Load(/*out*/jint* call_res, /*out*/ std::string* error_m
std::unique_ptr<char[]> copied_args(new char[args_.size() + 1]);
strcpy(copied_args.get(), args_.c_str());
// TODO Need to do some checks that we are at a good spot etc.
- *call_res = onload_(static_cast<JavaVM*>(Runtime::Current()->GetJavaVM()),
- copied_args.get(),
- nullptr);
+ *call_res = callback(Runtime::Current()->GetJavaVM(),
+ copied_args.get(),
+ nullptr);
if (*call_res != 0) {
*error_msg = StringPrintf("Initialization of %s returned non-zero value of %d",
name_.c_str(), *call_res);
@@ -74,6 +69,12 @@ Agent::LoadError Agent::Load(/*out*/jint* call_res, /*out*/ std::string* error_m
Agent::LoadError Agent::DoDlOpen(/*out*/std::string* error_msg) {
DCHECK(error_msg != nullptr);
+
+ DCHECK(dlopen_handle_ == nullptr);
+ DCHECK(onload_ == nullptr);
+ DCHECK(onattach_ == nullptr);
+ DCHECK(onunload_ == nullptr);
+
dlopen_handle_ = dlopen(name_.c_str(), RTLD_LAZY);
if (dlopen_handle_ == nullptr) {
*error_msg = StringPrintf("Unable to dlopen %s: %s", name_.c_str(), dlerror());
@@ -85,7 +86,7 @@ Agent::LoadError Agent::DoDlOpen(/*out*/std::string* error_msg) {
if (onload_ == nullptr) {
VLOG(agents) << "Unable to find 'Agent_OnLoad' symbol in " << this;
}
- onattach_ = reinterpret_cast<AgentOnAttachFunction>(dlsym(dlopen_handle_,
+ onattach_ = reinterpret_cast<AgentOnLoadFunction>(dlsym(dlopen_handle_,
AGENT_ON_ATTACH_FUNCTION_NAME));
if (onattach_ == nullptr) {
VLOG(agents) << "Unable to find 'Agent_OnAttach' symbol in " << this;
@@ -106,21 +107,91 @@ void Agent::Unload() {
}
dlclose(dlopen_handle_);
dlopen_handle_ = nullptr;
+ onload_ = nullptr;
+ onattach_ = nullptr;
+ onunload_ = nullptr;
} else {
VLOG(agents) << this << " is not currently loaded!";
}
}
+Agent::Agent(std::string arg)
+ : dlopen_handle_(nullptr),
+ onload_(nullptr),
+ onattach_(nullptr),
+ onunload_(nullptr) {
+ size_t eq = arg.find_first_of('=');
+ if (eq == std::string::npos) {
+ name_ = arg;
+ } else {
+ name_ = arg.substr(0, eq);
+ args_ = arg.substr(eq + 1, arg.length());
+ }
+}
+
Agent::Agent(const Agent& other)
- : name_(other.name_),
- args_(other.args_),
- dlopen_handle_(other.dlopen_handle_),
- onload_(other.onload_),
- onattach_(other.onattach_),
- onunload_(other.onunload_) {
- if (other.dlopen_handle_ != nullptr) {
- dlopen(other.name_.c_str(), 0);
+ : dlopen_handle_(nullptr),
+ onload_(nullptr),
+ onattach_(nullptr),
+ onunload_(nullptr) {
+ *this = other;
+}
+
+// Attempting to copy to/from loaded/started agents is a fatal error.
+Agent& Agent::operator=(const Agent& other) {
+ if (this != &other) {
+ if (other.dlopen_handle_ != nullptr) {
+ LOG(FATAL) << "Attempting to copy a loaded agent!";
+ }
+
+ if (dlopen_handle_ != nullptr) {
+ LOG(FATAL) << "Attempting to assign into a loaded agent!";
+ }
+
+ DCHECK(other.onload_ == nullptr);
+ DCHECK(other.onattach_ == nullptr);
+ DCHECK(other.onunload_ == nullptr);
+
+ DCHECK(onload_ == nullptr);
+ DCHECK(onattach_ == nullptr);
+ DCHECK(onunload_ == nullptr);
+
+ name_ = other.name_;
+ args_ = other.args_;
+
+ dlopen_handle_ = nullptr;
+ onload_ = nullptr;
+ onattach_ = nullptr;
+ onunload_ = nullptr;
+ }
+ return *this;
+}
+
+Agent::Agent(Agent&& other)
+ : dlopen_handle_(nullptr),
+ onload_(nullptr),
+ onattach_(nullptr),
+ onunload_(nullptr) {
+ *this = std::move(other);
+}
+
+Agent& Agent::operator=(Agent&& other) {
+ if (this != &other) {
+ if (dlopen_handle_ != nullptr) {
+ dlclose(dlopen_handle_);
+ }
+ name_ = std::move(other.name_);
+ args_ = std::move(other.args_);
+ dlopen_handle_ = other.dlopen_handle_;
+ onload_ = other.onload_;
+ onattach_ = other.onattach_;
+ onunload_ = other.onunload_;
+ other.dlopen_handle_ = nullptr;
+ other.onload_ = nullptr;
+ other.onattach_ = nullptr;
+ other.onunload_ = nullptr;
}
+ return *this;
}
Agent::~Agent() {
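
Note: an Agent now owns its dlopen handle, so the move operations must transfer it and null out the source; otherwise two destructors would dlclose the same handle. The ownership shape in isolation (a sketch):

    struct Lib {
      void* handle = nullptr;
      Lib(Lib&& other) : handle(other.handle) { other.handle = nullptr; }
      Lib& operator=(Lib&& other) {
        if (this != &other) {
          if (handle != nullptr) dlclose(handle);  // release what we held
          handle = other.handle;                   // steal other's handle
          other.handle = nullptr;                  // source must not close it
        }
        return *this;
      }
      ~Lib() { if (handle != nullptr) dlclose(handle); }
    };
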
diff --git a/runtime/ti/agent.h b/runtime/ti/agent.h
index 521e21e4e4..6561756edd 100644
--- a/runtime/ti/agent.h
+++ b/runtime/ti/agent.h
@@ -28,9 +28,10 @@ namespace art {
namespace ti {
using AgentOnLoadFunction = jint (*)(JavaVM*, const char*, void*);
-using AgentOnAttachFunction = jint (*)(JavaVM*, const char*, void*);
using AgentOnUnloadFunction = void (*)(JavaVM*);
+// TODO: consider splitting ti::Agent into command line, agent and shared library handler classes
+
class Agent {
public:
enum LoadError {
@@ -56,65 +57,44 @@ class Agent {
return !GetArgs().empty();
}
- // TODO We need to acquire some locks probably.
- LoadError Load(/*out*/jint* call_res, /*out*/std::string* error_msg);
+ LoadError Load(/*out*/jint* call_res, /*out*/std::string* error_msg) {
+ VLOG(agents) << "Loading agent: " << name_ << " " << args_;
+ return DoLoadHelper(false, call_res, error_msg);
+ }
// TODO We need to acquire some locks probably.
void Unload();
// Tries to attach the agent using its OnAttach method. Returns true on success.
- // TODO We need to acquire some locks probably.
- LoadError Attach(std::string* error_msg) {
- // TODO
- *error_msg = "Attach has not yet been implemented!";
- return kLoadingError;
- }
-
- static Agent Create(std::string arg);
-
- static Agent Create(std::string name, std::string args) {
- return Agent(name, args);
+ LoadError Attach(/*out*/jint* call_res, /*out*/std::string* error_msg) {
+ VLOG(agents) << "Attaching agent: " << name_ << " " << args_;
+ return DoLoadHelper(true, call_res, error_msg);
}
- ~Agent();
+ explicit Agent(std::string arg);
- // We need move constructor and copy for vectors
Agent(const Agent& other);
+ Agent& operator=(const Agent& other);
- Agent(Agent&& other)
- : name_(other.name_),
- args_(other.args_),
- dlopen_handle_(nullptr),
- onload_(nullptr),
- onattach_(nullptr),
- onunload_(nullptr) {
- other.dlopen_handle_ = nullptr;
- other.onload_ = nullptr;
- other.onattach_ = nullptr;
- other.onunload_ = nullptr;
- }
+ Agent(Agent&& other);
+ Agent& operator=(Agent&& other);
- // We don't need an operator=
- void operator=(const Agent&) = delete;
+ ~Agent();
private:
- Agent(std::string name, std::string args)
- : name_(name),
- args_(args),
- dlopen_handle_(nullptr),
- onload_(nullptr),
- onattach_(nullptr),
- onunload_(nullptr) { }
-
LoadError DoDlOpen(/*out*/std::string* error_msg);
- const std::string name_;
- const std::string args_;
+ LoadError DoLoadHelper(bool attaching,
+ /*out*/jint* call_res,
+ /*out*/std::string* error_msg);
+
+ std::string name_;
+ std::string args_;
void* dlopen_handle_;
// The entrypoints.
AgentOnLoadFunction onload_;
- AgentOnAttachFunction onattach_;
+ AgentOnLoadFunction onattach_;
AgentOnUnloadFunction onunload_;
friend std::ostream& operator<<(std::ostream &os, Agent const& m);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 5557d5f950..6ed54f748f 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1124,370 +1124,6 @@ std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
-static void DumpMethodCFGImpl(const DexFile* dex_file,
- uint32_t dex_method_idx,
- const DexFile::CodeItem* code_item,
- std::ostream& os) {
- os << "digraph {\n";
- os << " # /* " << dex_file->PrettyMethod(dex_method_idx, true) << " */\n";
-
- std::set<uint32_t> dex_pc_is_branch_target;
- {
- // Go and populate.
- const Instruction* inst = Instruction::At(code_item->insns_);
- for (uint32_t dex_pc = 0;
- dex_pc < code_item->insns_size_in_code_units_;
- dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
- if (inst->IsBranch()) {
- dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset());
- } else if (inst->IsSwitch()) {
- const uint16_t* insns = code_item->insns_ + dex_pc;
- int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
- const uint16_t* switch_insns = insns + switch_offset;
- uint32_t switch_count = switch_insns[1];
- int32_t targets_offset;
- if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
- /* 0=sig, 1=count, 2/3=firstKey */
- targets_offset = 4;
- } else {
- /* 0=sig, 1=count, 2..count*2 = keys */
- targets_offset = 2 + 2 * switch_count;
- }
- for (uint32_t targ = 0; targ < switch_count; targ++) {
- int32_t offset =
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
- dex_pc_is_branch_target.insert(dex_pc + offset);
- }
- }
- }
- }
-
- // Create nodes for "basic blocks."
- std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts.
- std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs.
-
- {
- const Instruction* inst = Instruction::At(code_item->insns_);
- bool first_in_block = true;
- bool force_new_block = false;
- for (uint32_t dex_pc = 0;
- dex_pc < code_item->insns_size_in_code_units_;
- dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
- if (dex_pc == 0 ||
- (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) ||
- force_new_block) {
- uint32_t id = dex_pc_to_node_id.size();
- if (id > 0) {
- // End last node.
- os << "}\"];\n";
- }
- // Start next node.
- os << " node" << id << " [shape=record,label=\"{";
- dex_pc_to_node_id.insert(std::make_pair(dex_pc, id));
- first_in_block = true;
- force_new_block = false;
- }
-
- // Register instruction.
- dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1));
-
- // Print instruction.
- if (!first_in_block) {
- os << " | ";
- } else {
- first_in_block = false;
- }
-
- // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'.
- os << "<" << "p" << dex_pc << ">";
- os << " 0x" << std::hex << dex_pc << std::dec << ": ";
- std::string inst_str = inst->DumpString(dex_file);
- size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars
- // we need to escape.
- while (cur_start != std::string::npos) {
- size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1);
- if (next_escape == std::string::npos) {
- os << inst_str.substr(cur_start, inst_str.size() - cur_start);
- break;
- } else {
- os << inst_str.substr(cur_start, next_escape - cur_start);
- // Escape all necessary characters.
- while (next_escape < inst_str.size()) {
- char c = inst_str.at(next_escape);
- if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
- os << '\\' << c;
- } else {
- break;
- }
- next_escape++;
- }
- if (next_escape >= inst_str.size()) {
- next_escape = std::string::npos;
- }
- cur_start = next_escape;
- }
- }
-
- // Force a new block for some fall-throughs and some instructions that terminate the "local"
- // control flow.
- force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd();
- }
- // Close last node.
- if (dex_pc_to_node_id.size() > 0) {
- os << "}\"];\n";
- }
- }
-
- // Create edges between them.
- {
- std::ostringstream regular_edges;
- std::ostringstream taken_edges;
- std::ostringstream exception_edges;
-
- // Common set of exception edges.
- std::set<uint32_t> exception_targets;
-
- // These blocks (given by the first dex pc) need exception per dex-pc handling in a second
- // pass. In the first pass we try and see whether we can use a common set of edges.
- std::set<uint32_t> blocks_with_detailed_exceptions;
-
- {
- uint32_t last_node_id = std::numeric_limits<uint32_t>::max();
- uint32_t old_dex_pc = 0;
- uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max();
- const Instruction* inst = Instruction::At(code_item->insns_);
- for (uint32_t dex_pc = 0;
- dex_pc < code_item->insns_size_in_code_units_;
- old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) {
- {
- auto it = dex_pc_to_node_id.find(dex_pc);
- if (it != dex_pc_to_node_id.end()) {
- if (!exception_targets.empty()) {
- // It seems the last block had common exception handlers. Add the exception edges now.
- uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
- for (uint32_t handler_pc : exception_targets) {
- auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
- if (node_id_it != dex_pc_to_incl_id.end()) {
- exception_edges << " node" << node_id
- << " -> node" << node_id_it->second << ":p" << handler_pc
- << ";\n";
- }
- }
- exception_targets.clear();
- }
-
- block_start_dex_pc = dex_pc;
-
- // Seems to be a fall-through, connect to last_node_id. May be spurious edges for things
- // like switch data.
- uint32_t old_last = last_node_id;
- last_node_id = it->second;
- if (old_last != std::numeric_limits<uint32_t>::max()) {
- regular_edges << " node" << old_last << ":p" << old_dex_pc
- << " -> node" << last_node_id << ":p" << dex_pc
- << ";\n";
- }
- }
-
- // Look at the exceptions of the first entry.
- CatchHandlerIterator catch_it(*code_item, dex_pc);
- for (; catch_it.HasNext(); catch_it.Next()) {
- exception_targets.insert(catch_it.GetHandlerAddress());
- }
- }
-
- // Handle instruction.
-
- // Branch: something with at most two targets.
- if (inst->IsBranch()) {
- const int32_t offset = inst->GetTargetOffset();
- const bool conditional = !inst->IsUnconditional();
-
- auto target_it = dex_pc_to_node_id.find(dex_pc + offset);
- if (target_it != dex_pc_to_node_id.end()) {
- taken_edges << " node" << last_node_id << ":p" << dex_pc
- << " -> node" << target_it->second << ":p" << (dex_pc + offset)
- << ";\n";
- }
- if (!conditional) {
- // No fall-through.
- last_node_id = std::numeric_limits<uint32_t>::max();
- }
- } else if (inst->IsSwitch()) {
- // TODO: Iterate through all switch targets.
- const uint16_t* insns = code_item->insns_ + dex_pc;
- /* make sure the start of the switch is in range */
- int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
- /* offset to switch table is a relative branch-style offset */
- const uint16_t* switch_insns = insns + switch_offset;
- uint32_t switch_count = switch_insns[1];
- int32_t targets_offset;
- if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
- /* 0=sig, 1=count, 2/3=firstKey */
- targets_offset = 4;
- } else {
- /* 0=sig, 1=count, 2..count*2 = keys */
- targets_offset = 2 + 2 * switch_count;
- }
- /* make sure the end of the switch is in range */
- /* verify each switch target */
- for (uint32_t targ = 0; targ < switch_count; targ++) {
- int32_t offset =
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
- static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
- int32_t abs_offset = dex_pc + offset;
- auto target_it = dex_pc_to_node_id.find(abs_offset);
- if (target_it != dex_pc_to_node_id.end()) {
- // TODO: value label.
- taken_edges << " node" << last_node_id << ":p" << dex_pc
- << " -> node" << target_it->second << ":p" << (abs_offset)
- << ";\n";
- }
- }
- }
-
- // Exception edges. If this is not the first instruction in the block
- if (block_start_dex_pc != dex_pc) {
- std::set<uint32_t> current_handler_pcs;
- CatchHandlerIterator catch_it(*code_item, dex_pc);
- for (; catch_it.HasNext(); catch_it.Next()) {
- current_handler_pcs.insert(catch_it.GetHandlerAddress());
- }
- if (current_handler_pcs != exception_targets) {
- exception_targets.clear(); // Clear so we don't do something at the end.
- blocks_with_detailed_exceptions.insert(block_start_dex_pc);
- }
- }
-
- if (inst->IsReturn() ||
- (inst->Opcode() == Instruction::THROW) ||
- (inst->IsBranch() && inst->IsUnconditional())) {
- // No fall-through.
- last_node_id = std::numeric_limits<uint32_t>::max();
- }
- }
- // Finish up the last block, if it had common exceptions.
- if (!exception_targets.empty()) {
- // It seems the last block had common exception handlers. Add the exception edges now.
- uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second;
- for (uint32_t handler_pc : exception_targets) {
- auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
- if (node_id_it != dex_pc_to_incl_id.end()) {
- exception_edges << " node" << node_id
- << " -> node" << node_id_it->second << ":p" << handler_pc
- << ";\n";
- }
- }
- exception_targets.clear();
- }
- }
-
- // Second pass for detailed exception blocks.
- // TODO
- // Exception edges. If this is not the first instruction in the block
- for (uint32_t dex_pc : blocks_with_detailed_exceptions) {
- const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]);
- uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second;
- while (true) {
- CatchHandlerIterator catch_it(*code_item, dex_pc);
- if (catch_it.HasNext()) {
- std::set<uint32_t> handled_targets;
- for (; catch_it.HasNext(); catch_it.Next()) {
- uint32_t handler_pc = catch_it.GetHandlerAddress();
- auto it = handled_targets.find(handler_pc);
- if (it == handled_targets.end()) {
- auto node_id_it = dex_pc_to_incl_id.find(handler_pc);
- if (node_id_it != dex_pc_to_incl_id.end()) {
- exception_edges << " node" << this_node_id << ":p" << dex_pc
- << " -> node" << node_id_it->second << ":p" << handler_pc
- << ";\n";
- }
-
- // Mark as done.
- handled_targets.insert(handler_pc);
- }
- }
- }
- if (inst->IsBasicBlockEnd()) {
- break;
- }
-
- // Loop update. Have a break-out if the next instruction is a branch target and thus in
- // another block.
- dex_pc += inst->SizeInCodeUnits();
- if (dex_pc >= code_item->insns_size_in_code_units_) {
- break;
- }
- if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) {
- break;
- }
- inst = inst->Next();
- }
- }
-
- // Write out the sub-graphs to make edges styled.
- os << "\n";
- os << " subgraph regular_edges {\n";
- os << " edge [color=\"#000000\",weight=.3,len=3];\n\n";
- os << " " << regular_edges.str() << "\n";
- os << " }\n\n";
-
- os << " subgraph taken_edges {\n";
- os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n";
- os << " " << taken_edges.str() << "\n";
- os << " }\n\n";
-
- os << " subgraph exception_edges {\n";
- os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n";
- os << " " << exception_edges.str() << "\n";
- os << " }\n\n";
- }
-
- os << "}\n";
-}
-
-void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
- // This is painful: we need to find the code item, which means finding the class and
- // then iterating its method table.
- if (dex_method_idx >= dex_file->NumMethodIds()) {
- os << "Could not find method-idx.";
- return;
- }
- const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
-
- const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
- if (class_def == nullptr) {
- os << "Could not find class-def.";
- return;
- }
-
- const uint8_t* class_data = dex_file->GetClassData(*class_def);
- if (class_data == nullptr) {
- os << "No class data.";
- return;
- }
-
- ClassDataItemIterator it(*dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- it.Next();
- }
-
- // Find method, and dump it.
- while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == dex_method_idx) {
- DumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
- return;
- }
- it.Next();
- }
-
- // Otherwise complain.
- os << "Something went wrong; the method was not found in the class data.";
-}
-
static void ParseStringAfterChar(const std::string& s,
char c,
std::string* parsed_value,
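
The CFG dumper deleted above moves into dexdump (see dexdump/dexdump_cfg.cc in the file list); it reads each switch target as two little-endian 16-bit code units. A minimal sketch of that decoding, with illustrative names rather than the moved code's actual API:

    #include <cstdint>

    // For a packed switch the targets start at code unit 4 (ident, size, and a
    // 32-bit first key); for a sparse switch at 2 + 2 * size, after the keys.
    static int32_t DecodeSwitchTarget(const uint16_t* switch_insns,
                                      uint32_t targets_offset,
                                      uint32_t index) {
      return static_cast<int32_t>(switch_insns[targets_offset + index * 2]) |
             (static_cast<int32_t>(switch_insns[targets_offset + index * 2 + 1]) << 16);
    }
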
diff --git a/runtime/utils.h b/runtime/utils.h
index f96ddd7829..94738d29ce 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -36,12 +36,8 @@
#include "obj_ptr.h"
#include "primitive.h"
-class BacktraceMap;
-
namespace art {
-class DexFile;
-
template <typename T>
bool ParseUint(const char *in, T* out) {
char* end;
@@ -274,8 +270,6 @@ static inline constexpr bool ValidPointerSize(size_t pointer_size) {
return pointer_size == 4 || pointer_size == 8;
}
-void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
-
static inline const void* EntryPointToCodePointer(const void* entry_point) {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
// TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 5ca7684565..c7875b56ec 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -70,10 +70,7 @@ inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- // App image patching relies on having enough room for a forwarding pointer in the types array.
- // See FixupArtMethodArrayVisitor and ClassLinker::AddImageSpace.
- return std::max(ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements),
- static_cast<size_t>(pointer_size_));
+ return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements);
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
@@ -85,8 +82,7 @@ inline size_t DexCacheArraysLayout::MethodOffset(uint32_t method_idx) const {
}
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
- // App image patching relies on having enough room for a forwarding pointer in the methods array.
- return std::max(ArraySize(pointer_size_, num_elements), static_cast<size_t>(pointer_size_));
+ return ArraySize(pointer_size_, num_elements);
}
inline size_t DexCacheArraysLayout::MethodsAlignment() const {
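
Both hunks above drop the std::max floor that reserved room for an app-image forwarding pointer; one visible consequence is that an empty array now occupies zero bytes rather than one pointer's worth. A hedged before/after sketch, assuming ArraySize is a plain element-size-times-count product:

    #include <algorithm>
    #include <cstddef>

    // Illustrative only, not the DexCacheArraysLayout API.
    static size_t SizeWithForwardingFloor(size_t array_size, size_t pointer_size) {
      return std::max(array_size, pointer_size);  // old behavior: padded
    }
    static size_t SizeExact(size_t array_size) {
      return array_size;  // new behavior: exact fit, zero for empty arrays
    }
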
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 9fbf87595b..b3dab581d5 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -34,9 +34,12 @@ bool VdexFile::Header::IsVersionValid() const {
return (memcmp(version_, kVdexVersion, sizeof(kVdexVersion)) == 0);
}
-VdexFile::Header::Header(uint32_t dex_size, uint32_t verifier_deps_size)
+VdexFile::Header::Header(uint32_t dex_size,
+ uint32_t verifier_deps_size,
+ uint32_t quickening_info_size)
: dex_size_(dex_size),
- verifier_deps_size_(verifier_deps_size) {
+ verifier_deps_size_(verifier_deps_size),
+ quickening_info_size_(quickening_info_size) {
memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
memcpy(version_, kVdexVersion, sizeof(kVdexVersion));
DCHECK(IsMagicValid());
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 6bea153d29..28f9bb3481 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -42,13 +42,14 @@ class VdexFile {
public:
struct Header {
public:
- Header(uint32_t dex_size, uint32_t verifier_deps_size);
+ Header(uint32_t dex_size, uint32_t verifier_deps_size, uint32_t quickening_info_size);
bool IsMagicValid() const;
bool IsVersionValid() const;
uint32_t GetDexSize() const { return dex_size_; }
uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
+ uint32_t GetQuickeningInfoSize() const { return quickening_info_size_; }
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
@@ -58,6 +59,7 @@ class VdexFile {
uint8_t version_[4];
uint32_t dex_size_;
uint32_t verifier_deps_size_;
+ uint32_t quickening_info_size_;
};
static VdexFile* Open(const std::string& vdex_filename,
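
With quickening_info_size_ added, the header now carries all three section sizes. A hedged sketch of deriving section offsets from them, assuming the sections follow the header in field order (these helpers are hypothetical, not VdexFile's API):

    #include <cstdint>

    struct VdexOffsets {
      uint32_t dex_begin;
      uint32_t verifier_deps_begin;
      uint32_t quickening_info_begin;
    };

    // Cumulative offsets computed from the sizes stored in the header.
    static VdexOffsets ComputeVdexOffsets(uint32_t header_size,
                                          uint32_t dex_size,
                                          uint32_t verifier_deps_size) {
      VdexOffsets o;
      o.dex_begin = header_size;
      o.verifier_deps_begin = o.dex_begin + dex_size;
      o.quickening_info_begin = o.verifier_deps_begin + verifier_deps_size;
      return o;
    }
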
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index d0493e5f73..93286ea84e 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -154,8 +154,7 @@ mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassL
if (can_load_classes_) {
klass = class_linker->FindClass(self, descriptor, class_loader);
} else {
- klass = class_linker->LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor),
- loader);
+ klass = class_linker->LookupClass(self, descriptor, loader);
if (klass != nullptr && !klass->IsResolved()) {
// We found the class, but without it being loaded it's not safe for use.
klass = nullptr;
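
LookupClass no longer takes a caller-computed descriptor hash here; presumably ComputeModifiedUtf8Hash now happens inside the class linker. A hedged sketch of the simplified call shape (the wrapper and its signature are hypothetical):

    // Callers used to pass ComputeModifiedUtf8Hash(descriptor) explicitly; the
    // simplified API takes just the descriptor and loader.
    static mirror::Class* LookupNoHash(ClassLinker* linker,
                                       Thread* self,
                                       const char* descriptor,
                                       mirror::ClassLoader* loader) {
      return linker->LookupClass(self, descriptor, loader);
    }
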
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 3c7fb7a26a..4d1e337bb1 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -280,6 +280,22 @@ static inline VerifierDeps* GetVerifierDepsSingleton() {
return callbacks->GetVerifierDeps();
}
+void VerifierDeps::MaybeRecordVerificationStatus(const DexFile& dex_file,
+ uint16_t type_idx,
+ MethodVerifier::FailureKind failure_kind) {
+ if (failure_kind == MethodVerifier::kNoFailure) {
+ // We only record classes that did not fully verify at compile time.
+ return;
+ }
+
+ VerifierDeps* singleton = GetVerifierDepsSingleton();
+ if (singleton != nullptr) {
+ DexFileDeps* dex_deps = singleton->GetDexFileDeps(dex_file);
+ MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+ dex_deps->unverified_classes_.push_back(type_idx);
+ }
+}
+
void VerifierDeps::MaybeRecordClassResolution(const DexFile& dex_file,
uint16_t type_idx,
mirror::Class* klass) {
@@ -360,6 +376,14 @@ static inline void EncodeSet(std::vector<uint8_t>* out, const std::set<T>& set)
}
}
+static inline void EncodeUint16Vector(std::vector<uint8_t>* out,
+ const std::vector<uint16_t>& vector) {
+ EncodeUnsignedLeb128(out, vector.size());
+ for (uint16_t entry : vector) {
+ EncodeUnsignedLeb128(out, entry);
+ }
+}
+
template<typename T>
static inline void DecodeSet(const uint8_t** in, const uint8_t* end, std::set<T>* set) {
DCHECK(set->empty());
@@ -371,6 +395,17 @@ static inline void DecodeSet(const uint8_t** in, const uint8_t* end, std::set<T>
}
}
+static inline void DecodeUint16Vector(const uint8_t** in,
+ const uint8_t* end,
+ std::vector<uint16_t>* vector) {
+ DCHECK(vector->empty());
+ size_t num_entries = DecodeUint32WithOverflowCheck(in, end);
+ vector->reserve(num_entries);
+ for (size_t i = 0; i < num_entries; ++i) {
+ vector->push_back(dchecked_integral_cast<uint16_t>(DecodeUint32WithOverflowCheck(in, end)));
+ }
+}
+
static inline void EncodeStringVector(std::vector<uint8_t>* out,
const std::vector<std::string>& strings) {
EncodeUnsignedLeb128(out, strings.size());
@@ -407,6 +442,7 @@ void VerifierDeps::Encode(std::vector<uint8_t>* buffer) const {
EncodeSet(buffer, entry.second->direct_methods_);
EncodeSet(buffer, entry.second->virtual_methods_);
EncodeSet(buffer, entry.second->interface_methods_);
+ EncodeUint16Vector(buffer, entry.second->unverified_classes_);
}
}
@@ -423,6 +459,7 @@ VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRe
DecodeSet(&data_start, data_end, &entry.second->direct_methods_);
DecodeSet(&data_start, data_end, &entry.second->virtual_methods_);
DecodeSet(&data_start, data_end, &entry.second->interface_methods_);
+ DecodeUint16Vector(&data_start, data_end, &entry.second->unverified_classes_);
}
CHECK_LE(data_start, data_end);
}
@@ -463,7 +500,8 @@ bool VerifierDeps::DexFileDeps::Equals(const VerifierDeps::DexFileDeps& rhs) con
(fields_ == rhs.fields_) &&
(direct_methods_ == rhs.direct_methods_) &&
(virtual_methods_ == rhs.virtual_methods_) &&
- (interface_methods_ == rhs.interface_methods_);
+ (interface_methods_ == rhs.interface_methods_) &&
+ (unverified_classes_ == rhs.unverified_classes_);
}
} // namespace verifier
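
Both the element count and each uint16 entry above are written with EncodeUnsignedLeb128. For reference, a minimal sketch of that variable-length encoding (the real helper lives in the runtime's leb128 header; this standalone version is illustrative):

    #include <cstdint>
    #include <vector>

    // Unsigned LEB128: seven payload bits per byte, high bit set on every byte
    // except the last.
    static void EncodeUnsignedLeb128Sketch(std::vector<uint8_t>* out, uint32_t value) {
      while (value >= 0x80) {
        out->push_back(static_cast<uint8_t>(value | 0x80));
        value >>= 7;
      }
      out->push_back(static_cast<uint8_t>(value));
    }
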
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 3223f6f7b9..9d2622de3a 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -26,6 +26,7 @@
#include "base/array_ref.h"
#include "base/mutex.h"
#include "method_resolution_kind.h"
+#include "method_verifier.h" // For MethodVerifier::FailureKind.
#include "obj_ptr.h"
#include "os.h"
@@ -49,6 +50,12 @@ class VerifierDeps {
explicit VerifierDeps(const std::vector<const DexFile*>& dex_files)
REQUIRES(!Locks::verifier_deps_lock_);
+ // Record the verification status of the class at `type_idx`.
+ static void MaybeRecordVerificationStatus(const DexFile& dex_file,
+ uint16_t type_idx,
+ MethodVerifier::FailureKind failure_kind)
+ REQUIRES(!Locks::verifier_deps_lock_);
+
// Record the outcome `klass` of resolving type `type_idx` from `dex_file`.
// If `klass` is null, the class is assumed unresolved.
static void MaybeRecordClassResolution(const DexFile& dex_file,
@@ -136,7 +143,7 @@ class VerifierDeps {
};
using TypeAssignabilityBase = std::tuple<uint32_t, uint32_t>;
- struct TypeAssignability : public std::tuple<uint32_t, uint32_t> {
+ struct TypeAssignability : public TypeAssignabilityBase {
TypeAssignability() = default;
TypeAssignability(const TypeAssignability&) = default;
TypeAssignability(uint32_t destination_idx, uint32_t source_idx)
@@ -165,6 +172,9 @@ class VerifierDeps {
std::set<MethodResolution> virtual_methods_;
std::set<MethodResolution> interface_methods_;
+ // List of classes that were not fully verified in that dex file.
+ std::vector<uint16_t> unverified_classes_;
+
bool Equals(const DexFileDeps& rhs) const;
};
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 78fc53ac36..153c7ef59e 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -37,6 +37,7 @@ jclass WellKnownClasses::dalvik_annotation_optimization_FastNative;
jclass WellKnownClasses::dalvik_system_DexFile;
jclass WellKnownClasses::dalvik_system_DexPathList;
jclass WellKnownClasses::dalvik_system_DexPathList__Element;
+jclass WellKnownClasses::dalvik_system_EmulatedStackFrame;
jclass WellKnownClasses::dalvik_system_PathClassLoader;
jclass WellKnownClasses::dalvik_system_VMRuntime;
jclass WellKnownClasses::java_lang_annotation_Annotation__array;
@@ -266,6 +267,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
dalvik_system_DexFile = CacheClass(env, "dalvik/system/DexFile");
dalvik_system_DexPathList = CacheClass(env, "dalvik/system/DexPathList");
dalvik_system_DexPathList__Element = CacheClass(env, "dalvik/system/DexPathList$Element");
+ dalvik_system_EmulatedStackFrame = CacheClass(env, "dalvik/system/EmulatedStackFrame");
dalvik_system_PathClassLoader = CacheClass(env, "dalvik/system/PathClassLoader");
dalvik_system_VMRuntime = CacheClass(env, "dalvik/system/VMRuntime");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 248ba7f431..2fb5bb471d 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -50,6 +50,7 @@ struct WellKnownClasses {
static jclass dalvik_system_DexFile;
static jclass dalvik_system_DexPathList;
static jclass dalvik_system_DexPathList__Element;
+ static jclass dalvik_system_EmulatedStackFrame;
static jclass dalvik_system_PathClassLoader;
static jclass dalvik_system_VMRuntime;
static jclass java_lang_annotation_Annotation__array;
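
CacheClass, used in the .cc hunk above, resolves each class once at startup and pins it so the cached jclass stays valid across JNI frames. A hedged sketch of that pattern, not the helper's actual body:

    #include <jni.h>

    // Look the class up once and promote the local reference to a global one;
    // the real helper presumably CHECK-fails instead of returning null.
    static jclass CacheClassSketch(JNIEnv* env, const char* descriptor) {
      jclass klass = env->FindClass(descriptor);
      if (klass == nullptr) {
        return nullptr;
      }
      return reinterpret_cast<jclass>(env->NewGlobalRef(klass));
    }
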
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index aca356b5b9..56d737f4ee 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -370,7 +370,7 @@ extern "C" const struct android::NativeBridgeRuntimeValues* native_bridge_getApp
// v2 parts.
-extern "C" bool nb_is_compatible(uint32_t bridge_version ATTRIBUTE_UNUSED) {
+extern "C" bool native_bridge_isCompatibleWith(uint32_t bridge_version ATTRIBUTE_UNUSED) {
return true;
}
@@ -453,7 +453,7 @@ static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* co
return true;
}
-static ::android::NativeBridgeSignalHandlerFn native_bridge_get_signal_handler(int signal) {
+static ::android::NativeBridgeSignalHandlerFn native_bridge_getSignalHandler(int signal) {
// Test segv for already claimed signal, and sigill for not claimed signal
if ((signal == SIGSEGV) || (signal == SIGILL)) {
return &nb_signalhandler;
@@ -461,16 +461,63 @@ static ::android::NativeBridgeSignalHandlerFn native_bridge_get_signal_handler(i
return nullptr;
}
+extern "C" int native_bridge_unloadLibrary(void* handle ATTRIBUTE_UNUSED) {
+ printf("dlclose() in native bridge.\n");
+ return 0;
+}
+
+extern "C" char* native_bridge_getError() {
+ printf("dlerror() in native bridge.\n");
+ return nullptr;
+}
+
+extern "C" bool native_bridge_isPathSupported(const char* library_path ATTRIBUTE_UNUSED) {
+ printf("Checking for path support in native bridge.\n");
+ return false;
+}
+
+extern "C" bool native_bridge_initNamespace(const char* public_ns_sonames ATTRIBUTE_UNUSED,
+ const char* anon_ns_library_path ATTRIBUTE_UNUSED) {
+ printf("Initializing namespaces in native bridge.\n");
+ return false;
+}
+
+extern "C" android::native_bridge_namespace_t*
+native_bridge_createNamespace(const char* name ATTRIBUTE_UNUSED,
+ const char* ld_library_path ATTRIBUTE_UNUSED,
+ const char* default_library_path ATTRIBUTE_UNUSED,
+ uint64_t type ATTRIBUTE_UNUSED,
+ const char* permitted_when_isolated_path ATTRIBUTE_UNUSED,
+ android::native_bridge_namespace_t* parent_ns ATTRIBUTE_UNUSED) {
+ printf("Creating namespace in native bridge.\n");
+ return nullptr;
+}
+
+extern "C" void* native_bridge_loadLibraryExt(const char* libpath ATTRIBUTE_UNUSED,
+ int flag ATTRIBUTE_UNUSED,
+ android::native_bridge_namespace_t* ns ATTRIBUTE_UNUSED) {
+ printf("Loading library with extension in native bridge.\n");
+ return nullptr;
+}
// "NativeBridgeItf" is effectively an API (it is the name of the symbol that will be loaded
// by the native bridge library).
android::NativeBridgeCallbacks NativeBridgeItf {
- .version = 2,
+ // v1
+ .version = 3,
.initialize = &native_bridge_initialize,
.loadLibrary = &native_bridge_loadLibrary,
.getTrampoline = &native_bridge_getTrampoline,
.isSupported = &native_bridge_isSupported,
.getAppEnv = &native_bridge_getAppEnv,
- .isCompatibleWith = &nb_is_compatible,
- .getSignalHandler = &native_bridge_get_signal_handler
+ // v2
+ .isCompatibleWith = &native_bridge_isCompatibleWith,
+ .getSignalHandler = &native_bridge_getSignalHandler,
+ // v3
+ .unloadLibrary = &native_bridge_unloadLibrary,
+ .getError = &native_bridge_getError,
+ .isPathSupported = &native_bridge_isPathSupported,
+ .initNamespace = &native_bridge_initNamespace,
+ .createNamespace = &native_bridge_createNamespace,
+ .loadLibraryExt = &native_bridge_loadLibraryExt
};
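
The table now advertises version 3 and fills in the v3 slots. A hedged sketch of how a loader might gate on those entries before calling them (the struct is the NativeBridgeCallbacks used above; the gating logic itself is illustrative):

    // Only touch v3 members once both the advertised version and the v2
    // compatibility callback agree that v3 is supported.
    static bool CanUseNamespaceHooks(const android::NativeBridgeCallbacks& cb) {
      return cb.version >= 3 &&
             cb.isCompatibleWith != nullptr &&
             cb.isCompatibleWith(3) &&
             cb.createNamespace != nullptr;
    }
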
diff --git a/test/151-OpenFileLimit/expected.txt b/test/151-OpenFileLimit/expected.txt
index 971e472bff..6bc45ef24d 100644
--- a/test/151-OpenFileLimit/expected.txt
+++ b/test/151-OpenFileLimit/expected.txt
@@ -1,3 +1,3 @@
Message includes "Too many open files"
-Message includes "Too many open files"
+thread run.
done.
diff --git a/test/151-OpenFileLimit/info.txt b/test/151-OpenFileLimit/info.txt
index 56ed3963f4..9af393d78b 100644
--- a/test/151-OpenFileLimit/info.txt
+++ b/test/151-OpenFileLimit/info.txt
@@ -1,3 +1,2 @@
-This test verifies the exception message is informative for failure to launch
-a thread due to the number of available file descriptors in the process being
-exceeded.
+This test verifies that running out of file descriptors in the process doesn't
+prevent us from launching a new thread.
diff --git a/test/151-OpenFileLimit/run b/test/151-OpenFileLimit/run
new file mode 100755
index 0000000000..5c83fd0507
--- /dev/null
+++ b/test/151-OpenFileLimit/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+flags="$@"
+
+# Reduce the file descriptor limit so the test will reach the limit sooner.
+ulimit -n 512
+${RUN} ${flags}
diff --git a/test/151-OpenFileLimit/src/Main.java b/test/151-OpenFileLimit/src/Main.java
index 01a9a4ed34..9fe47c8b16 100644
--- a/test/151-OpenFileLimit/src/Main.java
+++ b/test/151-OpenFileLimit/src/Main.java
@@ -52,11 +52,7 @@ public class Main {
thread.start();
thread.join();
} catch (Throwable e) {
- if (e.getMessage().contains("Too many open files")) {
- System.out.println("Message includes \"Too many open files\"");
- } else {
- System.out.println(e.getMessage());
- }
+ System.out.println(e.getMessage());
}
for (int i = 0; i < files.size(); i++) {
diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java
index 23d6438f3b..47b64754b8 100644
--- a/test/530-checker-loops2/src/Main.java
+++ b/test/530-checker-loops2/src/Main.java
@@ -890,11 +890,19 @@ public class Main {
return result;
}
+ /// CHECK-START: int Main.shortIndex(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+ //
+ /// CHECK-START: int Main.shortIndex(int[]) BCE (after)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+ //
+ /// CHECK-START: int Main.shortIndex(int[]) BCE (after)
+ /// CHECK-NOT: Deoptimize
static int shortIndex(int[] a) {
int r = 0;
// Make sure short/int conversions compile well (b/32193474).
- // TODO: investigate type implications and whether we can use
- // constant range to apply dyn BCE on all subscripts.
for (short i = 1; i < 10; i++) {
int ki = i - 1;
r += a[ki] + a[i];
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index ff6ccd49a9..3c053cf5ea 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -284,29 +284,28 @@ public class Main {
/// CHECK-START: java.lang.String Main.$noinline$getNonBootImageString() sharpening (before)
/// CHECK: LoadString load_kind:DexCacheViaMethod
- // FIXME: Disabled because of BSS root visiting issues. Bug: 32124939
- // CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
- // CHECK-DAG: X86ComputeBaseMethodAddress
- // CHECK-DAG: LoadString load_kind:BssEntry
+ /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
+ /// CHECK-DAG: X86ComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
- // CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
- // CHECK-DAG: MipsComputeBaseMethodAddress
- // CHECK-DAG: LoadString load_kind:BssEntry
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
+ /// CHECK-DAG: MipsComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
public static String $noinline$getNonBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/test/621-checker-new-instance/expected.txt b/test/621-checker-new-instance/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/621-checker-new-instance/expected.txt
diff --git a/test/621-checker-new-instance/info.txt b/test/621-checker-new-instance/info.txt
new file mode 100644
index 0000000000..c27c45ca7f
--- /dev/null
+++ b/test/621-checker-new-instance/info.txt
@@ -0,0 +1 @@
+Tests removal of useless LoadClass instructions.
diff --git a/test/621-checker-new-instance/src/Main.java b/test/621-checker-new-instance/src/Main.java
new file mode 100644
index 0000000000..68a46449f0
--- /dev/null
+++ b/test/621-checker-new-instance/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (before)
+ /// CHECK: LoadClass
+ /// CHECK: NewInstance
+
+ /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (after)
+ /// CHECK-NOT: LoadClass
+ /// CHECK: NewInstance
+ public static Object newObject() {
+ return new Object();
+ }
+
+ /// CHECK-START: java.lang.Object Main.newFinalizableMayThrow() prepare_for_register_allocation (after)
+ /// CHECK: LoadClass
+ /// CHECK: NewInstance
+ public static Object newFinalizableMayThrow() {
+ return $inline$newFinalizableMayThrow();
+ }
+
+ public static Object $inline$newFinalizableMayThrow() {
+ return new FinalizableMayThrow();
+ }
+
+ public static void main(String[] args) {
+ newFinalizableMayThrow();
+ newObject();
+ }
+}
+
+class FinalizableMayThrow {
+ // clinit may throw OOME.
+ static Object o = new Object();
+ static String s;
+ public void finalize() {
+ s = "Test";
+ }
+}
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index e752bcbbeb..0e09d1be09 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -70,12 +70,23 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses(
// 1) Free the local references.
// 2) Deallocate.
for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
- env->DeleteGlobalRef(classes[i]);
+ env->DeleteLocalRef(classes[i]);
}
jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes));
return ret;
}
+// OnLoad only caches the jvmti env; the test is driven from Java.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
} // namespace Test907GetLoadedClasses
} // namespace art
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
index 3e135a378d..3f5a059fe2 100755
--- a/test/907-get-loaded-classes/run
+++ b/test/907-get-loaded-classes/run
@@ -37,7 +37,7 @@ fi
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
- --runtime-option -agentpath:${agent}=906-iterate-heap,${arg} \
+ --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
--android-runtime-option -Xplugin:${plugin} \
${other_args} \
--args ${lib}
diff --git a/test/908-gc-start-finish/build b/test/908-gc-start-finish/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/908-gc-start-finish/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/908-gc-start-finish/expected.txt b/test/908-gc-start-finish/expected.txt
new file mode 100644
index 0000000000..45f89dc767
--- /dev/null
+++ b/test/908-gc-start-finish/expected.txt
@@ -0,0 +1,12 @@
+---
+true true
+---
+true true
+---
+true true
+---
+false false
+---
+false false
+---
+false false
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
new file mode 100644
index 0000000000..d54651324d
--- /dev/null
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc_callbacks.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test908GcStartFinish {
+
+static size_t starts = 0;
+static size_t finishes = 0;
+
+static void JNICALL GarbageCollectionFinish(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
+ finishes++;
+}
+
+static void JNICALL GarbageCollectionStart(jvmtiEnv* ti_env ATTRIBUTE_UNUSED) {
+ starts++;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_setupGcCallback(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.GarbageCollectionFinish = GarbageCollectionFinish;
+ callbacks.GarbageCollectionStart = GarbageCollectionStart;
+
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error setting callbacks: %s\n", err);
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableGcTracking(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED,
+ jboolean enable) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_GARBAGE_COLLECTION_START,
+ nullptr);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error enabling/disabling gc callbacks: %s\n", err);
+ }
+ ret = jvmti_env->SetEventNotificationMode(
+ enable ? JVMTI_ENABLE : JVMTI_DISABLE,
+ JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
+ nullptr);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(ret, &err);
+ printf("Error enabling/disabling gc callbacks: %s\n", err);
+ }
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getGcStarts(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jint result = static_cast<jint>(starts);
+ starts = 0;
+ return result;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getGcFinishes(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jint result = static_cast<jint>(finishes);
+ finishes = 0;
+ return result;
+}
+
+// OnLoad only caches the jvmti env; the test is driven from Java.
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ return 0;
+}
+
+} // namespace Test908GcStartFinish
+} // namespace art
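
The two SetEventNotificationMode calls in Java_Main_enableGcTracking differ only in the event constant. A compact sketch of the same toggle (an illustrative refactor, not part of the commit):

    #include "openjdkjvmti/jvmti.h"

    static jvmtiError ToggleGcEvents(jvmtiEnv* env, bool enable) {
      const jvmtiEvent events[] = { JVMTI_EVENT_GARBAGE_COLLECTION_START,
                                    JVMTI_EVENT_GARBAGE_COLLECTION_FINISH };
      for (jvmtiEvent event : events) {
        jvmtiError ret = env->SetEventNotificationMode(
            enable ? JVMTI_ENABLE : JVMTI_DISABLE, event, nullptr);
        if (ret != JVMTI_ERROR_NONE) {
          return ret;  // caller can resolve the name via GetErrorName.
        }
      }
      return JVMTI_ERROR_NONE;
    }
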
diff --git a/test/908-gc-start-finish/gc_callbacks.h b/test/908-gc-start-finish/gc_callbacks.h
new file mode 100644
index 0000000000..177a4eb7b2
--- /dev/null
+++ b/test/908-gc-start-finish/gc_callbacks.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
+#define ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test908GcStartFinish {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test908GcStartFinish
+} // namespace art
+
+#endif // ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
diff --git a/test/908-gc-start-finish/info.txt b/test/908-gc-start-finish/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/908-gc-start-finish/info.txt
@@ -0,0 +1 @@
+Tests the jvmti garbage collection start/finish callbacks.
diff --git a/test/908-gc-start-finish/run b/test/908-gc-start-finish/run
new file mode 100755
index 0000000000..2fc35f0048
--- /dev/null
+++ b/test/908-gc-start-finish/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=908-gc-start-finish,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/908-gc-start-finish/src/Main.java b/test/908-gc-start-finish/src/Main.java
new file mode 100644
index 0000000000..2be0eea975
--- /dev/null
+++ b/test/908-gc-start-finish/src/Main.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Use a list to ensure objects must be allocated.
+ ArrayList<Object> l = new ArrayList<>(100);
+
+ setupGcCallback();
+
+ enableGcTracking(true);
+ run(l);
+
+ enableGcTracking(false);
+ run(l);
+ }
+
+ private static void run(ArrayList<Object> l) {
+ allocate(l, 1);
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+
+ // Note: printStats only reports whether any GC start/finish happened, so the
+ // output does not depend on the exact number of collections.
+ for (int i = 10; i <= 1000; i *= 10) {
+ allocate(l, i);
+ }
+ l.clear();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+
+ Runtime.getRuntime().gc();
+
+ printStats();
+ }
+
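+  // Note: adds one object per call; the tag argument is unused here.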
+ private static void allocate(ArrayList<Object> l, long tag) {
+ Object obj = new Object();
+ l.add(obj);
+ }
+
+ private static void printStats() {
+ System.out.println("---");
+ int s = getGcStarts();
+ int f = getGcFinishes();
+ System.out.println((s > 0) + " " + (f > 0));
+ }
+
+ private static native void setupGcCallback();
+ private static native void enableGcTracking(boolean enable);
+ private static native int getGcStarts();
+ private static native int getGcFinishes();
+}
diff --git a/test/909-attach-agent/attach.cc b/test/909-attach-agent/attach.cc
new file mode 100644
index 0000000000..2b50eb83b4
--- /dev/null
+++ b/test/909-attach-agent/attach.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "909-attach-agent/attach.h"
+
+#include <jni.h>
+#include <stdio.h>
+#include <string.h>
+#include "base/macros.h"
+#include "openjdkjvmti/jvmti.h"
+
+namespace art {
+namespace Test909AttachAgent {
+
+jint OnAttach(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ printf("Attached Agent for test 909-attach-agent\n");
+ fsync(1);
+ jvmtiEnv* env = nullptr;
+ jvmtiEnv* env2 = nullptr;
+
+#define CHECK_CALL_SUCCESS(c) \
+ do { \
+ if ((c) != JNI_OK) { \
+ printf("call " #c " did not succeed\n"); \
+ return -1; \
+ } \
+ } while (false)
+
+ CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&env), JVMTI_VERSION_1_0));
+ CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&env2), JVMTI_VERSION_1_0));
+ if (env == env2) {
+ printf("GetEnv returned same environment twice!\n");
+ return -1;
+ }
+ unsigned char* local_data = nullptr;
+ CHECK_CALL_SUCCESS(env->Allocate(8, &local_data));
+ strcpy(reinterpret_cast<char*>(local_data), "hello!!");
+ CHECK_CALL_SUCCESS(env->SetEnvironmentLocalStorage(local_data));
+ unsigned char* get_data = nullptr;
+ CHECK_CALL_SUCCESS(env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&get_data)));
+ if (get_data != local_data) {
+ printf("Got different data from local storage than what was set!\n");
+ return -1;
+ }
+ CHECK_CALL_SUCCESS(env2->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&get_data)));
+ if (get_data != nullptr) {
+ printf("env2 did not have nullptr local storage.\n");
+ return -1;
+ }
+ CHECK_CALL_SUCCESS(env->Deallocate(local_data));
+ jint version = 0;
+ CHECK_CALL_SUCCESS(env->GetVersionNumber(&version));
+ if ((version & JVMTI_VERSION_1) != JVMTI_VERSION_1) {
+ printf("Unexpected version number!\n");
+ return -1;
+ }
+ CHECK_CALL_SUCCESS(env->DisposeEnvironment());
+ CHECK_CALL_SUCCESS(env2->DisposeEnvironment());
+#undef CHECK_CALL_SUCCESS
+ return JNI_OK;
+}
+
+} // namespace Test909AttachAgent
+} // namespace art
diff --git a/test/909-attach-agent/attach.h b/test/909-attach-agent/attach.h
new file mode 100644
index 0000000000..3e6fe6c1c9
--- /dev/null
+++ b/test/909-attach-agent/attach.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_909_ATTACH_AGENT_ATTACH_H_
+#define ART_TEST_909_ATTACH_AGENT_ATTACH_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test909AttachAgent {
+
+jint OnAttach(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test909AttachAgent
+} // namespace art
+
+#endif // ART_TEST_909_ATTACH_AGENT_ATTACH_H_
diff --git a/test/909-attach-agent/build b/test/909-attach-agent/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/909-attach-agent/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/909-attach-agent/expected.txt b/test/909-attach-agent/expected.txt
new file mode 100644
index 0000000000..eacc595aaf
--- /dev/null
+++ b/test/909-attach-agent/expected.txt
@@ -0,0 +1,3 @@
+Hello, world!
+Attached Agent for test 909-attach-agent
+Goodbye!
diff --git a/test/909-attach-agent/info.txt b/test/909-attach-agent/info.txt
new file mode 100644
index 0000000000..06f3c8c852
--- /dev/null
+++ b/test/909-attach-agent/info.txt
@@ -0,0 +1 @@
+Tests jvmti plugin attaching during live phase.
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
new file mode 100755
index 0000000000..aed6e83d67
--- /dev/null
+++ b/test/909-attach-agent/run
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --android-runtime-option -Xplugin:${plugin} \
+ --args agent:${agent}=909-attach-agent
diff --git a/test/909-attach-agent/src/Main.java b/test/909-attach-agent/src/Main.java
new file mode 100644
index 0000000000..8a8a087458
--- /dev/null
+++ b/test/909-attach-agent/src/Main.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.VMDebug;
+import java.io.IOException;
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println("Hello, world!");
+ for(String a : args) {
+ if(a.startsWith("agent:")) {
+ String agent = a.substring(6);
+ try {
+ VMDebug.attachAgent(agent);
+ } catch(IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ System.out.println("Goodbye!");
+ }
+}
diff --git a/test/956-methodhandles/build b/test/956-methodhandles/build
index 613e97c711..a423ca6b4e 100755
--- a/test/956-methodhandles/build
+++ b/test/956-methodhandles/build
@@ -20,9 +20,6 @@ set -e
if [[ $@ != *"--jvm"* ]]; then
# Don't do anything with jvm.
export USE_JACK=true
- export JACK_SERVER=false
- export JACK_REPOSITORY="${ANDROID_BUILD_TOP}/prebuilts/sdk/tools/jacks"
- export JACK_VERSION=4.11.BETA
fi
./default-build "$@" --experimental method-handles
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index 2802dfa4cc..badea53247 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -57,6 +57,8 @@ public class Main {
public static void main(String[] args) throws Throwable {
testfindSpecial_invokeSuperBehaviour();
testfindSpecial_invokeDirectBehaviour();
+
+ testThrowException();
}
public static void testfindSpecial_invokeSuperBehaviour() throws Throwable {
@@ -131,6 +133,21 @@ public class Main {
} catch (IllegalAccessException expected) {
}
}
+
+ public static void testThrowException() throws Throwable {
+ MethodHandle handle = MethodHandles.throwException(String.class,
+ IllegalArgumentException.class);
+ if (handle.type().returnType() != String.class) {
+ System.out.println("Unexpected return type for handle: " + handle
+ + " [ " + handle.type() + "]");
+ }
+
+ try {
+ handle.invoke();
+ System.out.println("Expected an exception of type: java.lang.IllegalArgumentException");
+ } catch (IllegalArgumentException expected) {
+ }
+ }
}
diff --git a/test/Android.bp b/test/Android.bp
index c3036164a3..be1864cc4e 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -251,6 +251,8 @@ art_cc_defaults {
"905-object-free/tracking_free.cc",
"906-iterate-heap/iterate_heap.cc",
"907-get-loaded-classes/get_loaded_classes.cc",
+ "908-gc-start-finish/gc_callbacks.cc",
+ "909-attach-agent/attach.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index c99510a537..a3c16e656e 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -232,11 +232,9 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),
# Disable 149-suspend-all-stress, its output is flaky (b/28988206).
-# Disable 151-OpenFileLimit (b/32302133)
# Disable 577-profile-foreign-dex (b/27454772).
TEST_ART_BROKEN_ALL_TARGET_TESTS := \
149-suspend-all-stress \
- 151-OpenFileLimit \
577-profile-foreign-dex \
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -263,17 +261,21 @@ endif
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
-# 90{2,3,4,5,6,7} are not supported in current form due to linker
-# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
- 569-checker-pattern-replacement \
+ 569-checker-pattern-replacement
+
+# These 9** tests are not supported in current form due to linker
+# restrictions. See b/31681198
+TEST_ART_BROKEN_TARGET_TESTS += \
902-hello-transformation \
903-hello-tagging \
904-object-allocation \
905-object-free \
906-iterate-heap \
907-get-loaded-classes \
+ 908-gc-start-finish \
+ 909-attach-agent \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -459,8 +461,10 @@ TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
# 802 and 570-checker-osr:
# This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
# when already tracing, and writes an error message that we do not want to check for.
+# 130-hprof: occasional timeout (b/32383962).
TEST_ART_BROKEN_TRACING_RUN_TESTS := \
087-gc-after-link \
+ 130-hprof \
137-cfi \
141-class-unload \
570-checker-osr \
diff --git a/test/VerifierDeps/MyClassWithNoSuper.smali b/test/VerifierDeps/MyClassWithNoSuper.smali
new file mode 100644
index 0000000000..d8509bccf6
--- /dev/null
+++ b/test/VerifierDeps/MyClassWithNoSuper.smali
@@ -0,0 +1,16 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyClassWithNoSuper;
+.super LNoSuper;
diff --git a/test/VerifierDeps/MyClassWithNoSuperButFailures.smali b/test/VerifierDeps/MyClassWithNoSuperButFailures.smali
new file mode 100644
index 0000000000..1dbe9d1faa
--- /dev/null
+++ b/test/VerifierDeps/MyClassWithNoSuperButFailures.smali
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyClassWithNoSuperButFailures;
+.super LNoSuper;
+
+.method public final foo()I
+ .registers 1
+ return-void
+.end method
diff --git a/test/VerifierDeps/MyVerificationFailure.smali b/test/VerifierDeps/MyVerificationFailure.smali
new file mode 100644
index 0000000000..187b1ad0d6
--- /dev/null
+++ b/test/VerifierDeps/MyVerificationFailure.smali
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyVerificationFailure;
+.super Ljava/lang/Object;
+
+.method public final foo()I
+ .registers 1
+ return-void
+.end method
diff --git a/test/dexdump/invoke-polymorphic.dex b/test/dexdump/invoke-polymorphic.dex
new file mode 100644
index 0000000000..5cf3068b9d
--- /dev/null
+++ b/test/dexdump/invoke-polymorphic.dex
Binary files differ
diff --git a/test/dexdump/invoke-polymorphic.lst b/test/dexdump/invoke-polymorphic.lst
new file mode 100644
index 0000000000..3eb8e246c5
--- /dev/null
+++ b/test/dexdump/invoke-polymorphic.lst
@@ -0,0 +1,3 @@
+#invoke-polymorphic.dex
+0x000001bc 8 Main <init> ()V Main.java 9
+0x000001d4 60 Main main ([Ljava/lang/String;)V Main.java 31
diff --git a/test/dexdump/invoke-polymorphic.txt b/test/dexdump/invoke-polymorphic.txt
new file mode 100644
index 0000000000..16e708cce7
--- /dev/null
+++ b/test/dexdump/invoke-polymorphic.txt
@@ -0,0 +1,109 @@
+Processing 'invoke-polymorphic.dex'...
+Opened 'invoke-polymorphic.dex', DEX version '037'
+DEX file header:
+magic : 'dex\n037\0'
+checksum : 0b5f9fd7
+signature : fcf4...f0e5
+file_size : 1160
+header_size : 112
+link_size : 0
+link_off : 0 (0x000000)
+string_ids_size : 30
+string_ids_off : 112 (0x000070)
+type_ids_size : 11
+type_ids_off : 232 (0x0000e8)
+proto_ids_size : 6
+proto_ids_off : 276 (0x000114)
+field_ids_size : 0
+field_ids_off : 0 (0x000000)
+method_ids_size : 5
+method_ids_off : 348 (0x00015c)
+class_defs_size : 1
+class_defs_off : 388 (0x000184)
+data_size : 740
+data_off : 420 (0x0001a4)
+
+Class #0 header:
+class_idx : 2
+access_flags : 1 (0x0001)
+superclass_idx : 4
+interfaces_off : 0 (0x000000)
+source_file_idx : 12
+annotations_off : 528 (0x000210)
+class_data_off : 959 (0x0003bf)
+static_fields_size : 0
+instance_fields_size: 0
+direct_methods_size : 2
+virtual_methods_size: 0
+
+Class #0 annotations:
+Annotations on method #1 'main'
+ VISIBILITY_SYSTEM Ldalvik/annotation/Throws; value={ Ljava/lang/Throwable; }
+
+Class #0 -
+ Class descriptor : 'LMain;'
+ Access flags : 0x0001 (PUBLIC)
+ Superclass : 'Ljava/lang/Object;'
+ Interfaces -
+ Static fields -
+ Instance fields -
+ Direct methods -
+ #0 : (in LMain;)
+ name : '<init>'
+ type : '()V'
+ access : 0x10001 (PUBLIC CONSTRUCTOR)
+ code -
+ registers : 1
+ ins : 1
+ outs : 1
+ insns size : 4 16-bit code units
+0001ac: |[0001ac] Main.<init>:()V
+0001bc: 7010 0200 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0002
+0001c2: 0e00 |0003: return-void
+ catches : (none)
+ positions :
+ 0x0000 line=9
+ locals :
+ 0x0000 - 0x0004 reg=0 this LMain;
+
+ #1 : (in LMain;)
+ name : 'main'
+ type : '([Ljava/lang/String;)V'
+ access : 0x0009 (PUBLIC STATIC)
+ code -
+ registers : 10
+ ins : 1
+ outs : 0
+ insns size : 30 16-bit code units
+0001c4: |[0001c4] Main.main:([Ljava/lang/String;)V
+0001d4: 1802 9a99 9999 9999 0140 |0000: const-wide v2, #double 2.2 // #400199999999999a
+0001de: 1214 |0005: const/4 v4, #int 1 // #1
+0001e0: 1200 |0006: const/4 v0, #int 0 // #0
+0001e2: 1205 |0007: const/4 v5, #int 0 // #0
+0001e4: 1b01 1200 0000 |0008: const-string/jumbo v1, "a" // string@00000012
+0001ea: 0146 |000b: move v6, v4
+0001ec: fb07 0300 0000 0200 |000c: invoke-polymorphic/range {v0, v1, v2, v3, v4, v5, v6}, Ljava/lang/invoke/MethodHandle;.invoke:([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;DILjava/lang/Object;I)Ljava/lang/String; // method@0003, proto@0002
+0001f4: 0c07 |0010: move-result-object v7
+0001f6: fa40 0400 2043 0000 |0011: invoke-polymorphic {v0, v2, v3, v4}, Ljava/lang/invoke/MethodHandle;.invokeExact:([Ljava/lang/Object;)Ljava/lang/Object;, (DI)I // method@0004, proto@0000
+0001fe: 0a08 |0015: move-result v8
+000200: 1b01 1200 0000 |0016: const-string/jumbo v1, "a" // string@00000012
+000206: fa54 0300 1032 0400 |0019: invoke-polymorphic {v0, v1, v2, v3, v4}, Ljava/lang/invoke/MethodHandle;.invoke:([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;DI)V // method@0003, proto@0004
+00020e: 0e00 |001d: return-void
+ catches : (none)
+ positions :
+ 0x0006 line=31
+ 0x0007 line=32
+ 0x0008 line=33
+ 0x0011 line=34
+ 0x0016 line=35
+ 0x001d line=56
+ locals :
+ 0x0007 - 0x001e reg=0 handle Ljava/lang/invoke/MethodHandle;
+ 0x0008 - 0x001e reg=5 o Ljava/lang/Object;
+ 0x0011 - 0x001e reg=7 s Ljava/lang/String;
+ 0x0016 - 0x001e reg=8 x I
+ 0x0000 - 0x001e reg=9 args [Ljava/lang/String;
+
+ Virtual methods -
+ source_file_idx : 12 (Main.java)
+
diff --git a/test/dexdump/invoke-polymorphic.xml b/test/dexdump/invoke-polymorphic.xml
new file mode 100644
index 0000000000..ab99a76768
--- /dev/null
+++ b/test/dexdump/invoke-polymorphic.xml
@@ -0,0 +1,33 @@
+<api>
+<package name=""
+>
+<class name="Main"
+ extends="java.lang.Object"
+ interface="false"
+ abstract="false"
+ static="false"
+ final="false"
+ visibility="public"
+>
+<constructor name="Main"
+ type="Main"
+ static="false"
+ final="false"
+ visibility="public"
+>
+</constructor>
+<method name="main"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="true"
+ final="false"
+ visibility="public"
+>
+<parameter name="arg0" type="java.lang.String[]">
+</parameter>
+</method>
+</class>
+</package>
+</api>
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index c4126365fc..90d0a66fd7 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -30,6 +30,9 @@
#include "904-object-allocation/tracking.h"
#include "905-object-free/tracking_free.h"
#include "906-iterate-heap/iterate_heap.h"
+#include "907-get-loaded-classes/get_loaded_classes.h"
+#include "908-gc-start-finish/gc_callbacks.h"
+#include "909-attach-agent/attach.h"
namespace art {
@@ -52,6 +55,9 @@ AgentLib agents[] = {
{ "904-object-allocation", Test904ObjectAllocation::OnLoad, nullptr },
{ "905-object-free", Test905ObjectFree::OnLoad, nullptr },
{ "906-iterate-heap", Test906IterateHeap::OnLoad, nullptr },
+ { "907-get-loaded-classes", Test907GetLoadedClasses::OnLoad, nullptr },
+ { "908-gc-start-finish", Test908GcStartFinish::OnLoad, nullptr },
+ { "909-attach-agent", nullptr, Test909AttachAgent::OnAttach },
};
static AgentLib* FindAgent(char* name) {
@@ -101,7 +107,6 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void*
return lib->load(vm, remaining_options, reserved);
}
-
extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {
char* remaining_options = nullptr;
char* name_option = nullptr;
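
Each test registers a { name, OnLoad, OnAttach } row in the agents table, and the Agent_OnLoad/Agent_OnAttach entry points dispatch on the name parsed from the options string. A hedged sketch of that lookup (FindAgent's real body sits outside the hunk; this assumes AgentLib exposes a name field):

    #include <cstring>

    static AgentLib* FindAgentSketch(char* name) {
      for (AgentLib& agent : agents) {
        if (strcmp(agent.name, name) == 0) {
          return &agent;
        }
      }
      return nullptr;
    }
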
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 5ef66d196a..12e03386d6 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -45,16 +45,6 @@ while true; do
fi
done
-# Workaround for repo incompatibilities on the Chromium buildbot.
-# TODO: Remove this workaround once https://bugs.chromium.org/p/chromium/issues/detail?id=646329
-# is addressed.
-repo=$(which repo)
-if [[ $repo == *"depot_tools"* ]]; then
- ln -s build/soong/root.bp Android.bp
- ln -s build/soong/bootstrap.bash bootstrap.bash
- echo "include build/core/main.mk" > Makefile
-fi
-
if [[ $mode == "host" ]]; then
make_command="make $j_arg $showcommands build-art-host-tests $common_targets"
make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "