-rw-r--r--  build/Android.gtest.mk | 33
-rw-r--r--  cmdline/cmdline_parser_test.cc | 94
-rw-r--r--  cmdline/cmdline_types.h | 202
-rw-r--r--  compiler/driver/compiler_driver.cc | 22
-rw-r--r--  compiler/image_writer.cc | 65
-rw-r--r--  compiler/image_writer.h | 6
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 15
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 12
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 14
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 6
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 15
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 15
-rw-r--r--  compiler/optimizing/inliner.cc | 4
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 3
-rw-r--r--  compiler/optimizing/nodes.h | 4
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc | 22
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 9
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc | 16
-rw-r--r--  compiler/utils/assembler_test.h | 11
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 20
-rw-r--r--  dex2oat/dex2oat.cc | 96
-rw-r--r--  dex2oat/dex2oat_test.cc | 419
-rw-r--r--  disassembler/disassembler_mips.cc | 2
-rw-r--r--  patchoat/patchoat.cc | 12
-rw-r--r--  patchoat/patchoat.h | 1
-rw-r--r--  runtime/Android.mk | 2
-rw-r--r--  runtime/arch/arm/fault_handler_arm.cc | 13
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 5
-rw-r--r--  runtime/arch/arm64/fault_handler_arm64.cc | 13
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 5
-rw-r--r--  runtime/arch/mips/fault_handler_mips.cc | 13
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 12
-rw-r--r--  runtime/arch/mips64/fault_handler_mips64.cc | 14
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 11
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 20
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 5
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 5
-rw-r--r--  runtime/art_method-inl.h | 4
-rw-r--r--  runtime/art_method.h | 18
-rw-r--r--  runtime/base/mutex-inl.h | 5
-rw-r--r--  runtime/class_linker.cc | 159
-rw-r--r--  runtime/class_linker.h | 23
-rw-r--r--  runtime/class_linker_test.cc | 3
-rw-r--r--  runtime/common_runtime_test.cc | 2
-rw-r--r--  runtime/common_runtime_test.h | 2
-rw-r--r--  runtime/common_throws.cc | 135
-rw-r--r--  runtime/common_throws.h | 2
-rw-r--r--  runtime/dex2oat_environment_test.h | 188
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 40
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc | 12
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 16
-rw-r--r--  runtime/fault_handler.h | 8
-rw-r--r--  runtime/gc/collector/concurrent_copying-inl.h | 41
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 359
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 46
-rw-r--r--  runtime/gc/space/image_space.cc | 4
-rw-r--r--  runtime/globals.h | 6
-rw-r--r--  runtime/image-inl.h | 19
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/image.h | 6
-rw-r--r--  runtime/imtable.h | 77
-rw-r--r--  runtime/interpreter/interpreter_common.h | 2
-rw-r--r--  runtime/jit/jit.cc | 26
-rw-r--r--  runtime/jit/jit.h | 21
-rw-r--r--  runtime/jit/profile_saver.cc | 68
-rw-r--r--  runtime/jit/profile_saver.h | 8
-rw-r--r--  runtime/jit/profile_saver_options.h | 113
-rw-r--r--  runtime/mirror/class-inl.h | 74
-rw-r--r--  runtime/mirror/class.cc | 17
-rw-r--r--  runtime/mirror/class.h | 52
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 1
-rw-r--r--  runtime/oat_file_assistant.h | 1
-rw-r--r--  runtime/oat_file_assistant_test.cc | 151
-rw-r--r--  runtime/parsed_options.cc | 35
-rw-r--r--  runtime/parsed_options.h | 2
-rw-r--r--  runtime/profiler.cc | 920
-rw-r--r--  runtime/profiler.h | 288
-rw-r--r--  runtime/profiler_options.h | 159
-rw-r--r--  runtime/runtime.cc | 38
-rw-r--r--  runtime/runtime.h | 10
-rw-r--r--  runtime/runtime_options.def | 3
-rw-r--r--  runtime/runtime_options.h | 3
-rw-r--r--  runtime/thread-inl.h | 14
-rw-r--r--  runtime/thread.h | 2
-rw-r--r--  test/478-checker-clinit-check-pruning/expected.txt | 1
-rw-r--r--  test/478-checker-clinit-check-pruning/src/Main.java | 57
-rw-r--r--  test/600-verifier-fails/expected.txt | 1
-rw-r--r--  test/600-verifier-fails/src/Main.java | 3
89 files changed, 1816 insertions(+), 2668 deletions(-)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c09116ff5d..424aa7a7eb 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -65,15 +65,18 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
$(call dexpreopt-remove-classes.dex,$@)
# Dex file dependencies for each gtest.
+ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
+
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
-ART_GTEST_oat_file_assistant_test_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
+ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex
ART_GTEST_oat_test_DEX_DEPS := Main
ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
@@ -89,14 +92,25 @@ ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
+ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \
$(HOST_CORE_IMAGE_default_no-pic_64) \
- $(HOST_CORE_IMAGE_default_no-pic_32) \
- $(HOST_OUT_EXECUTABLES)/patchoatd
-ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
+ $(HOST_CORE_IMAGE_default_no-pic_32)
+ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_default_no-pic_64) \
- $(TARGET_CORE_IMAGE_default_no-pic_32) \
- $(TARGET_OUT_EXECUTABLES)/patchoatd
+ $(TARGET_CORE_IMAGE_default_no-pic_32)
+
+ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
+ $(HOST_OUT_EXECUTABLES)/patchoatd
+ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
+ $(TARGET_OUT_EXECUTABLES)/patchoatd
+
+
+ART_GTEST_dex2oat_test_HOST_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
+ART_GTEST_dex2oat_test_TARGET_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
# TODO: document why this is needed.
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
@@ -157,6 +171,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
cmdline/cmdline_parser_test.cc \
dexdump/dexdump_test.cc \
dexlist/dexlist_test.cc \
+ dex2oat/dex2oat_test.cc \
imgdiag/imgdiag_test.cc \
oatdump/oatdump_test.cc \
profman/profile_assistant_test.cc \
@@ -808,11 +823,15 @@ ART_GTEST_jni_internal_test_DEX_DEPS :=
ART_GTEST_oat_file_assistant_test_DEX_DEPS :=
ART_GTEST_oat_file_assistant_test_HOST_DEPS :=
ART_GTEST_oat_file_assistant_test_TARGET_DEPS :=
+ART_GTEST_dex2oat_test_DEX_DEPS :=
+ART_GTEST_dex2oat_test_HOST_DEPS :=
+ART_GTEST_dex2oat_test_TARGET_DEPS :=
ART_GTEST_object_test_DEX_DEPS :=
ART_GTEST_proxy_test_DEX_DEPS :=
ART_GTEST_reflection_test_DEX_DEPS :=
ART_GTEST_stub_test_DEX_DEPS :=
ART_GTEST_transaction_test_DEX_DEPS :=
+ART_GTEST_dex2oat_environment_tests_DEX_DEPS :=
ART_VALGRIND_DEPENDENCIES :=
ART_VALGRIND_TARGET_DEPENDENCIES :=
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=))
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 7c53e01c47..7ded3bfd34 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -30,18 +30,15 @@ namespace art {
bool UsuallyEquals(double expected, double actual);
// This has a gtest dependency, which is why it's only in the gtest.
- bool operator==(const TestProfilerOptions& lhs, const TestProfilerOptions& rhs) {
+ bool operator==(const ProfileSaverOptions& lhs, const ProfileSaverOptions& rhs) {
return lhs.enabled_ == rhs.enabled_ &&
- lhs.output_file_name_ == rhs.output_file_name_ &&
- lhs.period_s_ == rhs.period_s_ &&
- lhs.duration_s_ == rhs.duration_s_ &&
- lhs.interval_us_ == rhs.interval_us_ &&
- UsuallyEquals(lhs.backoff_coefficient_, rhs.backoff_coefficient_) &&
- UsuallyEquals(lhs.start_immediately_, rhs.start_immediately_) &&
- UsuallyEquals(lhs.top_k_threshold_, rhs.top_k_threshold_) &&
- UsuallyEquals(lhs.top_k_change_threshold_, rhs.top_k_change_threshold_) &&
- lhs.profile_type_ == rhs.profile_type_ &&
- lhs.max_stack_depth_ == rhs.max_stack_depth_;
+ lhs.min_save_period_ms_ == rhs.min_save_period_ms_ &&
+ lhs.save_resolved_classes_delay_ms_ == rhs.save_resolved_classes_delay_ms_ &&
+ lhs.startup_method_samples_ == rhs.startup_method_samples_ &&
+ lhs.min_methods_to_save_ == rhs.min_methods_to_save_ &&
+ lhs.min_classes_to_save_ == rhs.min_classes_to_save_ &&
+ lhs.min_notification_before_wake_ == rhs.min_notification_before_wake_ &&
+ lhs.max_notification_before_wake_ == rhs.max_notification_before_wake_;
}
bool UsuallyEquals(double expected, double actual) {
@@ -476,68 +473,21 @@ TEST_F(CmdlineParserTest, TestJitOptions) {
} // TEST_F
/*
-* -X-profile-*
+* -Xps-*
*/
-TEST_F(CmdlineParserTest, TestProfilerOptions) {
- /*
- * Test successes
- */
-
- {
- TestProfilerOptions opt;
- opt.enabled_ = true;
-
- EXPECT_SINGLE_PARSE_VALUE(opt,
- "-Xenable-profiler",
- M::ProfilerOpts);
- }
-
- {
- TestProfilerOptions opt;
- // also need to test 'enabled'
- opt.output_file_name_ = "hello_world.txt";
-
- EXPECT_SINGLE_PARSE_VALUE(opt,
- "-Xprofile-filename:hello_world.txt ",
- M::ProfilerOpts);
- }
-
- {
- TestProfilerOptions opt = TestProfilerOptions();
- // also need to test 'enabled'
- opt.output_file_name_ = "output.txt";
- opt.period_s_ = 123u;
- opt.duration_s_ = 456u;
- opt.interval_us_ = 789u;
- opt.backoff_coefficient_ = 2.0;
- opt.start_immediately_ = true;
- opt.top_k_threshold_ = 50.0;
- opt.top_k_change_threshold_ = 60.0;
- opt.profile_type_ = kProfilerMethod;
- opt.max_stack_depth_ = 1337u;
-
- EXPECT_SINGLE_PARSE_VALUE(opt,
- "-Xprofile-filename:output.txt "
- "-Xprofile-period:123 "
- "-Xprofile-duration:456 "
- "-Xprofile-interval:789 "
- "-Xprofile-backoff:2.0 "
- "-Xprofile-start-immediately "
- "-Xprofile-top-k-threshold:50.0 "
- "-Xprofile-top-k-change-threshold:60.0 "
- "-Xprofile-type:method "
- "-Xprofile-max-stack-depth:1337",
- M::ProfilerOpts);
- }
-
- {
- TestProfilerOptions opt = TestProfilerOptions();
- opt.profile_type_ = kProfilerBoundedStack;
-
- EXPECT_SINGLE_PARSE_VALUE(opt,
- "-Xprofile-type:stack",
- M::ProfilerOpts);
- }
+TEST_F(CmdlineParserTest, ProfileSaverOptions) {
+ ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7);
+
+ EXPECT_SINGLE_PARSE_VALUE(opt,
+ "-Xjitsaveprofilinginfo "
+ "-Xps-min-save-period-ms:1 "
+ "-Xps-save-resolved-classes-delay-ms:2 "
+ "-Xps-startup-method-samples:3 "
+ "-Xps-min-methods-to-save:4 "
+ "-Xps-min-classes-to-save:5 "
+ "-Xps-min-notification-before-wake:6 "
+ "-Xps-max-notification-before-wake:7",
+ M::ProfileSaverOpts);
} // TEST_F
/* -Xexperimental:_ */
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 4797540c35..9b4042c590 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -31,7 +31,7 @@
#include "experimental_flags.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
-#include "profiler_options.h"
+#include "jit/profile_saver_options.h"
namespace art {
@@ -633,84 +633,17 @@ struct CmdlineType<LogVerbosity> : CmdlineTypeParser<LogVerbosity> {
static const char* Name() { return "LogVerbosity"; }
};
-// TODO: Replace with art::ProfilerOptions for the real thing.
-struct TestProfilerOptions {
- // Whether or not the applications should be profiled.
- bool enabled_;
- // Destination file name where the profiling data will be saved into.
- std::string output_file_name_;
- // Generate profile every n seconds.
- uint32_t period_s_;
- // Run profile for n seconds.
- uint32_t duration_s_;
- // Microseconds between samples.
- uint32_t interval_us_;
- // Coefficient to exponential backoff.
- double backoff_coefficient_;
- // Whether the profile should start upon app startup or be delayed by some random offset.
- bool start_immediately_;
- // Top K% of samples that are considered relevant when deciding if the app should be recompiled.
- double top_k_threshold_;
- // How much the top K% samples needs to change in order for the app to be recompiled.
- double top_k_change_threshold_;
- // The type of profile data dumped to the disk.
- ProfileDataType profile_type_;
- // The max depth of the stack collected by the profiler
- uint32_t max_stack_depth_;
-
- TestProfilerOptions() :
- enabled_(false),
- output_file_name_(),
- period_s_(0),
- duration_s_(0),
- interval_us_(0),
- backoff_coefficient_(0),
- start_immediately_(0),
- top_k_threshold_(0),
- top_k_change_threshold_(0),
- profile_type_(ProfileDataType::kProfilerMethod),
- max_stack_depth_(0) {
- }
-
- TestProfilerOptions(const TestProfilerOptions&) = default;
- TestProfilerOptions(TestProfilerOptions&&) = default;
-};
-
-static inline std::ostream& operator<<(std::ostream& stream, const TestProfilerOptions& options) {
- stream << "TestProfilerOptions {" << std::endl;
-
-#define PRINT_TO_STREAM(field) \
- stream << #field << ": '" << options.field << "'" << std::endl;
-
- PRINT_TO_STREAM(enabled_);
- PRINT_TO_STREAM(output_file_name_);
- PRINT_TO_STREAM(period_s_);
- PRINT_TO_STREAM(duration_s_);
- PRINT_TO_STREAM(interval_us_);
- PRINT_TO_STREAM(backoff_coefficient_);
- PRINT_TO_STREAM(start_immediately_);
- PRINT_TO_STREAM(top_k_threshold_);
- PRINT_TO_STREAM(top_k_change_threshold_);
- PRINT_TO_STREAM(profile_type_);
- PRINT_TO_STREAM(max_stack_depth_);
-
- stream << "}";
-
- return stream;
-#undef PRINT_TO_STREAM
-}
-
template <>
-struct CmdlineType<TestProfilerOptions> : CmdlineTypeParser<TestProfilerOptions> {
- using Result = CmdlineParseResult<TestProfilerOptions>;
+struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions> {
+ using Result = CmdlineParseResult<ProfileSaverOptions>;
private:
using StringResult = CmdlineParseResult<std::string>;
using DoubleResult = CmdlineParseResult<double>;
template <typename T>
- static Result ParseInto(TestProfilerOptions& options,
- T TestProfilerOptions::*pField,
+ static Result ParseInto(ProfileSaverOptions& options,
+ T ProfileSaverOptions::*pField,
CmdlineParseResult<T>&& result) {
assert(pField != nullptr);
@@ -722,36 +655,6 @@ struct CmdlineType<TestProfilerOptions> : CmdlineTypeParser<TestProfilerOptions>
return Result::CastError(result);
}
- template <typename T>
- static Result ParseIntoRangeCheck(TestProfilerOptions& options,
- T TestProfilerOptions::*pField,
- CmdlineParseResult<T>&& result,
- T min,
- T max) {
- if (result.IsSuccess()) {
- const T& value = result.GetValue();
-
- if (value < min || value > max) {
- CmdlineParseResult<T> out_of_range = CmdlineParseResult<T>::OutOfRange(value, min, max);
- return Result::CastError(out_of_range);
- }
- }
-
- return ParseInto(options, pField, std::forward<CmdlineParseResult<T>>(result));
- }
-
- static StringResult ParseStringAfterChar(const std::string& s, char c) {
- std::string parsed_value;
-
- std::string::size_type colon = s.find(c);
- if (colon == std::string::npos) {
- return StringResult::Usage(std::string() + "Missing char " + c + " in option " + s);
- }
- // Add one to remove the char we were trimming until.
- parsed_value = s.substr(colon + 1);
- return StringResult::Success(parsed_value);
- }
-
static std::string RemovePrefix(const std::string& source) {
size_t prefix_idx = source.find(":");
@@ -763,87 +666,64 @@ struct CmdlineType<TestProfilerOptions> : CmdlineTypeParser<TestProfilerOptions>
}
public:
- Result ParseAndAppend(const std::string& option, TestProfilerOptions& existing) {
+ Result ParseAndAppend(const std::string& option, ProfileSaverOptions& existing) {
// Special case which doesn't include a wildcard argument definition.
// We pass it through as-is.
- if (option == "-Xenable-profiler") {
+ if (option == "-Xjitsaveprofilinginfo") {
existing.enabled_ = true;
return Result::SuccessNoValue();
}
- // The rest of these options are always the wildcard from '-Xprofile-*'
+ // The rest of these options are always the wildcard from '-Xps-*'
std::string suffix = RemovePrefix(option);
- if (StartsWith(option, "filename:")) {
- CmdlineType<std::string> type_parser;
-
+ if (StartsWith(option, "min-save-period-ms:")) {
+ CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
- &TestProfilerOptions::output_file_name_,
- type_parser.Parse(suffix));
- } else if (StartsWith(option, "period:")) {
+ &ProfileSaverOptions::min_save_period_ms_,
+ type_parser.Parse(suffix));
+ }
+ if (StartsWith(option, "save-resolved-classes-delay-ms:")) {
CmdlineType<unsigned int> type_parser;
-
return ParseInto(existing,
- &TestProfilerOptions::period_s_,
- type_parser.Parse(suffix));
- } else if (StartsWith(option, "duration:")) {
+ &ProfileSaverOptions::save_resolved_classes_delay_ms_,
+ type_parser.Parse(suffix));
+ }
+ if (StartsWith(option, "startup-method-samples:")) {
CmdlineType<unsigned int> type_parser;
-
return ParseInto(existing,
- &TestProfilerOptions::duration_s_,
- type_parser.Parse(suffix));
- } else if (StartsWith(option, "interval:")) {
+ &ProfileSaverOptions::startup_method_samples_,
+ type_parser.Parse(suffix));
+ }
+ if (StartsWith(option, "min-methods-to-save:")) {
CmdlineType<unsigned int> type_parser;
-
return ParseInto(existing,
- &TestProfilerOptions::interval_us_,
- type_parser.Parse(suffix));
- } else if (StartsWith(option, "backoff:")) {
- CmdlineType<double> type_parser;
-
- return ParseIntoRangeCheck(existing,
- &TestProfilerOptions::backoff_coefficient_,
- type_parser.Parse(suffix),
- 1.0,
- 10.0);
-
- } else if (option == "start-immediately") {
- existing.start_immediately_ = true;
- return Result::SuccessNoValue();
- } else if (StartsWith(option, "top-k-threshold:")) {
- CmdlineType<double> type_parser;
-
- return ParseIntoRangeCheck(existing,
- &TestProfilerOptions::top_k_threshold_,
- type_parser.Parse(suffix),
- 0.0,
- 100.0);
- } else if (StartsWith(option, "top-k-change-threshold:")) {
- CmdlineType<double> type_parser;
-
- return ParseIntoRangeCheck(existing,
- &TestProfilerOptions::top_k_change_threshold_,
- type_parser.Parse(suffix),
- 0.0,
- 100.0);
- } else if (option == "type:method") {
- existing.profile_type_ = kProfilerMethod;
- return Result::SuccessNoValue();
- } else if (option == "type:stack") {
- existing.profile_type_ = kProfilerBoundedStack;
- return Result::SuccessNoValue();
- } else if (StartsWith(option, "max-stack-depth:")) {
+ &ProfileSaverOptions::min_methods_to_save_,
+ type_parser.Parse(suffix));
+ }
+ if (StartsWith(option, "min-classes-to-save:")) {
+ CmdlineType<unsigned int> type_parser;
+ return ParseInto(existing,
+ &ProfileSaverOptions::min_classes_to_save_,
+ type_parser.Parse(suffix));
+ }
+ if (StartsWith(option, "min-notification-before-wake:")) {
+ CmdlineType<unsigned int> type_parser;
+ return ParseInto(existing,
+ &ProfileSaverOptions::min_notification_before_wake_,
+ type_parser.Parse(suffix));
+ }
+ if (StartsWith(option, "max-notification-before-wake:")) {
CmdlineType<unsigned int> type_parser;
-
return ParseInto(existing,
- &TestProfilerOptions::max_stack_depth_,
- type_parser.Parse(suffix));
+ &ProfileSaverOptions::max_notification_before_wake_,
+ type_parser.Parse(suffix));
} else {
return Result::Failure(std::string("Invalid suboption '") + option + "'");
}
}
- static const char* Name() { return "TestProfilerOptions"; }
+ static const char* Name() { return "ProfileSaverOptions"; }
static constexpr bool kCanParseBlankless = true;
};
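The ParseAndAppend() above dispatches on the option text after the "-Xps-" wildcard prefix has already been stripped by the parser, then cuts the value out of the suffix after the ':'. A minimal sketch of that dispatch pattern, assuming the wildcard stripping has happened (StartsWith/ValueAfterColon below are stand-ins for the StartsWith/RemovePrefix helpers above, not ART code):

    // Minimal sketch: parse "min-save-period-ms:1", the form that
    // ParseAndAppend() receives for "-Xps-min-save-period-ms:1".
    #include <cstdint>
    #include <iostream>
    #include <string>

    static bool StartsWith(const std::string& s, const std::string& prefix) {
      return s.compare(0, prefix.size(), prefix) == 0;
    }

    // Stand-in for RemovePrefix(): keep everything after the first ':'.
    static std::string ValueAfterColon(const std::string& s) {
      std::string::size_type idx = s.find(':');
      return idx == std::string::npos ? s : s.substr(idx + 1);
    }

    int main() {
      std::string option = "min-save-period-ms:1";
      if (StartsWith(option, "min-save-period-ms:")) {
        uint32_t min_save_period_ms =
            static_cast<uint32_t>(std::stoul(ValueAfterColon(option)));
        std::cout << min_save_period_ms << "\n";  // prints 1
      }
      return 0;
    }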
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 131be37a33..e52dda35bb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -47,7 +47,6 @@
#include "driver/compiler_options.h"
#include "jni_internal.h"
#include "object_lock.h"
-#include "profiler.h"
#include "runtime.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
@@ -2522,28 +2521,11 @@ class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor
true);
}
// Create the conflict tables.
- FillIMTAndConflictTables(klass);
- return true;
- }
-
- private:
- void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
- if (!klass->ShouldHaveImt()) {
- return;
- }
- if (visited_classes_.find(klass) != visited_classes_.end()) {
- return;
- }
- if (klass->HasSuperClass()) {
- FillIMTAndConflictTables(klass->GetSuperClass());
- }
- if (!klass->IsTemp()) {
+ if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
}
- visited_classes_.insert(klass);
+ return true;
}
-
- std::set<mirror::Class*> visited_classes_;
};
void CompilerDriver::InitializeClasses(jobject class_loader,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 063eb11718..da10568475 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1232,10 +1232,9 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
// Assign offsets for all runtime methods in the IMT since these may hold conflict tables
// live.
- if (as_klass->ShouldHaveImt()) {
- ImTable* imt = as_klass->GetImt(target_ptr_size_);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
+ if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
DCHECK(imt_method != nullptr);
if (imt_method->IsRuntimeMethod() &&
!IsInBootImage(imt_method) &&
@@ -1244,11 +1243,6 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
}
}
-
- if (as_klass->ShouldHaveImt()) {
- ImTable* imt = as_klass->GetImt(target_ptr_size_);
- TryAssignImTableOffset(imt, oat_index);
- }
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -1275,23 +1269,6 @@ bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
return native_object_relocations_.find(ptr) != native_object_relocations_.end();
}
-void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
- // No offset, or already assigned.
- if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
- return;
- }
- // If the method is a conflict method we also want to assign the conflict table offset.
- ImageInfo& image_info = GetImageInfo(oat_index);
- const size_t size = ImTable::SizeInBytes(target_ptr_size_);
- native_object_relocations_.emplace(
- imt,
- NativeObjectRelocation {
- oat_index,
- image_info.bin_slot_sizes_[kBinImTable],
- kNativeObjectRelocationTypeIMTable});
- image_info.bin_slot_sizes_[kBinImTable] += size;
-}
-
void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
// No offset, or already assigned.
if (table == nullptr || NativeRelocationAssigned(table)) {
@@ -1414,7 +1391,6 @@ void ImageWriter::CalculateNewObjectOffsets() {
bin_offset = RoundUp(bin_offset, method_alignment);
break;
}
- case kBinImTable:
case kBinIMTConflictTable: {
bin_offset = RoundUp(bin_offset, target_ptr_size_);
break;
@@ -1485,10 +1461,6 @@ size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) c
bin_slot_offsets_[kBinArtMethodClean],
bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);
- // IMT section.
- ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
- *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]);
-
// Conflict tables section.
ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
*imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
@@ -1613,13 +1585,6 @@ class FixupRootVisitor : public RootVisitor {
ImageWriter* const image_writer_;
};
-void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- ArtMethod* method = orig->Get(i, target_ptr_size_);
- copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
- }
-}
-
void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
const size_t count = orig->NumEntries(target_ptr_size_);
for (size_t i = 0; i < count; ++i) {
@@ -1677,12 +1642,6 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
case kNativeObjectRelocationTypeDexCacheArray:
// Nothing to copy here, everything is done in FixupDexCache().
break;
- case kNativeObjectRelocationTypeIMTable: {
- ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
- ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
- CopyAndFixupImTable(orig_imt, dest_imt);
- break;
- }
case kNativeObjectRelocationTypeIMTConflictTable: {
auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
CopyAndFixupImtConflictTable(
@@ -1891,25 +1850,13 @@ uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
}
template <typename T>
-std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
- std::ostringstream oss;
- oss << ptr;
- return oss.str();
-}
-
-template <>
-std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
- return PrettyMethod(method);
-}
-
-template <typename T>
T* ImageWriter::NativeLocationInImage(T* obj) {
if (obj == nullptr || IsInBootImage(obj)) {
return obj;
} else {
auto it = native_object_relocations_.find(obj);
- CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
- << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
+ CHECK(it != native_object_relocations_.end()) << obj << " spaces "
+ << Runtime::Current()->GetHeap()->DumpSpaces();
const NativeObjectRelocation& relocation = it->second;
ImageInfo& image_info = GetImageInfo(relocation.oat_index);
return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
@@ -2263,8 +2210,6 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
return kBinDexCacheArray;
case kNativeObjectRelocationTypeRuntimeMethod:
return kBinRuntimeMethod;
- case kNativeObjectRelocationTypeIMTable:
- return kBinImTable;
case kNativeObjectRelocationTypeIMTConflictTable:
return kBinIMTConflictTable;
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1efdc22c0a..51976c511f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,8 +169,6 @@ class ImageWriter FINAL {
// ArtMethods may be dirty if the class has native methods or a declaring class that isn't
// initialized.
kBinArtMethodDirty,
- // IMT (clean)
- kBinImTable,
// Conflict tables (clean).
kBinIMTConflictTable,
// Runtime methods (always clean, do not have a length prefix array).
@@ -193,7 +191,6 @@ class ImageWriter FINAL {
kNativeObjectRelocationTypeArtMethodDirty,
kNativeObjectRelocationTypeArtMethodArrayDirty,
kNativeObjectRelocationTypeRuntimeMethod,
- kNativeObjectRelocationTypeIMTable,
kNativeObjectRelocationTypeIMTConflictTable,
kNativeObjectRelocationTypeDexCacheArray,
};
@@ -404,7 +401,6 @@ class ImageWriter FINAL {
void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
@@ -437,8 +433,6 @@ class ImageWriter FINAL {
size_t oat_index)
SHARED_REQUIRES(Locks::mutator_lock_);
- void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
-
// Assign the offset for an IMT conflict table. Does nothing if the table already has a native
// relocation.
void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 83b4705302..5316d59bff 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1889,6 +1889,8 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
LocationSummary* locations = invoke->GetLocations();
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1914,14 +1916,10 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
- __ LoadFromOffset(kLoadWord, temp, temp,
- mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
- uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex(), kArmPointerSize));
// temp = temp->GetImtEntryAt(method_offset);
- __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
uint32_t entry_point =
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
@@ -6961,11 +6959,8 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kArmPointerSize).SizeValue();
} else {
- __ LoadFromOffset(kLoadWord, locations->Out().AsRegister<Register>(),
- locations->InAt(0).AsRegister<Register>(),
- mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
- method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex(), kArmPointerSize));
+ method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ instruction->GetIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
}
__ LoadFromOffset(kLoadWord,
locations->Out().AsRegister<Register>(),
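The pattern recurring in each backend above replaces two dependent loads (Class to ImTable pointer to entry) with a single load at a fixed offset inside the Class object: the interface method's IMT index is reduced modulo mirror::Class::kImtSize and turned into a byte offset. A sketch of that slot arithmetic, where the base offset and table size are illustrative assumptions rather than ART's actual layout:

    // Sketch of the arithmetic behind EmbeddedImTableEntryOffset(); the
    // constants below are assumptions for illustration only.
    #include <cstdint>
    #include <iostream>

    constexpr uint32_t kImtSize = 64;          // assumed table size
    constexpr uint32_t kImtBaseOffset = 0x100; // hypothetical offset in Class
    constexpr uint32_t kArmPointerSize = 4;

    constexpr uint32_t EmbeddedImTableEntryOffset(uint32_t slot) {
      return kImtBaseOffset + slot * kArmPointerSize;
    }

    int main() {
      uint32_t imt_index = 70;               // e.g. from invoke->GetImtIndex()
      uint32_t slot = imt_index % kImtSize;  // 70 % 64 == 6
      // Distinct interface methods can land in the same slot; such conflicts
      // resolve through a runtime trampoline (see the inliner change below,
      // which bails out when the entry IsRuntimeMethod()).
      std::cout << EmbeddedImTableEntryOffset(slot) << "\n";  // 280 (0x118)
      return 0;
    }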
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 07d5e50c6b..fc2c2c34aa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3506,6 +3506,8 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
Register temp = XRegisterFrom(locations->GetTemp(0));
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
Location receiver = locations->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
@@ -3535,10 +3537,6 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
- __ Ldr(temp,
- MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
- uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex(), kArm64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
@@ -5355,10 +5353,8 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kArm64PointerSize).SizeValue();
} else {
- __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
- mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
- method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex(), kArm64PointerSize));
+ method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
}
__ Ldr(XRegisterFrom(locations->Out()),
MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b9b3463f4d..4d44c18dcf 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3698,6 +3698,8 @@ void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
@@ -3714,10 +3716,6 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ LoadFromOffset(kLoadWord, temp, temp,
- mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
- uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex(), kMipsPointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -5167,12 +5165,8 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kMipsPointerSize).SizeValue();
} else {
- __ LoadFromOffset(kLoadWord,
- locations->Out().AsRegister<Register>(),
- locations->InAt(0).AsRegister<Register>(),
- mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
- method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex(), kMipsPointerSize));
+ method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ instruction->GetIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
}
__ LoadFromOffset(kLoadWord,
locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 2330c4803e..2e78884daf 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2932,6 +2932,8 @@ void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
@@ -2948,10 +2950,6 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ LoadFromOffset(kLoadDoubleword, temp, temp,
- mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
- uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex(), kMips64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index af10bad6cc..1261619536 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2027,6 +2027,8 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
LocationSummary* locations = invoke->GetLocations();
Register temp = locations->GetTemp(0).AsRegister<Register>();
XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -2053,12 +2055,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
- // temp = temp->GetAddressOfIMT()
- __ movl(temp,
- Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
// temp = temp->GetImtEntryAt(method_offset);
- uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex(), kX86PointerSize));
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp,
@@ -4078,12 +4075,8 @@ void InstructionCodeGeneratorX86::VisitClassTableGet(HClassTableGet* instruction
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kX86PointerSize).SizeValue();
} else {
- __ movl(locations->InAt(0).AsRegister<Register>(),
- Address(locations->InAt(0).AsRegister<Register>(),
- mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
- // temp = temp->GetImtEntryAt(method_offset);
- method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex(), kX86PointerSize));
+ method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ instruction->GetIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
}
__ movl(locations->Out().AsRegister<Register>(),
Address(locations->InAt(0).AsRegister<Register>(), method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2b21454101..5e30203b38 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2257,6 +2257,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
LocationSummary* locations = invoke->GetLocations();
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -2282,12 +2284,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
// intact/accessible until the end of the marking phase (the
// concurrent copying collector may not in the future).
__ MaybeUnpoisonHeapReference(temp);
- // temp = temp->GetAddressOfIMT()
- __ movq(temp,
- Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
- // temp = temp->GetImtEntryAt(method_offset);
- uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex(), kX86_64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -4011,11 +4007,8 @@ void InstructionCodeGeneratorX86_64::VisitClassTableGet(HClassTableGet* instruct
method_offset = mirror::Class::EmbeddedVTableEntryOffset(
instruction->GetIndex(), kX86_64PointerSize).SizeValue();
} else {
- __ movq(locations->Out().AsRegister<CpuRegister>(),
- Address(locations->InAt(0).AsRegister<CpuRegister>(),
- mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
- method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex(), kX86_64PointerSize));
+ method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ instruction->GetIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
}
__ movq(locations->Out().AsRegister<CpuRegister>(),
Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index be4ea200a9..c67b2d5fe9 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -656,8 +656,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
}
ArtMethod* new_method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
- new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
- method_index, pointer_size);
+ new_method = ic.GetTypeAt(i)->GetEmbeddedImTableEntry(
+ method_index % mirror::Class::kImtSize, pointer_size);
if (new_method->IsRuntimeMethod()) {
// Bail out as soon as we see a conflict trampoline in one of the target's
// interface table.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index afac5f9cf1..b4125299ea 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -16,7 +16,6 @@
#include "instruction_builder.h"
-#include "art_method-inl.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "driver/compiler_options.h"
@@ -891,7 +890,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
return_type,
dex_pc,
method_idx,
- resolved_method->GetImtIndex());
+ resolved_method->GetDexMethodIndex());
}
return HandleInvoke(invoke,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 29df7c8ab8..6b2c33e668 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5029,7 +5029,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
+ return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
size_t ComputeHashCode() const OVERRIDE {
@@ -5076,7 +5076,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
+ return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
const FieldInfo& GetFieldInfo() const { return field_info_; }
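Both CanDoImplicitNullCheckOn() overrides above now defer to a shared art::CanDoImplicitNullCheckOn() helper instead of comparing against kPageSize inline; runtime/globals.h is touched by this change, which is presumably where the helper lives. The idea, sketched with an illustrative bound (the real limit is whatever the helper in globals.h encodes):

    // Sketch only: an implicit null check turns an explicit test into a
    // faulting load, which is sound only while (nullptr + offset) still
    // lands in the unmapped low region. The bound below is an assumption.
    #include <cstdint>

    constexpr uintptr_t kPageSize = 4096;  // typical page size

    inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
      return offset < kPageSize;  // illustrative, not necessarily ART's bound
    }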
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 764160adce..05eb06333e 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -32,21 +32,21 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
// 0x00000012: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0xD7, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
- 0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0xD7, 0x42, 0xA9,
- 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
+ 0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
+ 0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0x17, 0x40, 0xF9,
+ 0xF5, 0x7B, 0x43, 0xA9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
- 0x44, 0x0E, 0x40, 0x44, 0x94, 0x06, 0x95, 0x04, 0x44, 0x9E, 0x02, 0x44,
+ 0x44, 0x0E, 0x40, 0x44, 0x94, 0x06, 0x44, 0x95, 0x04, 0x9E, 0x02, 0x44,
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
- 0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
+ 0x44, 0xD4, 0x44, 0xD5, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
// 0x00000000: str x0, [sp, #-64]!
// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: stp x20, x21, [sp, #40]
+// 0x00000004: str x20, [sp, #40]
// 0x00000008: .cfi_offset: r20 at cfa-24
-// 0x00000008: .cfi_offset: r21 at cfa-16
-// 0x00000008: str lr, [sp, #56]
+// 0x00000008: stp x21, lr, [sp, #48]
+// 0x0000000c: .cfi_offset: r21 at cfa-16
// 0x0000000c: .cfi_offset: r30 at cfa-8
// 0x0000000c: stp d8, d9, [sp, #24]
// 0x00000010: .cfi_offset_extended: r72 at cfa-40
@@ -55,10 +55,10 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
// 0x00000010: ldp d8, d9, [sp, #24]
// 0x00000014: .cfi_restore_extended: r72
// 0x00000014: .cfi_restore_extended: r73
-// 0x00000014: ldp x20, x21, [sp, #40]
+// 0x00000014: ldr x20, [sp, #40]
// 0x00000018: .cfi_restore: r20
-// 0x00000018: .cfi_restore: r21
-// 0x00000018: ldr lr, [sp, #56]
+// 0x00000018: ldp x21, lr, [sp, #48]
+// 0x0000001c: .cfi_restore: r21
// 0x0000001c: .cfi_restore: r30
// 0x0000001c: add sp, sp, #0x40 (64)
// 0x00000020: .cfi_def_cfa_offset: 0
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 696b8c6859..8fb539661f 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -146,7 +146,11 @@ void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex()), 0);
// The allocation entry point that deals with access checks does not work with inlined
// methods, so we need to check whether this allocation comes from an inlined method.
- if (has_only_one_use && !instruction->GetEnvironment()->IsFromInlinedInvoke()) {
+ // We also need to make the same check as for moving the clinit check: whether the HLoadClass
+ // has the clinit check responsibility or not (HLoadClass can throw anyway).
+ if (has_only_one_use &&
+ !instruction->GetEnvironment()->IsFromInlinedInvoke() &&
+ CanMoveClinitCheck(load_class, instruction)) {
// We can remove the load class from the graph. If it needed access checks, we delegate
// the access check to the allocation.
if (load_class->NeedsAccessCheck()) {
@@ -203,7 +207,8 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
HInstruction* user) const {
// Determine if input and user come from the same dex instruction, so that we can move
// the clinit check responsibility from one to the other, i.e. from HClinitCheck (user)
- // to HLoadClass (input), or from HClinitCheck (input) to HInvokeStaticOrDirect (user).
+ // to HLoadClass (input), or from HClinitCheck (input) to HInvokeStaticOrDirect (user),
+ // or from HLoadClass (input) to HNewInstance (user).
// Start with a quick dex pc check.
if (user->GetDexPc() != input->GetDexPc()) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1842f00ff6..54ed62bef3 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -648,6 +648,15 @@ static inline dwarf::Reg DWARFReg(CPURegister reg) {
void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
int size = registers.RegisterSizeInBytes();
const Register sp = vixl_masm_->StackPointer();
+ // Since we are operating on register pairs, we would like to align on
+ // double the standard size; on the other hand, we don't want to insert
+ // an extra store, which will happen if the number of registers is even.
+ if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ ___ Str(dst0, MemOperand(sp, offset));
+ cfi_.RelOffset(DWARFReg(dst0), offset);
+ offset += size;
+ }
while (registers.Count() >= 2) {
const CPURegister& dst0 = registers.PopLowestIndex();
const CPURegister& dst1 = registers.PopLowestIndex();
@@ -667,6 +676,13 @@ void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
void Arm64Assembler::UnspillRegisters(vixl::CPURegList registers, int offset) {
int size = registers.RegisterSizeInBytes();
const Register sp = vixl_masm_->StackPointer();
+ // Be consistent with the logic for spilling registers.
+ if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ ___ Ldr(dst0, MemOperand(sp, offset));
+ cfi_.Restore(DWARFReg(dst0));
+ offset += size;
+ }
while (registers.Count() >= 2) {
const CPURegister& dst0 = registers.PopLowestIndex();
const CPURegister& dst1 = registers.PopLowestIndex();
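The new leading str/ldr in SpillRegisters()/UnspillRegisters() exists so that every subsequent stp/ldp lands on a 2*size-aligned offset, and it is emitted only when the register count is odd: with an even count, a lone leading store would merely trade one unpaired store for another at the tail. The concrete case from optimizing_cfi_test_expected.inc above (three 8-byte registers spilled starting at sp+40) can be traced with a small sketch:

    // Sketch of the pairing decision, not the assembler itself: spill
    // {x20, x21, lr} (size 8) starting at offset 40, as in the updated
    // expected CFI test output.
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> regs = {"x20", "x21", "lr"};
      int size = 8;
      int offset = 40;
      size_t i = 0;
      // Odd count and offset not 2*size-aligned: one leading str re-aligns
      // the stream so every remaining pair can use a single stp.
      if (offset % (2 * size) != 0 && regs.size() % 2 != 0) {
        std::printf("str %s, [sp, #%d]\n", regs[i++].c_str(), offset);
        offset += size;
      }
      for (; i + 1 < regs.size(); i += 2, offset += 2 * size) {
        std::printf("stp %s, %s, [sp, #%d]\n",
                    regs[i].c_str(), regs[i + 1].c_str(), offset);
      }
      if (i < regs.size()) {  // odd count with an aligned start: one tail str
        std::printf("str %s, [sp, #%d]\n", regs[i].c_str(), offset);
      }
      return 0;
    }
    // Prints:
    //   str x20, [sp, #40]
    //   stp x21, lr, [sp, #48]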
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index afe0576906..92b4c8e041 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -344,6 +344,17 @@ class AssemblerTest : public testing::Test {
}
template <typename ImmType>
+ std::string RepeatFFIb(void (Ass::*f)(FPReg, FPReg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegistersImmBits<FPReg, FPReg, ImmType>(f,
+ imm_bits,
+ GetFPRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ template <typename ImmType>
std::string RepeatIbFF(void (Ass::*f)(ImmType, FPReg, FPReg), int imm_bits, std::string fmt) {
return RepeatTemplatedImmBitsRegisters<ImmType, FPReg, FPReg>(f,
GetFPRegisters(),
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index c722d0c333..a1d6ad6a2f 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -647,6 +647,26 @@ TEST_F(AssemblerMIPSTest, Movt) {
DriverStr(RepeatRRIb(&mips::MipsAssembler::Movt, 3, "movt ${reg1}, ${reg2}, $fcc{imm}"), "Movt");
}
+TEST_F(AssemblerMIPSTest, MovfS) {
+ DriverStr(RepeatFFIb(&mips::MipsAssembler::MovfS, 3, "movf.s ${reg1}, ${reg2}, $fcc{imm}"),
+ "MovfS");
+}
+
+TEST_F(AssemblerMIPSTest, MovfD) {
+ DriverStr(RepeatFFIb(&mips::MipsAssembler::MovfD, 3, "movf.d ${reg1}, ${reg2}, $fcc{imm}"),
+ "MovfD");
+}
+
+TEST_F(AssemblerMIPSTest, MovtS) {
+ DriverStr(RepeatFFIb(&mips::MipsAssembler::MovtS, 3, "movt.s ${reg1}, ${reg2}, $fcc{imm}"),
+ "MovtS");
+}
+
+TEST_F(AssemblerMIPSTest, MovtD) {
+ DriverStr(RepeatFFIb(&mips::MipsAssembler::MovtD, 3, "movt.d ${reg1}, ${reg2}, $fcc{imm}"),
+ "MovtD");
+}
+
TEST_F(AssemblerMIPSTest, CvtSW) {
DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "CvtSW");
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 24a4d586a2..c133980234 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -22,6 +22,7 @@
#include <fstream>
#include <iostream>
+#include <limits>
#include <sstream>
#include <string>
#include <unordered_set>
@@ -81,6 +82,9 @@
namespace art {
+static constexpr size_t kDefaultMinDexFilesForSwap = 2;
+static constexpr size_t kDefaultMinDexFileCumulativeSizeForSwap = 20 * MB;
+
static int original_argc;
static char** original_argv;
@@ -351,6 +355,20 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" --swap-fd=<file-descriptor>: specifies a file to use for swap (by descriptor).");
UsageError(" Example: --swap-fd=10");
UsageError("");
+ UsageError(" --swap-dex-size-threshold=<size>: specifies the minimum total dex file size in");
+ UsageError(" bytes to allow the use of swap.");
+ UsageError(" Example: --swap-dex-size-threshold=1000000");
+ UsageError(" Default: %zu", kDefaultMinDexFileCumulativeSizeForSwap);
+ UsageError("");
+ UsageError(" --swap-dex-count-threshold=<count>: specifies the minimum number of dex files to");
+ UsageError(" allow the use of swap.");
+ UsageError(" Example: --swap-dex-count-threshold=10");
+ UsageError(" Default: %zu", kDefaultMinDexFilesForSwap);
+ UsageError("");
+ UsageError(" --very-large-app-threshold=<size>: specifies the minimum total dex file size in");
+ UsageError(" bytes to consider the input \"very large\" and punt on the compilation.");
+ UsageError(" Example: --very-large-app-threshold=100000000");
+ UsageError("");
UsageError(" --app-image-fd=<file-descriptor>: specify output file descriptor for app image.");
UsageError(" Example: --app-image-fd=10");
UsageError("");
@@ -473,25 +491,6 @@ class WatchDog {
pthread_t pthread_;
};
-static constexpr size_t kMinDexFilesForSwap = 2;
-static constexpr size_t kMinDexFileCumulativeSizeForSwap = 20 * MB;
-
-static bool UseSwap(bool is_image, std::vector<const DexFile*>& dex_files) {
- if (is_image) {
- // Don't use swap, we know generation should succeed, and we don't want to slow it down.
- return false;
- }
- if (dex_files.size() < kMinDexFilesForSwap) {
- // If there are less dex files than the threshold, assume it's gonna be fine.
- return false;
- }
- size_t dex_files_size = 0;
- for (const auto* dex_file : dex_files) {
- dex_files_size += dex_file->GetHeader().file_size_;
- }
- return dex_files_size >= kMinDexFileCumulativeSizeForSwap;
-}
-
class Dex2Oat FINAL {
public:
explicit Dex2Oat(TimingLogger* timings) :
@@ -1132,6 +1131,21 @@ class Dex2Oat FINAL {
swap_file_name_ = option.substr(strlen("--swap-file=")).data();
} else if (option.starts_with("--swap-fd=")) {
ParseUintOption(option, "--swap-fd", &swap_fd_, Usage);
+ } else if (option.starts_with("--swap-dex-size-threshold=")) {
+ ParseUintOption(option,
+ "--swap-dex-size-threshold",
+ &min_dex_file_cumulative_size_for_swap_,
+ Usage);
+ } else if (option.starts_with("--swap-dex-count-threshold=")) {
+ ParseUintOption(option,
+ "--swap-dex-count-threshold",
+ &min_dex_files_for_swap_,
+ Usage);
+ } else if (option.starts_with("--very-large-app-threshold=")) {
+ ParseUintOption(option,
+ "--very-large-app-threshold",
+ &very_large_threshold_,
+ Usage);
} else if (option.starts_with("--app-image-file=")) {
app_image_file_name_ = option.substr(strlen("--app-image-file=")).data();
} else if (option.starts_with("--app-image-fd=")) {
@@ -1414,6 +1428,19 @@ class Dex2Oat FINAL {
}
// Note that dex2oat won't close the swap_fd_. The compiler driver's swap space will do that.
+ // If we need to downgrade the compiler-filter for size reasons, do that check now.
+ if (!IsBootImage() && IsVeryLarge(dex_files_)) {
+ if (!CompilerFilter::IsAsGoodAs(CompilerFilter::kVerifyAtRuntime,
+ compiler_options_->GetCompilerFilter())) {
+ LOG(INFO) << "Very large app, downgrading to verify-at-runtime.";
+ // Note: this change won't be reflected in the key-value store, as that had to be
+ // finalized before loading the dex files. This setup is currently required
+ // to get the size from the DexFile objects.
+ // TODO: refactor. b/29790079
+ compiler_options_->SetCompilerFilter(CompilerFilter::kVerifyAtRuntime);
+ }
+ }
+
if (IsBootImage()) {
// For boot image, pass opened dex files to the Runtime::Create().
// Note: Runtime acquires ownership of these dex files.
@@ -1842,10 +1869,6 @@ class Dex2Oat FINAL {
}
}
- CompilerOptions* GetCompilerOptions() const {
- return compiler_options_.get();
- }
-
bool IsImage() const {
return IsAppImage() || IsBootImage();
}
@@ -1897,6 +1920,30 @@ class Dex2Oat FINAL {
}
private:
+ bool UseSwap(bool is_image, const std::vector<const DexFile*>& dex_files) {
+ if (is_image) {
+ // Don't use swap; we know generation should succeed, and we don't want to slow it down.
+ return false;
+ }
+ if (dex_files.size() < min_dex_files_for_swap_) {
+ // If there are fewer dex files than the threshold, assume it's going to be fine.
+ return false;
+ }
+ size_t dex_files_size = 0;
+ for (const auto* dex_file : dex_files) {
+ dex_files_size += dex_file->GetHeader().file_size_;
+ }
+ return dex_files_size >= min_dex_file_cumulative_size_for_swap_;
+ }
+
+ bool IsVeryLarge(std::vector<const DexFile*>& dex_files) {
+ size_t dex_files_size = 0;
+ for (const auto* dex_file : dex_files) {
+ dex_files_size += dex_file->GetHeader().file_size_;
+ }
+ return dex_files_size >= very_large_threshold_;
+ }
+
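Both helpers sum DexFile::GetHeader().file_size_ over the inputs and compare the total against a threshold. A standalone sketch of the swap decision, assuming the defaults keep the removed values of 2 files and 20 MB (the kDefault* constants are declared elsewhere in this file):

    // Minimal model of UseSwap(): both the count check and the
    // cumulative-size check must pass before swap is accepted.
    #include <cstddef>
    #include <vector>

    bool ShouldUseSwap(const std::vector<size_t>& dex_file_sizes,
                       size_t min_count = 2,                     // assumed default
                       size_t min_total = 20u * 1024 * 1024) {   // assumed default
      if (dex_file_sizes.size() < min_count) {
        return false;
      }
      size_t total = 0;
      for (size_t size : dex_file_sizes) {
        total += size;
      }
      return total >= min_total;
    }
    // Example: three 8 MB dex files -> 24 MB >= 20 MB and 3 >= 2 -> swap is used.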
template <typename T>
static std::vector<T*> MakeNonOwningPointerVector(const std::vector<std::unique_ptr<T>>& src) {
std::vector<T*> result;
@@ -2486,6 +2533,9 @@ class Dex2Oat FINAL {
bool dump_slow_timing_;
std::string swap_file_name_;
int swap_fd_;
+ size_t min_dex_files_for_swap_ = kDefaultMinDexFilesForSwap;
+ size_t min_dex_file_cumulative_size_for_swap_ = kDefaultMinDexFileCumulativeSizeForSwap;
+ size_t very_large_threshold_ = std::numeric_limits<size_t>::max();
std::string app_image_file_name_;
int app_image_fd_;
std::string profile_file_;
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
new file mode 100644
index 0000000000..6188883358
--- /dev/null
+++ b/dex2oat/dex2oat_test.cc
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <vector>
+#include <sstream>
+
+#include "common_runtime_test.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stringprintf.h"
+#include "dex2oat_environment_test.h"
+#include "oat.h"
+#include "oat_file.h"
+#include "utils.h"
+
+#include <sys/wait.h>
+#include <unistd.h>
+
+namespace art {
+
+class Dex2oatTest : public Dex2oatEnvironmentTest {
+ public:
+ virtual void TearDown() OVERRIDE {
+ Dex2oatEnvironmentTest::TearDown();
+
+ output_ = "";
+ error_msg_ = "";
+ success_ = false;
+ }
+
+ protected:
+ void GenerateOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter,
+ const std::vector<std::string>& extra_args = {},
+ bool expect_success = true) {
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + odex_location);
+ args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
+ args.push_back("--runtime-arg");
+ args.push_back("-Xnorelocate");
+
+ args.insert(args.end(), extra_args.begin(), extra_args.end());
+
+ std::string error_msg;
+ bool success = Dex2Oat(args, &error_msg);
+
+ if (expect_success) {
+ ASSERT_TRUE(success) << error_msg;
+
+ // Verify the odex file was generated as expected.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+
+ CheckFilter(filter, odex_file->GetCompilerFilter());
+ } else {
+ ASSERT_FALSE(success) << output_;
+
+ error_msg_ = error_msg;
+
+ // Verify there's no loadable odex file.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() == nullptr);
+ }
+ }
+
+ // Check the input compiler filter against the generated oat file's filter. May be overridden
+ // in subclasses when equality is not expected.
+ virtual void CheckFilter(CompilerFilter::Filter expected, CompilerFilter::Filter actual) {
+ EXPECT_EQ(expected, actual);
+ }
+
+ bool Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) {
+ Runtime* runtime = Runtime::Current();
+
+ const std::vector<gc::space::ImageSpace*>& image_spaces =
+ runtime->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
+ *error_msg = "No image location found for Dex2Oat.";
+ return false;
+ }
+ std::string image_location = image_spaces[0]->GetImageLocation();
+
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetCompilerExecutable());
+ argv.push_back("--runtime-arg");
+ argv.push_back("-classpath");
+ argv.push_back("--runtime-arg");
+ std::string class_path = runtime->GetClassPathString();
+ if (class_path == "") {
+ class_path = OatFile::kSpecialSharedLibrary;
+ }
+ argv.push_back(class_path);
+ if (runtime->IsDebuggable()) {
+ argv.push_back("--debuggable");
+ }
+ runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+
+ if (!runtime->IsVerificationEnabled()) {
+ argv.push_back("--compiler-filter=verify-none");
+ }
+
+ if (runtime->MustRelocateIfPossible()) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xrelocate");
+ } else {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xnorelocate");
+ }
+
+ if (!kIsTargetBuild) {
+ argv.push_back("--host");
+ }
+
+ argv.push_back("--boot-image=" + image_location);
+
+ std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+ argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
+
+ argv.insert(argv.end(), dex2oat_args.begin(), dex2oat_args.end());
+
+ // We must set --android-root.
+ const char* android_root = getenv("ANDROID_ROOT");
+ CHECK(android_root != nullptr);
+ argv.push_back("--android-root=" + std::string(android_root));
+
+ std::string command_line(Join(argv, ' '));
+
+ // We need to fix up the '&' being used for "do not check classpath."
+ size_t ampersand = command_line.find(" &");
+ CHECK_NE(ampersand, std::string::npos);
+ command_line = command_line.replace(ampersand, 2, " \\&");
+
+ command_line += " 2>&1";
+
+ // We need dex2oat to actually log things.
+ setenv("ANDROID_LOG_TAGS", "*:d", 1);
+
+ FILE* pipe = popen(command_line.c_str(), "r");
+
+ setenv("ANDROID_LOG_TAGS", "*:e", 1);
+
+ if (pipe == nullptr) {
+ success_ = false;
+ } else {
+ char buffer[128];
+
+ while (fgets(buffer, 128, pipe) != nullptr) {
+ output_ += buffer;
+ }
+
+ int result = pclose(pipe);
+ success_ = result == 0;
+ }
+ return success_;
+ }
+
+ std::string output_ = "";
+ std::string error_msg_ = "";
+ bool success_ = false;
+};
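The Dex2Oat() helper above shells out to the compiler binary rather than linking it in, so all diagnostics arrive over a pipe. A minimal sketch of that capture pattern (stripped of the " &" rewriting and ANDROID_LOG_TAGS toggling the real helper also does):

    #include <cstdio>
    #include <string>

    // Run a shell command, fold stderr into stdout, collect everything
    // the child prints, and report whether it exited with status 0.
    bool RunAndCapture(std::string cmd, std::string* out) {
      cmd += " 2>&1";  // merge stderr into the captured stream
      FILE* pipe = popen(cmd.c_str(), "r");
      if (pipe == nullptr) {
        return false;
      }
      char buffer[128];
      while (fgets(buffer, sizeof(buffer), pipe) != nullptr) {
        *out += buffer;
      }
      return pclose(pipe) == 0;  // exit status 0 means success
    }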
+
+class Dex2oatSwapTest : public Dex2oatTest {
+ protected:
+ void RunTest(bool use_fd, bool expect_use, const std::vector<std::string>& extra_args = {}) {
+ std::string dex_location = GetScratchDir() + "/Dex2OatSwapTest.jar";
+ std::string odex_location = GetOdexDir() + "/Dex2OatSwapTest.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ std::vector<std::string> copy(extra_args);
+
+ std::unique_ptr<ScratchFile> sf;
+ if (use_fd) {
+ sf.reset(new ScratchFile());
+ copy.push_back(StringPrintf("--swap-fd=%d", sf->GetFd()));
+ } else {
+ std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
+ copy.push_back("--swap-file=" + swap_location);
+ }
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, copy);
+
+ CheckValidity();
+ ASSERT_TRUE(success_);
+ CheckResult(expect_use);
+ }
+
+ void CheckResult(bool expect_use) {
+ if (kIsTargetBuild) {
+ CheckTargetResult(expect_use);
+ } else {
+ CheckHostResult(expect_use);
+ }
+ }
+
+ void CheckTargetResult(bool expect_use ATTRIBUTE_UNUSED) {
+ // TODO: Ignore for now, as we won't capture any output (it goes to the logcat). We may do
+ // something for variants with a file descriptor, where we can control the lifetime of
+ // the swap file and thus take a look at it.
+ }
+
+ void CheckHostResult(bool expect_use) {
+ if (!kIsTargetBuild) {
+ if (expect_use) {
+ EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
+ << output_;
+ } else {
+ EXPECT_EQ(output_.find("Large app, accepted running with swap."), std::string::npos)
+ << output_;
+ }
+ }
+ }
+
+ // Check whether the dex2oat run was really successful.
+ void CheckValidity() {
+ if (kIsTargetBuild) {
+ CheckTargetValidity();
+ } else {
+ CheckHostValidity();
+ }
+ }
+
+ void CheckTargetValidity() {
+ // TODO: Ignore for now, as we won't capture any output (it goes to the logcat). We may do
+ // something for variants with a file descriptor, where we can control the lifetime of
+ // the swap file and thus take a look at it.
+ }
+
+ // On the host, we can get the dex2oat output. Here, look for "dex2oat took."
+ void CheckHostValidity() {
+ EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
+ }
+};
+
+TEST_F(Dex2oatSwapTest, DoNotUseSwapDefaultSingleSmall) {
+ RunTest(false /* use_fd */, false /* expect_use */);
+ RunTest(true /* use_fd */, false /* expect_use */);
+}
+
+TEST_F(Dex2oatSwapTest, DoNotUseSwapSingle) {
+ RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
+ RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
+}
+
+TEST_F(Dex2oatSwapTest, DoNotUseSwapSmall) {
+ RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
+ RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
+}
+
+TEST_F(Dex2oatSwapTest, DoUseSwapSingleSmall) {
+ RunTest(false /* use_fd */,
+ true /* expect_use */,
+ { "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
+ RunTest(true /* use_fd */,
+ true /* expect_use */,
+ { "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
+}
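Read together, the four swap tests above pin down the AND semantics of the two thresholds: a threshold of 0 makes its check pass trivially, and swap is only used when both checks pass.

    size-threshold=0  count-threshold=0  swap used
    no                no                 no   (defaults, one small dex file)
    yes               no                 no
    no                yes                no
    yes               yes                yes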
+
+class Dex2oatVeryLargeTest : public Dex2oatTest {
+ protected:
+ void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
+ CompilerFilter::Filter result ATTRIBUTE_UNUSED) OVERRIDE {
+ // Ignore, we'll do our own checks.
+ }
+
+ void RunTest(CompilerFilter::Filter filter,
+ bool expect_large,
+ const std::vector<std::string>& extra_args = {}) {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ std::vector<std::string> copy(extra_args);
+
+ GenerateOdexForTest(dex_location, odex_location, filter, copy);
+
+ CheckValidity();
+ ASSERT_TRUE(success_);
+ CheckResult(dex_location, odex_location, filter, expect_large);
+ }
+
+ void CheckResult(const std::string& dex_location,
+ const std::string& odex_location,
+ CompilerFilter::Filter filter,
+ bool expect_large) {
+ // Host/target independent checks.
+ std::string error_msg;
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+ if (expect_large) {
+ // Note: we cannot check the following:
+ // EXPECT_TRUE(CompilerFilter::IsAsGoodAs(CompilerFilter::kVerifyAtRuntime,
+ // odex_file->GetCompilerFilter()));
+ // The reason is that the filter override currently happens when the dex files are
+ // loaded in dex2oat, which is after the oat file has been started. Thus, the header
+ // store cannot be changed, and the original filter is set in stone.
+
+ for (const OatDexFile* oat_dex_file : odex_file->GetOatDexFiles()) {
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ ASSERT_TRUE(dex_file != nullptr);
+ uint32_t class_def_count = dex_file->NumClassDefs();
+ ASSERT_LT(class_def_count, std::numeric_limits<uint16_t>::max());
+ for (uint16_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
+ OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
+ EXPECT_EQ(oat_class.GetType(), OatClassType::kOatClassNoneCompiled);
+ }
+ }
+
+ // If the input filter was already below verify-at-runtime, it should have been used as-is.
+ if (!CompilerFilter::IsAsGoodAs(CompilerFilter::kVerifyAtRuntime, filter)) {
+ EXPECT_EQ(odex_file->GetCompilerFilter(), filter);
+ }
+ } else {
+ EXPECT_EQ(odex_file->GetCompilerFilter(), filter);
+ }
+
+ // Host/target dependent checks.
+ if (kIsTargetBuild) {
+ CheckTargetResult(expect_large);
+ } else {
+ CheckHostResult(expect_large);
+ }
+ }
+
+ void CheckTargetResult(bool expect_large ATTRIBUTE_UNUSED) {
+ // TODO: Ignore for now. May do something for fd things.
+ }
+
+ void CheckHostResult(bool expect_large) {
+ if (!kIsTargetBuild) {
+ if (expect_large) {
+ EXPECT_NE(output_.find("Very large app, downgrading to verify-at-runtime."),
+ std::string::npos)
+ << output_;
+ } else {
+ EXPECT_EQ(output_.find("Very large app, downgrading to verify-at-runtime."),
+ std::string::npos)
+ << output_;
+ }
+ }
+ }
+
+ // Check whether the dex2oat run was really successful.
+ void CheckValidity() {
+ if (kIsTargetBuild) {
+ CheckTargetValidity();
+ } else {
+ CheckHostValidity();
+ }
+ }
+
+ void CheckTargetValidity() {
+ // TODO: Ignore for now.
+ }
+
+ // On the host, we can get the dex2oat output. Here, look for "dex2oat took."
+ void CheckHostValidity() {
+ EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
+ }
+};
+
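The expectations below mirror the downgrade gate in dex2oat.cc: only filters strictly better than verify-at-runtime are reduced, which is why kVerifyNone and kVerifyAtRuntime stay untouched even under a 100-byte threshold. The gate, restated as a sketch:

    // Only downgrade when the requested filter does more work than
    // verify-at-runtime; weaker filters pass through unchanged.
    if (is_very_large &&
        !CompilerFilter::IsAsGoodAs(CompilerFilter::kVerifyAtRuntime, filter)) {
      filter = CompilerFilter::kVerifyAtRuntime;
    }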
+TEST_F(Dex2oatVeryLargeTest, DontUseVeryLarge) {
+ RunTest(CompilerFilter::kVerifyNone, false);
+ RunTest(CompilerFilter::kVerifyAtRuntime, false);
+ RunTest(CompilerFilter::kInterpretOnly, false);
+ RunTest(CompilerFilter::kSpeed, false);
+
+ RunTest(CompilerFilter::kVerifyNone, false, { "--very-large-app-threshold=1000000" });
+ RunTest(CompilerFilter::kVerifyAtRuntime, false, { "--very-large-app-threshold=1000000" });
+ RunTest(CompilerFilter::kInterpretOnly, false, { "--very-large-app-threshold=1000000" });
+ RunTest(CompilerFilter::kSpeed, false, { "--very-large-app-threshold=1000000" });
+}
+
+TEST_F(Dex2oatVeryLargeTest, UseVeryLarge) {
+ RunTest(CompilerFilter::kVerifyNone, false, { "--very-large-app-threshold=100" });
+ RunTest(CompilerFilter::kVerifyAtRuntime, false, { "--very-large-app-threshold=100" });
+ RunTest(CompilerFilter::kInterpretOnly, true, { "--very-large-app-threshold=100" });
+ RunTest(CompilerFilter::kSpeed, true, { "--very-large-app-threshold=100" });
+}
+
+} // namespace art
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index a95ea649c7..1f513113ec 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -384,6 +384,8 @@ static const MipsInstruction gMipsInstructions[] = {
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+ { kFpMask | (0x201 << 16), kCop1 | (0x200 << 16) | 17, "movf", "fadc" },
+ { kFpMask | (0x201 << 16), kCop1 | (0x201 << 16) | 17, "movt", "fadc" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 26, "rint", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 27, "class", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 5bb61bb829..0a7ffda3b4 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -494,17 +494,6 @@ void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
}
-void PatchOat::PatchImTables(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
- // We can safely walk target image since the conflict tables are independent.
- image_header->VisitPackedImTables(
- [this](ArtMethod* method) {
- return RelocatedAddressOfPointer(method);
- },
- image_->Begin(),
- pointer_size);
-}
-
void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
const size_t pointer_size = InstructionSetPointerSize(isa_);
// We can safely walk target image since the conflict tables are independent.
@@ -647,7 +636,6 @@ bool PatchOat::PatchImage(bool primary_image) {
PatchArtFields(image_header);
PatchArtMethods(image_header);
- PatchImTables(image_header);
PatchImtConflictTables(image_header);
PatchInternedStrings(image_header);
PatchClassTable(image_header);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 61ec695d83..3ef837fde9 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -117,7 +117,6 @@ class PatchOat {
bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchImTables(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchImtConflictTables(const ImageHeader* image_header)
SHARED_REQUIRES(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index aa12c83ceb..1c442fc8db 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -169,7 +169,6 @@ LIBART_COMMON_SRC_FILES := \
os_linux.cc \
parsed_options.cc \
primitive.cc \
- profiler.cc \
quick_exception_handler.cc \
quick/inline_method_analyser.cc \
reference_table.cc \
@@ -369,7 +368,6 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
oat.h \
object_callbacks.h \
process_state.h \
- profiler_options.h \
quick/inline_method_analyser.h \
runtime.h \
stack.h \
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index d81e0a9b96..d105c67d43 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -34,7 +34,7 @@
namespace art {
-extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_implicit_suspend();
@@ -107,8 +107,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
*out_return_pc = (sc->arm_pc + instr_size) | 1;
}
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+ if (!IsValidImplicitCheck(info)) {
+ return false;
+ }
// The code that looks for the catch location needs to know the value of the
// ARM PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault. However the mapping table has
@@ -122,7 +124,10 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIB
uint32_t instr_size = GetInstructionSize(ptr);
sc->arm_lr = (sc->arm_pc + instr_size) | 1; // LR needs to point to gc map location
- sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
+ // Pass the faulting address as the first argument of
+ // art_quick_throw_null_pointer_exception_from_signal.
+ sc->arm_r0 = reinterpret_cast<uintptr_t>(info->si_addr);
VLOG(signals) << "Generating null pointer exception";
return true;
}
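Every per-architecture handler in this change follows the same recipe: validate si_addr, forge a return address (LR here, a pushed word on x86), load the faulting address into the first argument register, and point the PC at the new entrypoint. A self-contained Linux/x86-64 toy showing the register-rewriting part (an illustration of the mechanism, not ART code; note that unlike ART's handlers it forges no return address, so the recovery routine must never return):

    #include <signal.h>
    #include <stdint.h>
    #include <ucontext.h>
    #include <unistd.h>

    // Where the handler redirects execution; receives si_addr in RDI.
    void Recover(uintptr_t fault_addr ATTRIBUTE_UNUSED) {
      // Async-signal-safe output only: we arrived here via a forged jump.
      const char msg[] = "recovered from fault\n";
      write(STDOUT_FILENO, msg, sizeof(msg) - 1);
      _exit(0);
    }

    void Handler(int, siginfo_t* info, void* context) {
      ucontext_t* uc = static_cast<ucontext_t*>(context);
      // First argument register gets the faulting address...
      uc->uc_mcontext.gregs[REG_RDI] =
          static_cast<greg_t>(reinterpret_cast<uintptr_t>(info->si_addr));
      // ...and the resume PC becomes the recovery routine.
      uc->uc_mcontext.gregs[REG_RIP] =
          static_cast<greg_t>(reinterpret_cast<uintptr_t>(&Recover));
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = Handler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &sa, nullptr);
      *reinterpret_cast<volatile int*>(0) = 42;  // the implicit "null check"
      return 1;  // unreachable
    }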
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 27a41f09ad..0797def8e8 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -299,6 +299,11 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
+ * Call installed by a signal handler to create and deliver a NullPointerException.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal
+
+ /*
* Called by managed code to create and deliver an ArithmeticException.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 3e9ad0da62..f591fccde2 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -29,7 +29,7 @@
#include "thread-inl.h"
extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_implicit_suspend();
//
@@ -84,8 +84,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
*out_return_pc = sc->pc + 4;
}
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+ if (!IsValidImplicitCheck(info)) {
+ return false;
+ }
// The code that looks for the catch location needs to know the value of the
// PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault.
@@ -95,7 +97,10 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIB
sc->regs[30] = sc->pc + 4; // LR needs to point to gc map location
- sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
+ // Pass the faulting address as the first argument of
+ // art_quick_throw_null_pointer_exception_from_signal.
+ sc->regs[0] = reinterpret_cast<uintptr_t>(info->si_addr);
VLOG(signals) << "Generating null pointer exception";
return true;
}
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index a6490aed33..10ee63f74f 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -406,6 +406,11 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
+ * Call installed by a signal handler to create and deliver a NullPointerException.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal
+
+ /*
* Called by managed code to create and deliver an ArithmeticException.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 8ea78eb900..754284c833 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -27,7 +27,7 @@
#include "thread-inl.h"
extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal();
//
// Mips specific fault handler functions.
@@ -71,8 +71,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
*out_return_pc = sc->sc_pc + 4;
}
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+ if (!IsValidImplicitCheck(info)) {
+ return false;
+ }
// The code that looks for the catch location needs to know the value of the
// PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault.
@@ -81,8 +83,11 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIB
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location
- sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ // Pass the faulting address as the first argument of
+ // art_quick_throw_null_pointer_exception_from_signal.
+ sc->sc_regs[0] = reinterpret_cast<uintptr_t>(info->si_addr);
VLOG(signals) << "Generating null pointer exception";
return true;
}
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index bb89674caf..c1b8044be9 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -506,6 +506,18 @@ ENTRY art_quick_throw_null_pointer_exception
move $a0, rSELF # pass Thread::Current
END art_quick_throw_null_pointer_exception
+
+ /*
+ * Call installed by a signal handler to create and deliver a NullPointerException.
+ */
+ .extern artThrowNullPointerExceptionFromSignal
+ENTRY art_quick_throw_null_pointer_exception_from_signal
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ la $t9, artThrowNullPointerExceptionFromSignal
+ jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_throw_null_pointer_exception_from_signal
+
/*
* Called by managed code to create and deliver an ArithmeticException
*/
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 4abfcf12ff..c9a32ad7f9 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -27,7 +27,7 @@
#include "thread-inl.h"
extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal();
//
// Mips64 specific fault handler functions.
@@ -71,8 +71,11 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
*out_return_pc = sc->sc_pc + 4;
}
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+ if (!IsValidImplicitCheck(info)) {
+ return false;
+ }
+
// The code that looks for the catch location needs to know the value of the
// PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault.
@@ -81,8 +84,11 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIB
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location
- sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ // Pass the faulting address as the first argument of
+ // art_quick_throw_null_pointer_exception_from_signal.
+ sc->sc_regs[0] = reinterpret_cast<uintptr_t>(info->si_addr);
VLOG(signals) << "Generating null pointer exception";
return true;
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 78ac748e32..ae6962076b 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -593,6 +593,17 @@ ENTRY art_quick_throw_null_pointer_exception
END art_quick_throw_null_pointer_exception
/*
* Call installed by a signal handler to create and deliver a NullPointerException.
+ */
+ .extern artThrowNullPointerExceptionFromSignal
+ENTRY art_quick_throw_null_pointer_exception_from_signal
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNullPointerExceptionFromSignal
+ jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_throw_null_pointer_exception_from_signal
+
+ /*
* Called by managed code to create and deliver an ArithmeticException
*/
.extern artThrowDivZeroFromCode
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index d7c4cb182a..24e3a0d53f 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -36,6 +36,7 @@
#define CTX_EIP uc_mcontext->__ss.__rip
#define CTX_EAX uc_mcontext->__ss.__rax
#define CTX_METHOD uc_mcontext->__ss.__rdi
+#define CTX_RDI uc_mcontext->__ss.__rdi
#define CTX_JMP_BUF uc_mcontext->__ss.__rdi
#else
// 32 bit mac build.
@@ -71,12 +72,12 @@ namespace art {
#if defined(__APPLE__) && defined(__x86_64__)
// mac symbols have a prefix of _ on x86_64
-extern "C" void _art_quick_throw_null_pointer_exception();
+extern "C" void _art_quick_throw_null_pointer_exception_from_signal();
extern "C" void _art_quick_throw_stack_overflow();
extern "C" void _art_quick_test_suspend();
#define EXT_SYM(sym) _ ## sym
#else
-extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_test_suspend();
#define EXT_SYM(sym) sym
@@ -292,7 +293,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
*out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
-bool NullPointerHandler::Action(int, siginfo_t*, void* context) {
+bool NullPointerHandler::Action(int, siginfo_t* sig, void* context) {
+ if (!IsValidImplicitCheck(sig)) {
+ return false;
+ }
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
@@ -314,7 +318,15 @@ bool NullPointerHandler::Action(int, siginfo_t*, void* context) {
*next_sp = retaddr;
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_null_pointer_exception));
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(
+ EXT_SYM(art_quick_throw_null_pointer_exception_from_signal));
+ // Pass the faulting address as the first argument of
+ // art_quick_throw_null_pointer_exception_from_signal.
+#if defined(__x86_64__)
+ uc->CTX_RDI = reinterpret_cast<uintptr_t>(sig->si_addr);
+#else
+ uc->CTX_EAX = reinterpret_cast<uintptr_t>(sig->si_addr);
+#endif
VLOG(signals) << "Generating null pointer exception";
return true;
}
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b3dd4545f4..5851fbd804 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -284,6 +284,11 @@ END_MACRO
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
+ * Call installed by a signal handler to create and deliver a NullPointerException.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal
+
+ /*
* Called by managed code to create and deliver an ArithmeticException.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 205307ce67..e777e6cfb2 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -309,6 +309,11 @@ END_MACRO
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
+ * Call installed by a signal handler to create and deliver a NullPointerException.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal
+
+ /*
* Called by managed code to create and deliver an ArithmeticException.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 32ae6ffad5..26450c41c7 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -120,10 +120,6 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
return dex_method_index_;
}
-inline uint32_t ArtMethod::GetImtIndex() {
- return GetDexMethodIndex() % ImTable::kSize;
-}
-
inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(size_t pointer_size) {
return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
pointer_size);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 849af977e1..2b025f8c62 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -99,22 +99,6 @@ class ImtConflictTable {
return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
}
- // Return true if two conflict tables are the same.
- bool Equals(ImtConflictTable* other, size_t pointer_size) const {
- size_t num = NumEntries(pointer_size);
- if (num != other->NumEntries(pointer_size)) {
- return false;
- }
- for (size_t i = 0; i < num; ++i) {
- if (GetInterfaceMethod(i, pointer_size) != other->GetInterfaceMethod(i, pointer_size) ||
- GetImplementationMethod(i, pointer_size) !=
- other->GetImplementationMethod(i, pointer_size)) {
- return false;
- }
- }
- return true;
- }
-
// Visit all of the entries.
// NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
// and also returns one. The order is <interface, implementation>.
@@ -419,8 +403,6 @@ class ArtMethod FINAL {
ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t GetImtIndex() SHARED_REQUIRES(Locks::mutator_lock_);
-
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
dex_method_index_ = new_idx;
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index bd8de877e0..1c320243dc 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -73,6 +73,11 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
level == kThreadListLock ||
// Ignore logging which may or may not have set up thread data structures.
level == kLoggingLock ||
+ // When transitioning from suspended to runnable, a daemon thread might be in
+ // a situation where the runtime is shutting down. To avoid crashing our debug locking
+ // mechanism, we just pass a null Thread* to the MutexLock during that transition
+ // (see Thread::TransitionFromSuspendedToRunnable).
+ level == kThreadSuspendCountLock ||
// Avoid recursive death.
level == kAbortLock) << level;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7c003151ea..fe7448fa25 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -857,13 +857,11 @@ static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_
if (vtable != nullptr) {
SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_spaces);
}
- if (klass->ShouldHaveImt()) {
- ImTable* imt = klass->GetImt(pointer_size);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr, image_spaces);
+ if (klass->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ SanityCheckArtMethod(
+ klass->GetEmbeddedImTableEntry(i, pointer_size), nullptr, image_spaces);
}
- }
- if (klass->ShouldHaveEmbeddedVTable()) {
for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
}
@@ -3458,11 +3456,16 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class->SetClassFlags(mirror::kClassFlagObjectArray);
}
mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
- new_class->PopulateEmbeddedVTable(image_pointer_size_);
+ {
+ ArtMethod* imt[mirror::Class::kImtSize];
+ std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
+ new_class->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
+ }
mirror::Class::SetStatus(new_class, mirror::Class::kStatusInitialized, self);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
+
// All arrays have java/lang/Cloneable and java/io/Serializable as
// interfaces. We need to set that up here, so that stuff like
// "instanceof" works right.
@@ -5033,11 +5036,9 @@ bool ClassLinker::LinkClass(Thread* self,
if (!LinkSuperClass(klass)) {
return false;
}
- ArtMethod* imt_data[ImTable::kSize];
- // If there are any new conflicts compared to super class.
- bool new_conflict = false;
- std::fill_n(imt_data, arraysize(imt_data), Runtime::Current()->GetImtUnimplementedMethod());
- if (!LinkMethods(self, klass, interfaces, &new_conflict, imt_data)) {
+ ArtMethod* imt[mirror::Class::kImtSize];
+ std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
+ if (!LinkMethods(self, klass, interfaces, imt)) {
return false;
}
if (!LinkInstanceFields(self, klass)) {
@@ -5050,45 +5051,15 @@ bool ClassLinker::LinkClass(Thread* self,
CreateReferenceInstanceOffsets(klass);
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
- ImTable* imt = nullptr;
- if (klass->ShouldHaveImt()) {
- // If there are any new conflicts compared to the super class we can not make a copy. There
- // can be cases where both will have a conflict method at the same slot without having the same
- // set of conflicts. In this case, we can not share the IMT since the conflict table slow path
- // will possibly create a table that is incorrect for either of the classes.
- // Same IMT with new_conflict does not happen very often.
- if (!new_conflict && klass->HasSuperClass() && klass->GetSuperClass()->ShouldHaveImt()) {
- ImTable* super_imt = klass->GetSuperClass()->GetImt(image_pointer_size_);
- bool imt_equals = true;
- for (size_t i = 0; i < ImTable::kSize && imt_equals; ++i) {
- imt_equals = imt_equals && (super_imt->Get(i, image_pointer_size_) == imt_data[i]);
- }
- if (imt_equals) {
- imt = super_imt;
- }
- }
- if (imt == nullptr) {
- LinearAlloc* allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
- imt = reinterpret_cast<ImTable*>(
- allocator->Alloc(self, ImTable::SizeInBytes(image_pointer_size_)));
- if (imt == nullptr) {
- return false;
- }
- imt->Populate(imt_data, image_pointer_size_);
- }
- }
-
if (!klass->IsTemp() || (!init_done_ && klass->GetClassSize() == class_size)) {
// We don't need to retire this class as it has no embedded tables or it was created the
// correct size during class linker initialization.
CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get());
- if (klass->ShouldHaveEmbeddedVTable()) {
- klass->PopulateEmbeddedVTable(image_pointer_size_);
- }
- if (klass->ShouldHaveImt()) {
- klass->SetImt(imt, image_pointer_size_);
+ if (klass->ShouldHaveEmbeddedImtAndVTable()) {
+ klass->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
}
+
// This will notify waiters on klass that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
mirror::Class::SetStatus(klass, mirror::Class::kStatusResolved, self);
@@ -5480,7 +5451,6 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
bool ClassLinker::LinkMethods(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- bool* out_new_conflict,
ArtMethod** out_imt) {
self->AllowThreadSuspension();
// A map from vtable indexes to the method they need to be updated to point to. Used because we
@@ -5492,7 +5462,7 @@ bool ClassLinker::LinkMethods(Thread* self,
// any vtable entries with new default method implementations.
return SetupInterfaceLookupTable(self, klass, interfaces)
&& LinkVirtualMethods(self, klass, /*out*/ &default_translations)
- && LinkInterfaceMethods(self, klass, default_translations, out_new_conflict, out_imt);
+ && LinkInterfaceMethods(self, klass, default_translations, out_imt);
}
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
@@ -5650,7 +5620,7 @@ bool ClassLinker::LinkVirtualMethods(
StackHandleScope<2> hs(self);
Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
MutableHandle<mirror::PointerArray> vtable;
- if (super_class->ShouldHaveEmbeddedVTable()) {
+ if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
vtable = hs.NewHandle(AllocPointerArray(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
self->AssertPendingOOMException();
@@ -6050,7 +6020,6 @@ ArtMethod* ClassLinker::AddMethodToConflictTable(mirror::Class* klass,
void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
ArtMethod* current_method,
- /*out*/bool* new_conflict,
/*out*/ArtMethod** imt_ref) {
// Place method in imt if entry is empty, place conflict otherwise.
if (*imt_ref == unimplemented_method) {
@@ -6067,79 +6036,42 @@ void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
*imt_ref = current_method;
} else {
*imt_ref = imt_conflict_method;
- *new_conflict = true;
}
} else {
// Place the default conflict method. Note that there may be an existing conflict
// method in the IMT, but it could be one tailored to the super class, with a
// specific ImtConflictTable.
*imt_ref = imt_conflict_method;
- *new_conflict = true;
}
}
void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
- DCHECK(klass->ShouldHaveImt()) << PrettyClass(klass);
+ DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass);
DCHECK(!klass->IsTemp()) << PrettyClass(klass);
- ArtMethod* imt_data[ImTable::kSize];
+ ArtMethod* imt[mirror::Class::kImtSize];
Runtime* const runtime = Runtime::Current();
ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
- std::fill_n(imt_data, arraysize(imt_data), unimplemented_method);
+ std::fill_n(imt, arraysize(imt), unimplemented_method);
if (klass->GetIfTable() != nullptr) {
- bool new_conflict = false;
FillIMTFromIfTable(klass->GetIfTable(),
unimplemented_method,
conflict_method,
klass,
- /*create_conflict_tables*/true,
- /*ignore_copied_methods*/false,
- &new_conflict,
- &imt_data[0]);
- }
- if (!klass->ShouldHaveImt()) {
- return;
- }
- // Compare the IMT with the super class including the conflict methods. If they are equivalent,
- // we can just use the same pointer.
- ImTable* imt = nullptr;
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && super_class->ShouldHaveImt()) {
- ImTable* super_imt = super_class->GetImt(image_pointer_size_);
- bool same = true;
- for (size_t i = 0; same && i < ImTable::kSize; ++i) {
- ArtMethod* method = imt_data[i];
- ArtMethod* super_method = super_imt->Get(i, image_pointer_size_);
- if (method != super_method) {
- bool is_conflict_table = method->IsRuntimeMethod() &&
- method != unimplemented_method &&
- method != conflict_method;
- // Verify conflict contents.
- bool super_conflict_table = super_method->IsRuntimeMethod() &&
- super_method != unimplemented_method &&
- super_method != conflict_method;
- if (!is_conflict_table || !super_conflict_table) {
- same = false;
- } else {
- ImtConflictTable* table1 = method->GetImtConflictTable(image_pointer_size_);
- ImtConflictTable* table2 = super_method->GetImtConflictTable(image_pointer_size_);
- same = same && table1->Equals(table2, image_pointer_size_);
- }
- }
- }
- if (same) {
- imt = super_imt;
- }
+ /*create_conflict_tables*/true,
+ /*ignore_copied_methods*/false,
+ &imt[0]);
}
- if (imt == nullptr) {
- imt = klass->GetImt(image_pointer_size_);
- DCHECK(imt != nullptr);
- imt->Populate(imt_data, image_pointer_size_);
- } else {
- klass->SetImt(imt, image_pointer_size_);
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_);
}
}
+static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+}
+
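GetIMTIndex restores the pre-ImTable scheme: a plain modulo of the dex method index into the embedded table. A worked example (kImtSize is assumed to be 64 here, its historical value; the indices are arbitrary):

    // imt_index = dex_method_index % kImtSize, so distinct interface
    // methods can collide: with kImtSize == 64, indices 9, 73 and 137
    // all map to slot 9. Such collisions are exactly what the conflict
    // tables and the imt_conflict_method exist to resolve.
    constexpr uint32_t kImtSize = 64;  // assumed value
    constexpr uint32_t ImtIndex(uint32_t dex_method_index) {
      return dex_method_index % kImtSize;
    }
    static_assert(ImtIndex(73) == 9 && ImtIndex(137) == 9, "same slot");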
ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
LinearAlloc* linear_alloc,
size_t image_pointer_size) {
@@ -6159,9 +6091,8 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
mirror::Class* klass,
bool create_conflict_tables,
bool ignore_copied_methods,
- /*out*/bool* new_conflict,
- /*out*/ArtMethod** imt) {
- uint32_t conflict_counts[ImTable::kSize] = {};
+ ArtMethod** imt) {
+ uint32_t conflict_counts[mirror::Class::kImtSize] = {};
for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
mirror::Class* interface = if_table->GetInterface(i);
const size_t num_virtuals = interface->NumVirtualMethods();
@@ -6191,7 +6122,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
// or interface methods in the IMT here they will not create extra conflicts since we compare
// names and signatures in SetIMTRef.
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = interface_method->GetImtIndex();
+ const uint32_t imt_index = GetIMTIndex(interface_method);
// There are only conflicts if the interface methods for an IMT slot don't all have
// the same implementation method; keep track of this to avoid creating a conflict table in
@@ -6203,7 +6134,6 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
SetIMTRef(unimplemented_method,
imt_conflict_method,
implementation_method,
- /*out*/new_conflict,
/*out*/&imt[imt_index]);
}
}
@@ -6211,7 +6141,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
if (create_conflict_tables) {
// Create the conflict tables.
LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
- for (size_t i = 0; i < ImTable::kSize; ++i) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
size_t conflicts = conflict_counts[i];
if (imt[i] == imt_conflict_method) {
ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
@@ -6245,7 +6175,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
}
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = interface_method->GetImtIndex();
+ const uint32_t imt_index = GetIMTIndex(interface_method);
if (!imt[imt_index]->IsRuntimeMethod() ||
imt[imt_index] == unimplemented_method ||
imt[imt_index] == imt_conflict_method) {
@@ -6498,14 +6428,12 @@ static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size
void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
- bool* new_conflict,
ArtMethod** imt) {
DCHECK(klass->HasSuperClass());
mirror::Class* super_class = klass->GetSuperClass();
- if (super_class->ShouldHaveImt()) {
- ImTable* super_imt = super_class->GetImt(image_pointer_size_);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- imt[i] = super_imt->Get(i, image_pointer_size_);
+ if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
}
} else {
// No imt in the super class, need to reconstruct from the iftable.
@@ -6518,7 +6446,6 @@ void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
klass.Get(),
/*create_conflict_table*/false,
/*ignore_copied_methods*/true,
- /*out*/new_conflict,
/*out*/imt);
}
}
@@ -6529,7 +6456,6 @@ bool ClassLinker::LinkInterfaceMethods(
Thread* self,
Handle<mirror::Class> klass,
const std::unordered_map<size_t, ClassLinker::MethodTranslation>& default_translations,
- bool* out_new_conflict,
ArtMethod** out_imt) {
StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
@@ -6565,7 +6491,6 @@ bool ClassLinker::LinkInterfaceMethods(
FillImtFromSuperClass(klass,
unimplemented_method,
imt_conflict_method,
- out_new_conflict,
out_imt);
}
// Allocate method arrays before since we don't want to miss visiting miranda method roots due to
@@ -6651,7 +6576,7 @@ bool ClassLinker::LinkInterfaceMethods(
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = interface_method->GetImtIndex();
+ uint32_t imt_index = GetIMTIndex(interface_method);
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -6697,7 +6622,6 @@ bool ClassLinker::LinkInterfaceMethods(
SetIMTRef(unimplemented_method,
imt_conflict_method,
vtable_method,
- /*out*/out_new_conflict,
/*out*/imt_ptr);
}
break;
@@ -6840,7 +6764,6 @@ bool ClassLinker::LinkInterfaceMethods(
SetIMTRef(unimplemented_method,
imt_conflict_method,
current_method,
- /*out*/out_new_conflict,
/*out*/imt_ptr);
}
}
@@ -7040,7 +6963,7 @@ bool ClassLinker::LinkInterfaceMethods(
}
// Fix up IMT next
- for (size_t i = 0; i < ImTable::kSize; ++i) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
auto it = move_table.find(out_imt[i]);
if (it != move_table.end()) {
out_imt[i] = it->second;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d6822c5225..ca5af19976 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -833,7 +833,6 @@ class ClassLinker {
bool LinkMethods(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- bool* out_new_conflict,
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -969,20 +968,19 @@ class ClassLinker {
// * kDefaultConflict - Conflicting method implementations were found when searching for
// target_method. The value of *out_default_method is null.
DefaultMethodSearchResult FindDefaultMethodImplementation(
- Thread* self,
- ArtMethod* target_method,
- Handle<mirror::Class> klass,
- /*out*/ArtMethod** out_default_method) const
+ Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method) const
SHARED_REQUIRES(Locks::mutator_lock_);
// Sets the imt entries and fixes up the vtable for the given class by linking all the interface
// methods. See LinkVirtualMethods for an explanation of what default_translations is.
bool LinkInterfaceMethods(
- Thread* self,
- Handle<mirror::Class> klass,
- const std::unordered_map<size_t, MethodTranslation>& default_translations,
- bool* out_new_conflict,
- ArtMethod** out_imt)
+ Thread* self,
+ Handle<mirror::Class> klass,
+ const std::unordered_map<size_t, MethodTranslation>& default_translations,
+ ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
@@ -1098,7 +1096,6 @@ class ClassLinker {
void SetIMTRef(ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
ArtMethod* current_method,
- /*out*/bool* new_conflict,
/*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
void FillIMTFromIfTable(mirror::IfTable* if_table,
@@ -1107,13 +1104,11 @@ class ClassLinker {
mirror::Class* klass,
bool create_conflict_tables,
bool ignore_copied_methods,
- /*out*/bool* new_conflict,
- /*out*/ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
void FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
- bool* new_conflict,
ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 9b59f2bba6..488826b6c4 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -148,8 +148,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(0U, array->NumInstanceFields());
EXPECT_EQ(0U, array->NumStaticFields());
EXPECT_EQ(2U, array->NumDirectInterfaces());
- EXPECT_FALSE(array->ShouldHaveImt());
- EXPECT_TRUE(array->ShouldHaveEmbeddedVTable());
+ EXPECT_TRUE(array->ShouldHaveEmbeddedImtAndVTable());
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != nullptr);
mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 3509d9aef9..741b682996 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -463,7 +463,7 @@ std::string CommonRuntimeTestImpl::GetTestAndroidRoot() {
#define ART_TARGET_NATIVETEST_DIR_STRING ""
#endif
-std::string CommonRuntimeTestImpl::GetTestDexFileName(const char* name) {
+std::string CommonRuntimeTestImpl::GetTestDexFileName(const char* name) const {
CHECK(name != nullptr);
std::string filename;
if (IsHost()) {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 0ce40e8e22..b68eb19f2a 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -111,7 +111,7 @@ class CommonRuntimeTestImpl {
std::string GetTestAndroidRoot();
- std::string GetTestDexFileName(const char* name);
+ std::string GetTestDexFileName(const char* name) const;
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 4f705f2056..912a74a52a 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -402,12 +402,117 @@ void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
dex_file, type);
}
-void ThrowNullPointerExceptionFromDexPC() {
+static bool IsValidImplicitCheck(uintptr_t addr, ArtMethod* method, const Instruction& instr)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!CanDoImplicitNullCheckOn(addr)) {
+ return false;
+ }
+
+ switch (instr.Opcode()) {
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ // Without inlining, we could just check that the offset is the class offset.
+ // However, when inlining, the compiler can (validly) merge the null check with a field access
+ // on the same object. Note that the stack map at the NPE will reflect the invoke's location,
+ // which is the caller.
+ return true;
+ }
+
+ case Instruction::IGET:
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ case Instruction::IPUT:
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT: {
+ ArtField* field =
+ Runtime::Current()->GetClassLinker()->ResolveField(instr.VRegC_22c(), method, false);
+ return (addr == 0) ||
+ (addr == field->GetOffset().Uint32Value()) ||
+ (kEmitCompilerReadBarrier && (addr == mirror::Object::MonitorOffset().Uint32Value()));
+ }
+
+ case Instruction::IGET_QUICK:
+ case Instruction::IGET_BOOLEAN_QUICK:
+ case Instruction::IGET_BYTE_QUICK:
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT_QUICK:
+ case Instruction::IGET_WIDE_QUICK:
+ case Instruction::IGET_OBJECT_QUICK:
+ case Instruction::IPUT_QUICK:
+ case Instruction::IPUT_BOOLEAN_QUICK:
+ case Instruction::IPUT_BYTE_QUICK:
+ case Instruction::IPUT_CHAR_QUICK:
+ case Instruction::IPUT_SHORT_QUICK:
+ case Instruction::IPUT_WIDE_QUICK:
+ case Instruction::IPUT_OBJECT_QUICK: {
+ return (addr == 0u) ||
+ (addr == instr.VRegC_22c()) ||
+ (kEmitCompilerReadBarrier && (addr == mirror::Object::MonitorOffset().Uint32Value()));
+ }
+
+ case Instruction::AGET:
+ case Instruction::AGET_WIDE:
+ case Instruction::AGET_OBJECT:
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::AGET_SHORT:
+ case Instruction::APUT:
+ case Instruction::APUT_WIDE:
+ case Instruction::APUT_OBJECT:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_CHAR:
+ case Instruction::APUT_SHORT:
+ case Instruction::FILL_ARRAY_DATA:
+ case Instruction::ARRAY_LENGTH: {
+ // The length access should crash. We currently do not do implicit checks on
+ // the array access itself.
+ return (addr == 0u) ||
+ (addr == mirror::Array::LengthOffset().Uint32Value()) ||
+ (kEmitCompilerReadBarrier && (addr == mirror::Object::MonitorOffset().Uint32Value()));
+ }
+
+ default: {
+ // We have covered all the cases where an NPE could occur.
+ // Note that this must be kept in sync with the compiler, and adding
+ // any new way to do implicit checks in the compiler should also update
+ // this code.
+ return false;
+ }
+ }
+}
+
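The predicate above accepts only fault addresses the compiled code could plausibly have produced for the instruction at the throwing dex pc. For an IGET on a field at offset 8, for example, the legitimate addresses are 0 (the plain null dereference), 8 (a field access folded into the null check), and the monitor word when read barriers are compiled in. The field case, restated as a standalone sketch (the monitor offset of 4 is an assumption standing in for mirror::Object::MonitorOffset()):

    #include <cstdint>

    // Field-access arm of IsValidImplicitCheck(), isolated for clarity.
    bool IsPlausibleFieldFault(uintptr_t addr,
                               uint32_t field_offset,
                               bool emit_read_barrier) {
      constexpr uint32_t kAssumedMonitorOffset = 4;  // assumption, see above
      return addr == 0u ||
             addr == field_offset ||
             (emit_read_barrier && addr == kAssumedMonitorOffset);
    }
    // Example: IsPlausibleFieldFault(8, 8, false) -> true;
    //          IsPlausibleFieldFault(64, 8, false) -> false (not ours).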
+void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
uint32_t throw_dex_pc;
ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc);
const DexFile::CodeItem* code = method->GetCodeItem();
CHECK_LT(throw_dex_pc, code->insns_size_in_code_units_);
const Instruction* instr = Instruction::At(&code->insns_[throw_dex_pc]);
+ if (check_address && !IsValidImplicitCheck(addr, method, *instr)) {
+ const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ LOG(FATAL) << "Invalid address for an implicit NullPointerException check: "
+ << "0x" << std::hex << addr << std::dec
+ << ", at "
+ << instr->DumpString(dex_file)
+ << " in "
+ << PrettyMethod(method);
+ }
+
switch (instr->Opcode()) {
case Instruction::INVOKE_DIRECT:
ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kDirect);
@@ -530,14 +635,32 @@ void ThrowNullPointerExceptionFromDexPC() {
ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to get length of null array");
break;
+ case Instruction::FILL_ARRAY_DATA: {
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
+ "Attempt to write to null array");
+ break;
+ }
+ case Instruction::INVOKE_LAMBDA:
+ case Instruction::BOX_LAMBDA:
+ case Instruction::UNBOX_LAMBDA:
+ case Instruction::LIBERATE_VARIABLE: {
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
+ "Using a null lambda");
+ break;
+ }
+ case Instruction::MONITOR_ENTER:
+ case Instruction::MONITOR_EXIT: {
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
+ "Attempt to do a synchronize operation on a null object");
+ break;
+ }
default: {
- // TODO: We should have covered all the cases where we expect a NPE above, this
- // message/logging is so we can improve any cases we've missed in the future.
const DexFile* dex_file =
method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- ThrowException("Ljava/lang/NullPointerException;", nullptr,
- StringPrintf("Null pointer exception during instruction '%s'",
- instr->DumpString(dex_file).c_str()).c_str());
+ LOG(FATAL) << "NullPointerException at an unexpected instruction: "
+ << instr->DumpString(dex_file)
+ << " in "
+ << PrettyMethod(method);
break;
}
}
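
The validation above relies on one fact: when the object base register is null, the faulting address equals the accessed offset. A minimal standalone sketch of that idea (not ART code; kMonitorOffset and the helper names are invented for illustration):

    #include <stdint.h>

    // Illustrative constants; the real values come from the runtime.
    static constexpr uintptr_t kPageSize = 4096;
    static constexpr uintptr_t kMonitorOffset = 4;  // hypothetical lock-word offset
    static constexpr bool kEmitCompilerReadBarrier = true;

    static bool CanDoImplicitNullCheckOn(uintptr_t addr) { return addr < kPageSize; }

    // field_offset is the offset the faulting dex instruction was resolved to touch.
    static bool IsPlausibleImplicitNpe(uintptr_t fault_addr, uintptr_t field_offset) {
      if (!CanDoImplicitNullCheckOn(fault_addr)) {
        return false;  // Too far from null; treat as a genuine memory error.
      }
      return fault_addr == 0 ||
             fault_addr == field_offset ||
             (kEmitCompilerReadBarrier && fault_addr == kMonitorOffset);
    }
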
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 7a335859e5..cbd338d315 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -195,7 +195,7 @@ void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type)
SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionFromDexPC()
+void ThrowNullPointerExceptionFromDexPC(bool check_address = false, uintptr_t addr = 0)
SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerException(const char* msg)
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
new file mode 100644
index 0000000000..743fbb9c68
--- /dev/null
+++ b/runtime/dex2oat_environment_test.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX2OAT_ENVIRONMENT_TEST_H_
+#define ART_RUNTIME_DEX2OAT_ENVIRONMENT_TEST_H_
+
+#include <fstream>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "common_runtime_test.h"
+#include "compiler_callbacks.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "oat_file_assistant.h"
+#include "os.h"
+#include "runtime.h"
+#include "utils.h"
+
+namespace art {
+
+// Test class that provides some helpers to set a test up for compilation using dex2oat.
+class Dex2oatEnvironmentTest : public CommonRuntimeTest {
+ public:
+ virtual void SetUp() OVERRIDE {
+ CommonRuntimeTest::SetUp();
+
+ // Create a scratch directory to work from.
+ scratch_dir_ = android_data_ + "/Dex2oatEnvironmentTest";
+ ASSERT_EQ(0, mkdir(scratch_dir_.c_str(), 0700));
+
+ // Create a subdirectory in scratch for odex files.
+ odex_oat_dir_ = scratch_dir_ + "/oat";
+ ASSERT_EQ(0, mkdir(odex_oat_dir_.c_str(), 0700));
+
+ odex_dir_ = odex_oat_dir_ + "/" + std::string(GetInstructionSetString(kRuntimeISA));
+ ASSERT_EQ(0, mkdir(odex_dir_.c_str(), 0700));
+
+ // Verify the environment is as we expect
+ uint32_t checksum;
+ std::string error_msg;
+ ASSERT_TRUE(OS::FileExists(GetSystemImageFile().c_str()))
+ << "Expected pre-compiled boot image to be at: " << GetSystemImageFile();
+ ASSERT_TRUE(OS::FileExists(GetDexSrc1().c_str()))
+ << "Expected dex file to be at: " << GetDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
+ << "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
+ ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, &error_msg))
+ << "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
+ << "Expected dex file to be at: " << GetDexSrc2();
+
+ // GetMultiDexSrc2 should have the same primary dex checksum as
+ // GetMultiDexSrc1, but a different secondary dex checksum.
+ static constexpr bool kVerifyChecksum = true;
+ std::vector<std::unique_ptr<const DexFile>> multi1;
+ ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(),
+ GetMultiDexSrc1().c_str(), kVerifyChecksum, &error_msg, &multi1)) << error_msg;
+ ASSERT_GT(multi1.size(), 1u);
+
+ std::vector<std::unique_ptr<const DexFile>> multi2;
+ ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(),
+ GetMultiDexSrc2().c_str(), kVerifyChecksum, &error_msg, &multi2)) << error_msg;
+ ASSERT_GT(multi2.size(), 1u);
+
+ ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
+ ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
+ }
+
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ // options->push_back(std::make_pair("-verbose:oat", nullptr));
+
+ // Set up the image location.
+ options->push_back(std::make_pair("-Ximage:" + GetImageLocation(),
+ nullptr));
+    // Make sure compiler callbacks are not set so that relocation will be
+    // enabled.
+ callbacks_.reset();
+ }
+
+ virtual void TearDown() OVERRIDE {
+ ClearDirectory(odex_dir_.c_str());
+ ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
+
+ ClearDirectory(odex_oat_dir_.c_str());
+ ASSERT_EQ(0, rmdir(odex_oat_dir_.c_str()));
+
+ ClearDirectory(scratch_dir_.c_str());
+ ASSERT_EQ(0, rmdir(scratch_dir_.c_str()));
+
+ CommonRuntimeTest::TearDown();
+ }
+
+ static void Copy(const std::string& src, const std::string& dst) {
+ std::ifstream src_stream(src, std::ios::binary);
+ std::ofstream dst_stream(dst, std::ios::binary);
+
+ dst_stream << src_stream.rdbuf();
+ }
+
+ // Returns the directory where the pre-compiled core.art can be found.
+  // TODO: We should factor this out into common test code somewhere rather
+  // than re-hardcoding it here (this was originally copied from the elf
+  // writer test).
+ std::string GetImageDirectory() const {
+ if (IsHost()) {
+ const char* host_dir = getenv("ANDROID_HOST_OUT");
+ CHECK(host_dir != nullptr);
+ return std::string(host_dir) + "/framework";
+ } else {
+ return std::string("/data/art-test");
+ }
+ }
+
+ std::string GetImageLocation() const {
+ return GetImageDirectory() + "/core.art";
+ }
+
+ std::string GetSystemImageFile() const {
+ return GetImageDirectory() + "/" + GetInstructionSetString(kRuntimeISA)
+ + "/core.art";
+ }
+
+ bool GetCachedImageFile(/*out*/std::string* image, std::string* error_msg) const {
+ std::string cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA), true);
+ return GetDalvikCacheFilename(GetImageLocation().c_str(), cache.c_str(), image, error_msg);
+ }
+
+ std::string GetDexSrc1() const {
+ return GetTestDexFileName("Main");
+ }
+
+ // Returns the path to a dex file equivalent to GetDexSrc1, but with the dex
+ // file stripped.
+ std::string GetStrippedDexSrc1() const {
+ return GetTestDexFileName("MainStripped");
+ }
+
+ std::string GetMultiDexSrc1() const {
+ return GetTestDexFileName("MultiDex");
+ }
+
+  // Returns the path to a multidex file equivalent to GetMultiDexSrc1, but
+ // with the contents of the secondary dex file changed.
+ std::string GetMultiDexSrc2() const {
+ return GetTestDexFileName("MultiDexModifiedSecondary");
+ }
+
+ std::string GetDexSrc2() const {
+ return GetTestDexFileName("Nested");
+ }
+
+ // Scratch directory, for dex and odex files (oat files will go in the
+ // dalvik cache).
+ const std::string& GetScratchDir() const {
+ return scratch_dir_;
+ }
+
+ // Odex directory is the subdirectory in the scratch directory where odex
+ // files should be located.
+ const std::string& GetOdexDir() const {
+ return odex_dir_;
+ }
+
+ private:
+ std::string scratch_dir_;
+ std::string odex_oat_dir_;
+ std::string odex_dir_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX2OAT_ENVIRONMENT_TEST_H_
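
For context, a suite built on this fixture only needs the scratch helpers; a hypothetical test (name and body invented for illustration, using OS::FileExists and the fixture methods shown above) might look like:

    TEST_F(Dex2oatEnvironmentTest, CopiesDexIntoScratch) {
      const std::string dex_location = GetScratchDir() + "/Test.jar";
      Copy(GetDexSrc1(), dex_location);
      ASSERT_TRUE(OS::FileExists(dex_location.c_str()));
    }
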
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index db3f88ff6e..1b55d2f331 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -19,7 +19,7 @@
#include "entrypoint_utils.h"
-#include "art_method-inl.h"
+#include "art_method.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -448,23 +448,10 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
: ClassLinker::kNoICCECheckForCache;
resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
}
+ // Resolution and access check.
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
- } else if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
- if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
- resolved_method->IsConstructor())) {
- // Hack for String init:
- //
- // We assume that the input of String.<init> in verified code is always
- // an unitialized reference. If it is a null constant, it must have been
- // optimized out by the compiler. Do not throw NullPointerException.
- } else {
- // Maintain interpreter-like semantics where NullPointerException is thrown
- // after potential NoSuchMethodError from class linker.
- ThrowNullPointerExceptionForMethodAccess(method_idx, type);
- return nullptr; // Failure.
- }
} else if (access_check) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
bool can_access_resolved_method =
@@ -482,6 +469,22 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
return nullptr; // Failure.
}
}
+ // Next, null pointer check.
+ if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
+ if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
+ resolved_method->IsConstructor())) {
+ // Hack for String init:
+ //
+ // We assume that the input of String.<init> in verified code is always
+ // an unitialized reference. If it is a null constant, it must have been
+ // optimized out by the compiler. Do not throw NullPointerException.
+ } else {
+ // Maintain interpreter-like semantics where NullPointerException is thrown
+ // after potential NoSuchMethodError from class linker.
+ ThrowNullPointerExceptionForMethodAccess(method_idx, type);
+ return nullptr; // Failure.
+ }
+ }
switch (type) {
case kStatic:
case kDirect:
@@ -559,10 +562,9 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
}
}
case kInterface: {
- uint32_t imt_index = resolved_method->GetImtIndex();
- size_t pointer_size = class_linker->GetImagePointerSize();
- ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
- Get(imt_index, pointer_size);
+ uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(
+ imt_index, class_linker->GetImagePointerSize());
if (!imt_method->IsRuntimeMethod()) {
if (kIsDebugBuild) {
mirror::Class* klass = (*this_object)->GetClass();
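
The reverted lookup hashes an interface method into a fixed-size table by its dex method index, so collisions are expected and resolved elsewhere. A self-contained sketch of the slot computation (the kImtSize value is illustrative; ART configures it at build time via IMT_SIZE):

    #include <stddef.h>
    #include <stdint.h>

    static constexpr size_t kImtSize = 64;  // illustrative; set via IMT_SIZE in ART builds

    constexpr uint32_t ImtSlot(uint32_t dex_method_index) {
      return dex_method_index % kImtSize;
    }

    // Two interface methods whose indices differ by kImtSize land in the same slot.
    static_assert(ImtSlot(3) == ImtSlot(3 + kImtSize), "collisions are expected");
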
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 771e14396e..d0dad34789 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -120,6 +120,7 @@ extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
extern "C" void art_quick_throw_div_zero();
extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_null_pointer_exception_from_signal(uintptr_t address);
extern "C" void art_quick_throw_stack_overflow(void*);
extern "C" void art_quick_throw_string_bounds(int32_t index, int32_t limit);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 2778e32ece..ea9f7b0826 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -52,8 +52,18 @@ extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* excepti
extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
+ // We come from an explicit check in the generated code. This path is triggered
+ // only if the object is indeed null.
+ ThrowNullPointerExceptionFromDexPC(/* check_address */ false, 0U);
+ self->QuickDeliverException();
+}
+
+// Invoked from the signal handler to throw a NullPointerException.
+extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
- ThrowNullPointerExceptionFromDexPC();
+ ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 0a70be1c95..03771aa80e 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2169,12 +2169,13 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
dex_method_idx, sizeof(void*));
DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
ArtMethod* method = nullptr;
- ImTable* imt = cls->GetImt(sizeof(void*));
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
// If the dex cache already resolved the interface method, look whether we have
// a match in the ImtConflictTable.
- ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), sizeof(void*));
+ uint32_t imt_index = interface_method->GetDexMethodIndex();
+ ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
+ imt_index % mirror::Class::kImtSize, sizeof(void*));
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
DCHECK(current_table != nullptr);
@@ -2225,8 +2226,9 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
- uint32_t imt_index = interface_method->GetImtIndex();
- ArtMethod* conflict_method = imt->Get(imt_index, sizeof(void*));
+ uint32_t imt_index = interface_method->GetDexMethodIndex();
+ ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
+ imt_index % mirror::Class::kImtSize, sizeof(void*));
if (conflict_method->IsRuntimeMethod()) {
ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
cls.Get(),
@@ -2237,9 +2239,9 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
if (new_conflict_method != conflict_method) {
// Update the IMT if we create a new conflict method. No fence needed here, as the
// data is consistent.
- imt->Set(imt_index,
- new_conflict_method,
- sizeof(void*));
+ cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize,
+ new_conflict_method,
+ sizeof(void*));
}
}
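
When an IMT slot is shared, the conflict table maps each interface method to its implementation. A simplified sketch of that lookup (types and layout invented; the real ImtConflictTable stores entries at pointer-size strides):

    #include <stddef.h>

    struct Method;
    struct ConflictEntry {
      Method* interface_method;
      Method* implementation;
    };

    Method* LookupConflict(const ConflictEntry* table, size_t count, Method* interface_method) {
      for (size_t i = 0; i < count; ++i) {
        if (table[i].interface_method == interface_method) {
          return table[i].implementation;  // hit: dispatch directly
        }
      }
      return nullptr;  // miss: fall back to slow-path resolution
    }
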
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 625b1e8cc6..56e0fb78c1 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -96,6 +96,14 @@ class NullPointerHandler FINAL : public FaultHandler {
bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+ static bool IsValidImplicitCheck(siginfo_t* siginfo) {
+ // Our implicit NPE checks always limit the range to a page.
+ // Note that the runtime will do more exhaustive checks (that we cannot
+ // reasonably do in signal processing code) based on the dex instruction
+ // faulting.
+ return CanDoImplicitNullCheckOn(reinterpret_cast<uintptr_t>(siginfo->si_addr));
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(NullPointerHandler);
};
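
A sketch of how a fault handler would apply this filter before handing control to the runtime (assuming 4K pages, matching the kPageSize constant in globals.h; the function name is invented):

    #include <signal.h>
    #include <stdint.h>

    static constexpr uintptr_t kPageSize = 4096;  // mirrors the runtime constant

    static bool LooksLikeImplicitNullCheck(const siginfo_t* info) {
      const uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);
      // Null-plus-small-offset dereferences stay inside the unmapped first page.
      return fault_addr < kPageSize;
    }
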
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 64fa4344d6..301111251a 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -28,7 +28,7 @@ namespace art {
namespace gc {
namespace collector {
-inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegionOrImmuneSpace(
+inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
// For the Baker-style RB, in a rare case, we could incorrectly change the object from white
// to gray even though the object has already been marked through. This happens if a mutator
@@ -69,6 +69,37 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegionOrImmuneSpace
return ref;
}
+template<bool kGrayImmuneObject>
+inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
+ if (kUseBakerReadBarrier) {
+ // The GC-running thread doesn't (need to) gray immune objects except when updating thread roots
+ // in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
+ // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
+ // immune space objects (when updated_all_immune_objects_ is true).
+ if (kIsDebugBuild) {
+ if (Thread::Current() == thread_running_gc_) {
+ DCHECK(!kGrayImmuneObject ||
+ updated_all_immune_objects_.LoadRelaxed() ||
+ gc_grays_immune_objects_);
+ } else {
+ DCHECK(kGrayImmuneObject);
+ }
+ }
+ if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
+ return ref;
+ }
+ // This may or may not succeed, which is ok because the object may already be gray.
+ bool success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
+ if (success) {
+ MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ immune_gray_stack_.push_back(ref);
+ }
+ }
+ return ref;
+}
+
+template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
if (from_ref == nullptr) {
return nullptr;
@@ -109,10 +140,14 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
return to_ref;
}
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
- return MarkUnevacFromSpaceRegionOrImmuneSpace(from_ref, region_space_bitmap_);
+ return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
}
case space::RegionSpace::RegionType::kRegionTypeNone:
- return MarkNonMoving(from_ref);
+ if (immune_spaces_.ContainsObject(from_ref)) {
+ return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
+ } else {
+ return MarkNonMoving(from_ref);
+ }
default:
UNREACHABLE();
}
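
The white-to-gray transition in MarkImmuneSpace is a single CAS; only the winner records the object for later whitening, and losing the race is harmless because the object is then already gray. A self-contained sketch of that protocol using std::atomic (all names invented):

    #include <atomic>
    #include <mutex>
    #include <vector>

    enum class Color { kWhite, kGray };
    struct Obj { std::atomic<Color> rb_state{Color::kWhite}; };

    std::mutex gray_stack_lock;
    std::vector<Obj*> gray_stack;

    void MarkImmune(Obj* obj) {
      Color expected = Color::kWhite;
      // May fail if another thread already grayed the object, which is fine.
      if (obj->rb_state.compare_exchange_strong(expected, Color::kGray)) {
        std::lock_guard<std::mutex> lock(gray_stack_lock);
        gray_stack.push_back(obj);  // only the winner queues it for later whitening
      }
    }
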
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dd750060b8..b7b5aa0059 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -50,14 +50,16 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
+ region_space_bitmap_(nullptr),
heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
rb_table_(heap_->GetReadBarrierTable()),
- force_evacuate_all_(false) {
+ force_evacuate_all_(false),
+ immune_gray_stack_lock_("concurrent copying immune gray stack lock",
+ kMarkSweepMarkStackLock) {
static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
"The region space size and the read barrier table region size must match");
- cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -139,19 +141,10 @@ void ConcurrentCopying::BindBitmaps() {
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
CHECK(space->IsZygoteSpace() || space->IsImageSpace());
immune_spaces_.AddSpace(space);
- const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
- "cc zygote space bitmap";
- // TODO: try avoiding using bitmaps for image/zygote to save space.
- accounting::ContinuousSpaceBitmap* bitmap =
- accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
- cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
- cc_bitmaps_.push_back(bitmap);
} else if (space == region_space_) {
accounting::ContinuousSpaceBitmap* bitmap =
accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
space->Begin(), space->Capacity());
- cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
- cc_bitmaps_.push_back(bitmap);
region_space_bitmap_ = bitmap;
}
}
@@ -179,6 +172,15 @@ void ConcurrentCopying::InitializePhase() {
} else {
force_evacuate_all_ = false;
}
+ if (kUseBakerReadBarrier) {
+ updated_all_immune_objects_.StoreRelaxed(false);
+ // GC may gray immune objects in the thread flip.
+ gc_grays_immune_objects_ = true;
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ DCHECK(immune_gray_stack_.empty());
+ }
+ }
BindBitmaps();
if (kVerboseMode) {
LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
@@ -303,30 +305,6 @@ void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}
-// Used to visit objects in the immune spaces.
-class ConcurrentCopying::ImmuneSpaceObjVisitor {
- public:
- explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}
-
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
- DCHECK(obj != nullptr);
- DCHECK(collector_->immune_spaces_.ContainsObject(obj));
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
- << "Immune space object must be already marked";
- }
- collector_->MarkUnevacFromSpaceRegionOrImmuneSpace(obj, cc_bitmap);
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
class EmptyCheckpoint : public Closure {
public:
explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
@@ -347,6 +325,27 @@ class EmptyCheckpoint : public Closure {
ConcurrentCopying* const concurrent_copying_;
};
+// Used to visit objects in the immune spaces.
+inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
+ DCHECK(obj != nullptr);
+ DCHECK(immune_spaces_.ContainsObject(obj));
+  // Update the object's fields without graying it or pushing it onto the mark stack.
+ Scan(obj);
+}
+
+class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
+ public:
+ explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
+ : collector_(cc) {}
+
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ collector_->ScanImmuneObject(obj);
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+};
+
// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
@@ -354,25 +353,46 @@ void ConcurrentCopying::MarkingPhase() {
LOG(INFO) << "GC MarkingPhase";
}
CHECK(weak_ref_access_enabled_);
- {
- // Mark the image root. The WB-based collectors do not need to
- // scan the image objects from roots by relying on the card table,
- // but it's necessary for the RB to-space invariant to hold.
- TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
- for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
- if (space->IsImageSpace()) {
- gc::space::ImageSpace* image = space->AsImageSpace();
- if (image != nullptr) {
- mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
- mirror::Object* marked_image_root = Mark(image_root);
- CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
- if (ReadBarrier::kEnableToSpaceInvariantChecks) {
- AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
- }
- }
- }
+
+ // Scan immune spaces.
+ // Update all the fields in the immune spaces first without graying the objects so that we
+  // minimize dirty pages in the immune spaces. Note that mutators can concurrently access and
+  // gray some of the objects.
+ if (kUseBakerReadBarrier) {
+ gc_grays_immune_objects_ = false;
+ }
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ ImmuneSpaceScanObjVisitor visitor(this);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->Limit()),
+ visitor);
+ }
+ if (kUseBakerReadBarrier) {
+    // This release fence makes the field updates in the above loop visible before allowing
+    // mutators to access immune objects without graying them first.
+ updated_all_immune_objects_.StoreRelease(true);
+ // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
+ // the above loop because we would incorrectly disable the read barrier by whitening an object
+ // which may point to an unscanned, white object, breaking the to-space invariant.
+ //
+ // Make sure no mutators are in the middle of marking an immune object before whitening immune
+ // objects.
+ IssueEmptyCheckpoint();
+ MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ if (kVerboseMode) {
+ LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
}
+ for (mirror::Object* obj : immune_gray_stack_) {
+ DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
+ bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ DCHECK(success);
+ }
+ immune_gray_stack_.clear();
}
+
{
TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
@@ -383,16 +403,6 @@ void ConcurrentCopying::MarkingPhase() {
Runtime::Current()->VisitNonThreadRoots(this);
}
- // Immune spaces.
- for (auto& space : immune_spaces_.GetSpaces()) {
- DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
- accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- ImmuneSpaceObjVisitor visitor(this);
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
- }
-
Thread* self = Thread::Current();
{
TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
@@ -1239,6 +1249,9 @@ void ConcurrentCopying::ReclaimPhase() {
IssueEmptyCheckpoint();
// Disable the check.
is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
+ if (kUseBakerReadBarrier) {
+ updated_all_immune_objects_.StoreSequentiallyConsistent(false);
+ }
CheckEmptyMarkStack();
}
@@ -1288,13 +1301,9 @@ void ConcurrentCopying::ReclaimPhase() {
SwapBitmaps();
heap_->UnBindBitmaps();
- // Remove bitmaps for the immune spaces.
- while (!cc_bitmaps_.empty()) {
- accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
- cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
- delete cc_bitmap;
- cc_bitmaps_.pop_back();
- }
+ // Delete the region bitmap.
+ DCHECK(region_space_bitmap_ != nullptr);
+ delete region_space_bitmap_;
region_space_bitmap_ = nullptr;
}
@@ -1410,15 +1419,6 @@ void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset
// In a non-moving space.
if (immune_spaces_.ContainsObject(obj)) {
LOG(INFO) << "holder is in an immune image or the zygote space.";
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
- CHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap.";
- if (cc_bitmap->Test(obj)) {
- LOG(INFO) << "holder is marked in the bit map.";
- } else {
- LOG(INFO) << "holder is NOT marked in the bit map.";
- }
} else {
LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -1449,17 +1449,17 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
mirror::Object* ref) {
// In a non-moving spaces. Check that the ref is marked.
if (immune_spaces_.ContainsObject(ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
- CHECK(cc_bitmap != nullptr)
- << "An immune space ref must have a bitmap. " << ref;
if (kUseBakerReadBarrier) {
- CHECK(cc_bitmap->Test(ref))
+ // Immune object may not be gray if called from the GC.
+ if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
+ return;
+ }
+ bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
+ CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
<< "Unmarked immune space ref. obj=" << obj << " rb_ptr="
- << obj->GetReadBarrierPointer() << " ref=" << ref;
- } else {
- CHECK(cc_bitmap->Test(ref))
- << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
+ << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
+ << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
+ << " updated_all_immune_objects=" << updated_all_immune_objects;
}
} else {
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -1510,7 +1510,7 @@ class ConcurrentCopying::RefFieldsVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_) {
- collector_->MarkRoot(root);
+ collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
}
private:
@@ -1520,6 +1520,7 @@ class ConcurrentCopying::RefFieldsVisitor {
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
+ DCHECK_EQ(Thread::Current(), thread_running_gc_);
RefFieldsVisitor visitor(this);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
@@ -1528,9 +1529,10 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
+ DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark(ref);
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
if (to_ref == ref) {
return;
}
@@ -1569,10 +1571,11 @@ inline void ConcurrentCopying::VisitRoots(
}
}
+template<bool kGrayImmuneObject>
inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
DCHECK(!root->IsNull());
mirror::Object* const ref = root->AsMirrorPtr();
- mirror::Object* to_ref = Mark(ref);
+ mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
@@ -1593,14 +1596,46 @@ inline void ConcurrentCopying::VisitRoots(
for (size_t i = 0; i < count; ++i) {
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
- MarkRoot(root);
+ // kGrayImmuneObject is true because this is used for the thread flip.
+ MarkRoot</*kGrayImmuneObject*/true>(root);
}
}
}
+// Temporarily set gc_grays_immune_objects_ to true in a scope when the current thread is the GC.
+class ConcurrentCopying::ScopedGcGraysImmuneObjects {
+ public:
+ explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
+ : collector_(collector), enabled_(false) {
+ if (kUseBakerReadBarrier &&
+ collector_->thread_running_gc_ == Thread::Current() &&
+ !collector_->gc_grays_immune_objects_) {
+ collector_->gc_grays_immune_objects_ = true;
+ enabled_ = true;
+ }
+ }
+
+ ~ScopedGcGraysImmuneObjects() {
+ if (kUseBakerReadBarrier &&
+ collector_->thread_running_gc_ == Thread::Current() &&
+ enabled_) {
+ DCHECK(collector_->gc_grays_immune_objects_);
+ collector_->gc_grays_immune_objects_ = false;
+ }
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+ bool enabled_;
+};
+
// Fill the given memory block with a dummy object. Used to fill in a
// copy of objects that was lost in race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
+  // The GC doesn't gray immune objects while scanning them, but we need to trigger the read
+  // barriers here because we need the updated reference to the int array class, etc. Temporarily
+  // set gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
+ ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
CHECK_ALIGNED(byte_size, kObjectAlignment);
memset(dummy_obj, 0, byte_size);
mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
@@ -1836,21 +1871,8 @@ mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
} else {
// from_ref is in a non-moving space.
if (immune_spaces_.ContainsObject(from_ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
- << "Immune space object must be already marked";
- }
- if (cc_bitmap->Test(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- } else {
- // Newly marked.
- to_ref = nullptr;
- }
+ // An immune object is alive.
+ to_ref = from_ref;
} else {
// Non-immune non-moving space. Use the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -1889,85 +1911,74 @@ bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
// ref is in a non-moving space (from_ref == to_ref).
DCHECK(!region_space_->HasAddress(ref)) << ref;
- if (immune_spaces_.ContainsObject(ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
- << "Immune space object must be already marked";
+ DCHECK(!immune_spaces_.ContainsObject(ref));
+ // Use the mark bitmap.
+ accounting::ContinuousSpaceBitmap* mark_bitmap =
+ heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
+ accounting::LargeObjectBitmap* los_bitmap =
+ heap_mark_bitmap_->GetLargeObjectBitmap(ref);
+ CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
+ bool is_los = mark_bitmap == nullptr;
+ if (!is_los && mark_bitmap->Test(ref)) {
+ // Already marked.
+ if (kUseBakerReadBarrier) {
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ }
+ } else if (is_los && los_bitmap->Test(ref)) {
+ // Already marked in LOS.
+ if (kUseBakerReadBarrier) {
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
}
- MarkUnevacFromSpaceRegionOrImmuneSpace(ref, cc_bitmap);
} else {
- // Use the mark bitmap.
- accounting::ContinuousSpaceBitmap* mark_bitmap =
- heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
- accounting::LargeObjectBitmap* los_bitmap =
- heap_mark_bitmap_->GetLargeObjectBitmap(ref);
- CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
- bool is_los = mark_bitmap == nullptr;
- if (!is_los && mark_bitmap->Test(ref)) {
- // Already marked.
- if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ // Not marked.
+ if (IsOnAllocStack(ref)) {
+ // If it's on the allocation stack, it's considered marked. Keep it white.
+ // Objects on the allocation stack need not be marked.
+ if (!is_los) {
+ DCHECK(!mark_bitmap->Test(ref));
+ } else {
+ DCHECK(!los_bitmap->Test(ref));
}
- } else if (is_los && los_bitmap->Test(ref)) {
- // Already marked in LOS.
if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
}
} else {
- // Not marked.
- if (IsOnAllocStack(ref)) {
- // If it's on the allocation stack, it's considered marked. Keep it white.
- // Objects on the allocation stack need not be marked.
- if (!is_los) {
- DCHECK(!mark_bitmap->Test(ref));
- } else {
- DCHECK(!los_bitmap->Test(ref));
+ // For the baker-style RB, we need to handle 'false-gray' cases. See the
+ // kRegionTypeUnevacFromSpace-case comment in Mark().
+ if (kUseBakerReadBarrier) {
+ // Test the bitmap first to reduce the chance of false gray cases.
+ if ((!is_los && mark_bitmap->Test(ref)) ||
+ (is_los && los_bitmap->Test(ref))) {
+ return ref;
}
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
+ }
+ // Not marked or on the allocation stack. Try to mark it.
+ // This may or may not succeed, which is ok.
+ bool cas_success = false;
+ if (kUseBakerReadBarrier) {
+ cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
+ }
+ if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
}
- } else {
- // For the baker-style RB, we need to handle 'false-gray' cases. See the
- // kRegionTypeUnevacFromSpace-case comment in Mark().
- if (kUseBakerReadBarrier) {
- // Test the bitmap first to reduce the chance of false gray cases.
- if ((!is_los && mark_bitmap->Test(ref)) ||
- (is_los && los_bitmap->Test(ref))) {
- return ref;
- }
+ } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked in LOS.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
}
- // Not marked or on the allocation stack. Try to mark it.
- // This may or may not succeed, which is ok.
- bool cas_success = false;
+ } else {
+ // Newly marked.
if (kUseBakerReadBarrier) {
- cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
- ReadBarrier::GrayPtr());
- }
- if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
- // Already marked.
- if (kUseBakerReadBarrier && cas_success &&
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- PushOntoFalseGrayStack(ref);
- }
- } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
- // Already marked in LOS.
- if (kUseBakerReadBarrier && cas_success &&
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- PushOntoFalseGrayStack(ref);
- }
- } else {
- // Newly marked.
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(ref);
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
}
+ PushOntoMarkStack(ref);
}
}
}
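
The updated_all_immune_objects_ flag follows a standard publish pattern: scan, then store-release; a reader that observes true is guaranteed to see the updated fields and may skip graying. The collector pairs the release store with an empty checkpoint rather than acquire loads, but the plain release/acquire form of the same idea (a sketch, not ART code) looks like:

    #include <atomic>

    std::atomic<bool> updated_all_immune_objects{false};
    int immune_field = 0;  // stands in for the scanned objects' fields

    void GcThread() {
      immune_field = 42;  // scan: update every immune object's references
      updated_all_immune_objects.store(true, std::memory_order_release);
    }

    bool MutatorFastPath() {
      if (updated_all_immune_objects.load(std::memory_order_acquire)) {
        return immune_field == 42;  // guaranteed to see the updates; no graying needed
      }
      return false;  // slow path: gray the object before use
    }
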
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a986a7a1db..166a1f0b2a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -61,10 +61,12 @@ class ConcurrentCopying : public GarbageCollector {
ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
- void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ virtual void RunPhases() OVERRIDE
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
+ void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
@@ -92,8 +94,9 @@ class ConcurrentCopying : public GarbageCollector {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
+ template<bool kGrayImmuneObject = true>
ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarking() const {
return is_marking_;
}
@@ -117,16 +120,19 @@ class ConcurrentCopying : public GarbageCollector {
void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
+ template<bool kGrayImmuneObject>
void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
@@ -146,9 +152,11 @@ class ConcurrentCopying : public GarbageCollector {
SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
@@ -182,14 +190,19 @@ class ConcurrentCopying : public GarbageCollector {
void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
- ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegionOrImmuneSpace(mirror::Object* from_ref,
+ ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
accounting::SpaceBitmap<kObjectAlignment>* bitmap)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ template<bool kGrayImmuneObject>
+ ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ void ScanImmuneObject(mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
@@ -207,8 +220,6 @@ class ConcurrentCopying : public GarbageCollector {
bool is_active_; // True while the collection is ongoing.
bool is_asserting_to_space_invariant_; // True while asserting the to-space invariant.
ImmuneSpaces immune_spaces_;
- std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
- std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
// A cache of Heap::GetMarkBitmap().
accounting::HeapBitmap* heap_mark_bitmap_;
@@ -242,6 +253,10 @@ class ConcurrentCopying : public GarbageCollector {
accounting::ReadBarrierTable* rb_table_;
bool force_evacuate_all_; // True if all regions are evacuated.
+ Atomic<bool> updated_all_immune_objects_;
+ bool gc_grays_immune_objects_;
+ Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);
class AssertToSpaceInvariantFieldVisitor;
class AssertToSpaceInvariantObjectVisitor;
@@ -250,14 +265,15 @@ class ConcurrentCopying : public GarbageCollector {
class ComputeUnevacFromSpaceLiveRatioVisitor;
class DisableMarkingCheckpoint;
class FlipCallback;
- class ImmuneSpaceObjVisitor;
+ class ImmuneSpaceScanObjVisitor;
class LostCopyVisitor;
class RefFieldsVisitor;
class RevokeThreadLocalMarkStackCheckpoint;
+ class ScopedGcGraysImmuneObjects;
+ class ThreadFlipVisitor;
class VerifyNoFromSpaceRefsFieldVisitor;
class VerifyNoFromSpaceRefsObjectVisitor;
class VerifyNoFromSpaceRefsVisitor;
- class ThreadFlipVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 8cadc2e0fc..e896c7a726 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1130,10 +1130,6 @@ static bool RelocateInPlace(ImageHeader& image_header,
image_header.VisitPackedArtFields(&field_visitor, target_base);
}
{
- TimingLogger::ScopedTiming timing("Fixup imt", &logger);
- image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
- }
- {
TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
}
diff --git a/runtime/globals.h b/runtime/globals.h
index 477cbdf5d4..0b44c47092 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -40,6 +40,12 @@ static constexpr size_t kStackAlignment = 16;
// compile-time constant so the compiler can generate better code.
static constexpr int kPageSize = 4096;
+// Returns whether the given memory offset can be used for generating
+// an implicit null check.
+static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
+ return offset < kPageSize;
+}
+
// Required object alignment
static constexpr size_t kObjectAlignment = 8;
static constexpr size_t kLargeObjectAlignment = kPageSize;
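
A small usage sketch of the new predicate (restating the definitions above so it stands alone): an access at a small offset from a null base faults in the unmapped first page, while a large offset could land on a mapped page and never fault, so it needs an explicit check.

    #include <stdint.h>

    static constexpr int kPageSize = 4096;

    static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
      return offset < kPageSize;
    }

    int main() {
      bool implicit_ok = CanDoImplicitNullCheckOn(8);         // true: faults in page 0
      bool needs_explicit = !CanDoImplicitNullCheckOn(8192);  // true: might not fault at all
      return (implicit_ok && needs_explicit) ? 0 : 1;
    }
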
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index cd0557a235..ea75a622c7 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -20,7 +20,6 @@
#include "image.h"
#include "art_method.h"
-#include "imtable.h"
namespace art {
@@ -46,24 +45,6 @@ inline mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const {
}
template <typename Visitor>
-inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
- uint8_t* base,
- size_t pointer_size) const {
- const ImageSection& section = GetImageSection(kSectionImTables);
- for (size_t pos = 0; pos < section.Size();) {
- ImTable* imt = reinterpret_cast<ImTable*>(base + section.Offset() + pos);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- ArtMethod* orig = imt->Get(i, pointer_size);
- ArtMethod* updated = visitor(orig);
- if (updated != orig) {
- imt->Set(i, updated, pointer_size);
- }
- }
- pos += ImTable::SizeInBytes(pointer_size);
- }
-}
-
-template <typename Visitor>
inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
size_t pointer_size) const {
diff --git a/runtime/image.cc b/runtime/image.cc
index 2362a92c24..a9552c27d3 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '0', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 06f06eed0e..2ea9af7728 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -195,7 +195,6 @@ class PACKED(4) ImageHeader {
kSectionArtFields,
kSectionArtMethods,
kSectionRuntimeMethods,
- kSectionImTables,
kSectionIMTConflictTables,
kSectionDexCacheArrays,
kSectionInternedStrings,
@@ -280,11 +279,6 @@ class PACKED(4) ImageHeader {
void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
template <typename Visitor>
- void VisitPackedImTables(const Visitor& visitor,
- uint8_t* base,
- size_t pointer_size) const;
-
- template <typename Visitor>
void VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
size_t pointer_size) const;
diff --git a/runtime/imtable.h b/runtime/imtable.h
deleted file mode 100644
index 51faf70d14..0000000000
--- a/runtime/imtable.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_IMTABLE_H_
-#define ART_RUNTIME_IMTABLE_H_
-
-#ifndef IMT_SIZE
-#error IMT_SIZE not defined
-#endif
-
-namespace art {
-
-class ArtMethod;
-
-class ImTable {
- public:
- // Interface method table size. Increasing this value reduces the chance of two interface methods
- // colliding in the interface method table but increases the size of classes that implement
- // (non-marker) interfaces.
- static constexpr size_t kSize = IMT_SIZE;
-
- ArtMethod* Get(size_t index, size_t pointer_size) {
- DCHECK_LT(index, kSize);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
- if (pointer_size == 4) {
- uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
- return reinterpret_cast<ArtMethod*>(value);
- } else {
- uint64_t value = *reinterpret_cast<uint64_t*>(ptr);
- return reinterpret_cast<ArtMethod*>(value);
- }
- }
-
- void Set(size_t index, ArtMethod* method, size_t pointer_size) {
- DCHECK_LT(index, kSize);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
- if (pointer_size == 4) {
- uintptr_t value = reinterpret_cast<uintptr_t>(method);
- DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we dont lose any non 0 bits.
- *reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
- } else {
- *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast<uint64_t>(method);
- }
- }
-
- static size_t OffsetOfElement(size_t index, size_t pointer_size) {
- return index * pointer_size;
- }
-
- void Populate(ArtMethod** data, size_t pointer_size) {
- for (size_t i = 0; i < kSize; ++i) {
- Set(i, data[i], pointer_size);
- }
- }
-
- constexpr static size_t SizeInBytes(size_t pointer_size) {
- return kSize * pointer_size;
- }
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_IMTABLE_H_
-
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 3750b7ad18..cc470f372b 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -679,7 +679,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
return false;
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
+ CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
vtable_idx, sizeof(void*));
if (UNLIKELY(called_method == nullptr)) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ae5a0f6777..cfe6cd1856 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -59,8 +59,8 @@ JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& opt
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
jit_options->dump_info_on_shutdown_ =
options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
- jit_options->save_profiling_info_ =
- options.GetOrDefault(RuntimeArgumentMap::JITSaveProfilingInfo);
+ jit_options->profile_saver_options_ =
+ options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
jit_options->compile_threshold_ = options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
@@ -144,11 +144,10 @@ Jit::Jit() : dump_info_on_shutdown_(false),
cumulative_timings_("JIT timings"),
memory_use_("Memory used for compilation", 16),
lock_("JIT memory use lock"),
- use_jit_compilation_(true),
- save_profiling_info_(false) {}
+ use_jit_compilation_(true) {}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
- DCHECK(options->UseJitCompilation() || options->GetSaveProfilingInfo());
+ DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
std::unique_ptr<Jit> jit(new Jit);
jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
@@ -163,12 +162,12 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
return nullptr;
}
jit->use_jit_compilation_ = options->UseJitCompilation();
- jit->save_profiling_info_ = options->GetSaveProfilingInfo();
+ jit->profile_saver_options_ = options->GetProfileSaverOptions();
VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
<< ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
<< ", compile_threshold=" << options->GetCompileThreshold()
- << ", save_profiling_info=" << options->GetSaveProfilingInfo();
+ << ", profile_saver_options=" << options->GetProfileSaverOptions();
jit->hot_method_threshold_ = options->GetCompileThreshold();
@@ -310,13 +309,18 @@ void Jit::StartProfileSaver(const std::string& filename,
const std::vector<std::string>& code_paths,
const std::string& foreign_dex_profile_path,
const std::string& app_dir) {
- if (save_profiling_info_) {
- ProfileSaver::Start(filename, code_cache_.get(), code_paths, foreign_dex_profile_path, app_dir);
+ if (profile_saver_options_.IsEnabled()) {
+ ProfileSaver::Start(profile_saver_options_,
+ filename,
+ code_cache_.get(),
+ code_paths,
+ foreign_dex_profile_path,
+ app_dir);
}
}
void Jit::StopProfileSaver() {
- if (save_profiling_info_ && ProfileSaver::IsStarted()) {
+ if (profile_saver_options_.IsEnabled() && ProfileSaver::IsStarted()) {
ProfileSaver::Stop(dump_info_on_shutdown_);
}
}
@@ -330,7 +334,7 @@ bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
}
Jit::~Jit() {
- DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
+ DCHECK(!profile_saver_options_.IsEnabled() || !ProfileSaver::IsStarted());
if (dump_info_on_shutdown_) {
DumpInfo(LOG(INFO));
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index f3a6240e80..2aa6f3dc49 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -24,6 +24,7 @@
#include "base/timing_logger.h"
#include "object_callbacks.h"
#include "offline_profiling_info.h"
+#include "jit/profile_saver_options.h"
#include "thread_pool.h"
namespace art {
@@ -92,8 +93,8 @@ class Jit {
return use_jit_compilation_;
}
- bool SaveProfilingInfo() const {
- return save_profiling_info_;
+ bool GetSaveProfilingInfo() const {
+ return profile_saver_options_.IsEnabled();
}
// Wait until there is no more pending compilation tasks.
@@ -189,7 +190,7 @@ class Jit {
std::unique_ptr<jit::JitCodeCache> code_cache_;
bool use_jit_compilation_;
- bool save_profiling_info_;
+ ProfileSaverOptions profile_saver_options_;
static bool generate_debug_info_;
uint16_t hot_method_threshold_;
uint16_t warm_method_threshold_;
@@ -228,8 +229,11 @@ class JitOptions {
bool DumpJitInfoOnShutdown() const {
return dump_info_on_shutdown_;
}
+ const ProfileSaverOptions& GetProfileSaverOptions() const {
+ return profile_saver_options_;
+ }
bool GetSaveProfilingInfo() const {
- return save_profiling_info_;
+ return profile_saver_options_.IsEnabled();
}
bool UseJitCompilation() const {
return use_jit_compilation_;
@@ -237,8 +241,8 @@ class JitOptions {
void SetUseJitCompilation(bool b) {
use_jit_compilation_ = b;
}
- void SetSaveProfilingInfo(bool b) {
- save_profiling_info_ = b;
+ void SetSaveProfilingInfo(bool save_profiling_info) {
+ profile_saver_options_.SetEnabled(save_profiling_info);
}
void SetJitAtFirstUse() {
use_jit_compilation_ = true;
@@ -255,15 +259,14 @@ class JitOptions {
uint16_t priority_thread_weight_;
size_t invoke_transition_weight_;
bool dump_info_on_shutdown_;
- bool save_profiling_info_;
+ ProfileSaverOptions profile_saver_options_;
JitOptions()
: use_jit_compilation_(false),
code_cache_initial_capacity_(0),
code_cache_max_capacity_(0),
compile_threshold_(0),
- dump_info_on_shutdown_(false),
- save_profiling_info_(false) { }
+ dump_info_on_shutdown_(false) {}
DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
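
The jit.cc and jit.h hunks above replace the single save_profiling_info_ flag with a full ProfileSaverOptions value, while keeping the old boolean accessors as thin wrappers so existing callers keep compiling. A stand-alone sketch of that facade pattern, with illustrative names rather than the real ART types:

    #include <iostream>

    // Stand-in for ProfileSaverOptions: an enabled bit plus tuning knobs.
    struct Options {
      bool enabled = false;
      unsigned min_save_period_ms = 20 * 1000;

      bool IsEnabled() const { return enabled; }
      void SetEnabled(bool value) { enabled = value; }
    };

    class Config {
     public:
      // The pre-existing boolean API survives as a thin wrapper...
      void SetSaveProfilingInfo(bool save) { options_.SetEnabled(save); }
      bool GetSaveProfilingInfo() const { return options_.IsEnabled(); }
      // ...while new callers can reach the full option set.
      const Options& GetProfileSaverOptions() const { return options_; }

     private:
      Options options_;
    };

    int main() {
      Config config;
      config.SetSaveProfilingInfo(true);
      std::cout << config.GetSaveProfilingInfo() << " "
                << config.GetProfileSaverOptions().min_save_period_ms << "\n";  // 1 20000
    }
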
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 9822f6e851..4d4d1ea7c6 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -30,25 +30,11 @@
namespace art {
-// TODO: read the constants from ProfileOptions,
-// Add a random delay each time we go to sleep so that we don't hammer the CPU
-// with all profile savers running at the same time.
-static constexpr const uint64_t kMinSavePeriodNs = MsToNs(20 * 1000); // 20 seconds
-static constexpr const uint64_t kSaveResolvedClassesDelayMs = 2 * 1000; // 2 seconds
-// Minimum number of JIT samples during launch to include a method into the profile.
-static constexpr const size_t kStartupMethodSamples = 1;
-
-static constexpr const uint32_t kMinimumNumberOfMethodsToSave = 10;
-static constexpr const uint32_t kMinimumNumberOfClassesToSave = 10;
-static constexpr const uint32_t kMinimumNumberOfNotificationBeforeWake =
- kMinimumNumberOfMethodsToSave;
-static constexpr const uint32_t kMaximumNumberOfNotificationBeforeWake = 50;
-
-
ProfileSaver* ProfileSaver::instance_ = nullptr;
pthread_t ProfileSaver::profiler_pthread_ = 0U;
-ProfileSaver::ProfileSaver(const std::string& output_filename,
+ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
+ const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths,
const std::string& foreign_dex_profile_path,
@@ -72,7 +58,9 @@ ProfileSaver::ProfileSaver(const std::string& output_filename,
total_number_of_foreign_dex_marks_(0),
max_number_of_profile_entries_cached_(0),
total_number_of_hot_spikes_(0),
- total_number_of_wake_ups_(0) {
+ total_number_of_wake_ups_(0),
+ options_(options) {
+ DCHECK(options_.IsEnabled());
AddTrackedLocations(output_filename, app_data_dir, code_paths);
if (!app_data_dir.empty()) {
// The application directory is used to determine which dex files are owned by app.
@@ -93,14 +81,13 @@ void ProfileSaver::Run() {
Thread* self = Thread::Current();
// Fetch the resolved classes for the app images after sleeping for
- // kSaveResolvedClassesDelayMs.
+ // options_.GetSaveResolvedClassesDelayMs().
// TODO(calin) This only considers the case of the primary profile file.
// Anything that gets loaded in the same VM will not have its resolved
// classes saved (unless it started before the initial saving was done).
{
MutexLock mu(self, wait_lock_);
- constexpr uint64_t kSleepTime = kSaveResolvedClassesDelayMs;
- const uint64_t end_time = NanoTime() + MsToNs(kSleepTime);
+ const uint64_t end_time = NanoTime() + MsToNs(options_.GetSaveResolvedClassesDelayMs());
while (true) {
const uint64_t current_time = NanoTime();
if (current_time >= end_time) {
@@ -108,7 +95,7 @@ void ProfileSaver::Run() {
}
period_condition_.TimedWait(self, NsToMs(end_time - current_time), 0);
}
- total_ms_of_sleep_ += kSaveResolvedClassesDelayMs;
+ total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
}
FetchAndCacheResolvedClassesAndMethods();
@@ -130,10 +117,11 @@ void ProfileSaver::Run() {
// We might have been woken up by a huge number of notifications to guarantee saving.
// If we didn't meet the minimum saving period, go back to sleep (only if we
// missed it by a reasonable margin).
- while (kMinSavePeriodNs * 0.9 > sleep_time) {
+ uint64_t min_save_period_ns = MsToNs(options_.GetMinSavePeriodMs());
+ while (min_save_period_ns * 0.9 > sleep_time) {
{
MutexLock mu(self, wait_lock_);
- period_condition_.TimedWait(self, NsToMs(kMinSavePeriodNs - sleep_time), 0);
+ period_condition_.TimedWait(self, NsToMs(min_save_period_ns - sleep_time), 0);
sleep_time = NanoTime() - sleep_start;
}
// Check if the thread was woken up for shutdown.
@@ -183,12 +171,12 @@ void ProfileSaver::NotifyJitActivityInternal() {
jit_activity_notifications_++;
// Note that we are not as precise as we could be here but we don't want to wake the saver
// every time we see a hot method.
- if (jit_activity_notifications_ > kMinimumNumberOfNotificationBeforeWake) {
+ if (jit_activity_notifications_ > options_.GetMinNotificationBeforeWake()) {
MutexLock wait_mutex(Thread::Current(), wait_lock_);
- if ((NanoTime() - last_time_ns_saver_woke_up_) > kMinSavePeriodNs) {
+ if ((NanoTime() - last_time_ns_saver_woke_up_) > MsToNs(options_.GetMinSavePeriodMs())) {
WakeUpSaver();
}
- } else if (jit_activity_notifications_ > kMaximumNumberOfNotificationBeforeWake) {
+ } else if (jit_activity_notifications_ > options_.GetMaxNotificationBeforeWake()) {
// Make sure to wake up the saver if we see a spike in the number of notifications.
// This is a precaution to avoid losing a large number of methods in case
// this is a spike with no JIT activity afterwards.
@@ -210,7 +198,9 @@ ProfileCompilationInfo* ProfileSaver::GetCachedProfiledInfo(const std::string& f
// Excludes native methods and classes in the boot image.
class GetMethodsVisitor : public ClassVisitor {
public:
- explicit GetMethodsVisitor(std::vector<MethodReference>* methods) : methods_(methods) {}
+ GetMethodsVisitor(std::vector<MethodReference>* methods, uint32_t startup_method_samples)
+ : methods_(methods),
+ startup_method_samples_(startup_method_samples) {}
virtual bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
@@ -218,7 +208,7 @@ class GetMethodsVisitor : public ClassVisitor {
}
for (ArtMethod& method : klass->GetMethods(sizeof(void*))) {
if (!method.IsNative()) {
- if (method.GetCounter() >= kStartupMethodSamples ||
+ if (method.GetCounter() >= startup_method_samples_ ||
method.GetProfilingInfo(sizeof(void*)) != nullptr) {
// Have samples, add to profile.
const DexFile* dex_file = method.GetInterfaceMethodIfProxy(sizeof(void*))->GetDexFile();
@@ -231,6 +221,7 @@ class GetMethodsVisitor : public ClassVisitor {
private:
std::vector<MethodReference>* const methods_;
+ uint32_t startup_method_samples_;
};
void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
@@ -242,11 +233,11 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
std::vector<MethodReference> methods;
{
ScopedTrace trace2("Get hot methods");
- GetMethodsVisitor visitor(&methods);
+ GetMethodsVisitor visitor(&methods, options_.GetStartupMethodSamples());
ScopedObjectAccess soa(Thread::Current());
class_linker->VisitClasses(&visitor);
VLOG(profiler) << "Methods with samples greater than "
- << kStartupMethodSamples << " = " << methods.size();
+ << options_.GetStartupMethodSamples() << " = " << methods.size();
}
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
uint64_t total_number_of_profile_entries_cached = 0;
@@ -315,11 +306,11 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) {
cached_info->GetNumberOfResolvedClasses() -
static_cast<int64_t>(last_save_number_of_classes_);
- if (delta_number_of_methods < kMinimumNumberOfMethodsToSave &&
- delta_number_of_classes < kMinimumNumberOfClassesToSave) {
+ if (delta_number_of_methods < options_.GetMinMethodsToSave() &&
+ delta_number_of_classes < options_.GetMinClassesToSave()) {
VLOG(profiler) << "Not enough information to save to: " << filename
- << " Nr of methods: " << delta_number_of_methods
- << " Nr of classes: " << delta_number_of_classes;
+ << " Number of methods: " << delta_number_of_methods
+ << " Number of classes: " << delta_number_of_classes;
total_number_of_skipped_writes_++;
continue;
}
@@ -398,12 +389,14 @@ static bool ShouldProfileLocation(const std::string& location) {
return true;
}
-void ProfileSaver::Start(const std::string& output_filename,
+void ProfileSaver::Start(const ProfileSaverOptions& options,
+ const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths,
const std::string& foreign_dex_profile_path,
const std::string& app_data_dir) {
- DCHECK(Runtime::Current()->SaveProfileInfo());
+ DCHECK(options.IsEnabled());
+ DCHECK(Runtime::Current()->GetJit() != nullptr);
DCHECK(!output_filename.empty());
DCHECK(jit_code_cache != nullptr);
@@ -433,7 +426,8 @@ void ProfileSaver::Start(const std::string& output_filename,
VLOG(profiler) << "Starting profile saver using output file: " << output_filename
<< ". Tracking: " << Join(code_paths_to_profile, ':');
- instance_ = new ProfileSaver(output_filename,
+ instance_ = new ProfileSaver(options,
+ output_filename,
jit_code_cache,
code_paths_to_profile,
foreign_dex_profile_path,
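
With the file-local constants gone, NotifyJitActivityInternal reads its thresholds from options_. A stand-alone sketch of the resulting wake-up policy (not ART code; the numbers mirror the ProfileSaverOptions defaults introduced below):

    #include <cstdint>

    struct WakePolicy {
      uint32_t min_notifications = 10;   // GetMinNotificationBeforeWake()
      uint32_t max_notifications = 50;   // GetMaxNotificationBeforeWake()
      uint64_t min_save_period_ns = 20ull * 1000 * 1000 * 1000;  // MsToNs(GetMinSavePeriodMs())

      // True when the saver thread should be signalled.
      bool ShouldWake(uint32_t notifications, uint64_t ns_since_last_wake) const {
        if (notifications > max_notifications) {
          return true;  // Spike: wake regardless of the period so methods are not lost.
        }
        return notifications > min_notifications && ns_since_last_wake > min_save_period_ns;
      }
    };

    int main() {
      WakePolicy policy;
      // 11 notifications, 21s since the last wake-up: both thresholds met.
      return policy.ShouldWake(11, 21ull * 1000 * 1000 * 1000) ? 0 : 1;
    }
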
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 9c6d0fa1cf..59e2c94790 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -20,6 +20,7 @@
#include "base/mutex.h"
#include "jit_code_cache.h"
#include "offline_profiling_info.h"
+#include "profile_saver_options.h"
#include "safe_map.h"
namespace art {
@@ -28,7 +29,8 @@ class ProfileSaver {
public:
// Starts the profile saver thread if not already started.
// If the saver is already running, it adds (output_filename, code_paths) to its tracked locations.
- static void Start(const std::string& output_filename,
+ static void Start(const ProfileSaverOptions& options,
+ const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths,
const std::string& foreign_dex_profile_path,
@@ -61,7 +63,8 @@ class ProfileSaver {
uint16_t method_idx);
private:
- ProfileSaver(const std::string& output_filename,
+ ProfileSaver(const ProfileSaverOptions& options,
+ const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
const std::vector<std::string>& code_paths,
const std::string& foreign_dex_profile_path,
@@ -155,6 +158,7 @@ class ProfileSaver {
uint64_t total_number_of_hot_spikes_;
uint64_t total_number_of_wake_ups_;
+ const ProfileSaverOptions options_;
DISALLOW_COPY_AND_ASSIGN(ProfileSaver);
};
diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h
new file mode 100644
index 0000000000..a6385d7469
--- /dev/null
+++ b/runtime/jit/profile_saver_options.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_PROFILE_SAVER_OPTIONS_H_
+#define ART_RUNTIME_JIT_PROFILE_SAVER_OPTIONS_H_
+
+#include <string>
+#include <ostream>
+
+namespace art {
+
+struct ProfileSaverOptions {
+ public:
+ static constexpr uint32_t kMinSavePeriodMs = 20 * 1000; // 20 seconds
+ static constexpr uint32_t kSaveResolvedClassesDelayMs = 2 * 1000; // 2 seconds
+ // Minimum number of JIT samples during launch to include a method in the profile.
+ static constexpr uint32_t kStartupMethodSamples = 1;
+ static constexpr uint32_t kMinMethodsToSave = 10;
+ static constexpr uint32_t kMinClassesToSave = 10;
+ static constexpr uint32_t kMinNotificationBeforeWake = 10;
+ static constexpr uint32_t kMaxNotificationBeforeWake = 50;
+
+ ProfileSaverOptions() :
+ enabled_(false),
+ min_save_period_ms_(kMinSavePeriodMs),
+ save_resolved_classes_delay_ms_(kSaveResolvedClassesDelayMs),
+ startup_method_samples_(kStartupMethodSamples),
+ min_methods_to_save_(kMinMethodsToSave),
+ min_classes_to_save_(kMinClassesToSave),
+ min_notification_before_wake_(kMinNotificationBeforeWake),
+ max_notification_before_wake_(kMaxNotificationBeforeWake) {}
+
+ ProfileSaverOptions(
+ bool enabled,
+ uint32_t min_save_period_ms,
+ uint32_t save_resolved_classes_delay_ms,
+ uint32_t startup_method_samples,
+ uint32_t min_methods_to_save,
+ uint32_t min_classes_to_save,
+ uint32_t min_notification_before_wake,
+ uint32_t max_notification_before_wake):
+ enabled_(enabled),
+ min_save_period_ms_(min_save_period_ms),
+ save_resolved_classes_delay_ms_(save_resolved_classes_delay_ms),
+ startup_method_samples_(startup_method_samples),
+ min_methods_to_save_(min_methods_to_save),
+ min_classes_to_save_(min_classes_to_save),
+ min_notification_before_wake_(min_notification_before_wake),
+ max_notification_before_wake_(max_notification_before_wake) {}
+
+ bool IsEnabled() const {
+ return enabled_;
+ }
+ void SetEnabled(bool enabled) {
+ enabled_ = enabled;
+ }
+
+ uint32_t GetMinSavePeriodMs() const {
+ return min_save_period_ms_;
+ }
+ uint32_t GetSaveResolvedClassesDelayMs() const {
+ return save_resolved_classes_delay_ms_;
+ }
+ uint32_t GetStartupMethodSamples() const {
+ return startup_method_samples_;
+ }
+ uint32_t GetMinMethodsToSave() const {
+ return min_methods_to_save_;
+ }
+ uint32_t GetMinClassesToSave() const {
+ return min_classes_to_save_;
+ }
+ uint32_t GetMinNotificationBeforeWake() const {
+ return min_notification_before_wake_;
+ }
+ uint32_t GetMaxNotificationBeforeWake() const {
+ return max_notification_before_wake_;
+ }
+
+ friend std::ostream & operator<<(std::ostream &os, const ProfileSaverOptions& pso) {
+ os << "enabled_" << pso.enabled_
+ << ", min_save_period_ms_" << pso.min_save_period_ms_
+ << ", save_resolved_classes_delay_ms_" << pso.save_resolved_classes_delay_ms_
+ << ", startup_method_samples_" << pso.startup_method_samples_
+ << ", min_methods_to_save_" << pso.min_methods_to_save_
+ << ", min_classes_to_save_" << pso.min_classes_to_save_
+ << ", min_notification_before_wake_" << pso.min_notification_before_wake_
+ << ", max_notification_before_wake_" << pso.max_notification_before_wake_;
+ return os;
+ }
+
+ bool enabled_;
+ uint32_t min_save_period_ms_;
+ uint32_t save_resolved_classes_delay_ms_;
+ uint32_t startup_method_samples_;
+ uint32_t min_methods_to_save_;
+ uint32_t min_classes_to_save_;
+ uint32_t min_notification_before_wake_;
+ uint32_t max_notification_before_wake_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_PROFILE_SAVER_OPTIONS_H_
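
Since the whole header is new and shown above, a short usage sketch is enough to exercise the defaults; it only assumes the header is on the include path:

    #include <iostream>

    #include "jit/profile_saver_options.h"

    int main() {
      art::ProfileSaverOptions options;  // default-constructed: disabled, default thresholds
      options.SetEnabled(true);
      std::cout << options << "\n";      // streams every field via the operator<< above
      return options.GetMinMethodsToSave() == 10 ? 0 : 1;
    }
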
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index b783a019e7..cefd9f0315 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -247,19 +247,38 @@ inline void Class::SetVTable(PointerArray* new_vtable) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
}
+inline MemberOffset Class::EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size) {
+ DCHECK_LT(i, kImtSize);
+ return MemberOffset(
+ EmbeddedImTableOffset(pointer_size).Uint32Value() + i * ImTableEntrySize(pointer_size));
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) {
+ DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
+ return GetFieldPtrWithSize<ArtMethod*>(
+ EmbeddedImTableEntryOffset(i, pointer_size), pointer_size);
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
+ DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
+ SetFieldPtrWithSize<false>(EmbeddedImTableEntryOffset(i, pointer_size), method, pointer_size);
+}
+
inline bool Class::HasVTable() {
- return GetVTable() != nullptr || ShouldHaveEmbeddedVTable();
+ return GetVTable() != nullptr || ShouldHaveEmbeddedImtAndVTable();
}
inline int32_t Class::GetVTableLength() {
- if (ShouldHaveEmbeddedVTable()) {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
return GetEmbeddedVTableLength();
}
return GetVTable() != nullptr ? GetVTable()->GetLength() : 0;
}
inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) {
- if (ShouldHaveEmbeddedVTable()) {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
return GetEmbeddedVTableEntry(i, pointer_size);
}
auto* vtable = GetVTable();
@@ -275,14 +294,6 @@ inline void Class::SetEmbeddedVTableLength(int32_t len) {
SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
}
-inline ImTable* Class::GetImt(size_t pointer_size) {
- return GetFieldPtrWithSize<ImTable*>(MemberOffset(ImtPtrOffset(pointer_size)), pointer_size);
-}
-
-inline void Class::SetImt(ImTable* imt, size_t pointer_size) {
- return SetFieldPtrWithSize<false>(MemberOffset(ImtPtrOffset(pointer_size)), imt, pointer_size);
-}
-
inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) {
return MemberOffset(
EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size));
@@ -530,7 +541,7 @@ template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
DCHECK(IsResolved());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+ if (ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()) {
// Static fields come after the embedded tables.
base = mirror::Class::ComputeClassSize(
true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
@@ -541,7 +552,7 @@ inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_siz
inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) {
DCHECK(IsLoaded());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (ShouldHaveEmbeddedVTable()) {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
// Static fields come after the embedded tables.
base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
0, 0, 0, 0, 0, pointer_size);
@@ -700,7 +711,7 @@ inline Object* Class::AllocNonMovableObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
}
-inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
+inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
uint32_t num_vtable_entries,
uint32_t num_8bit_static_fields,
uint32_t num_16bit_static_fields,
@@ -711,10 +722,11 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
// Space used by java.lang.Class and its instance fields.
uint32_t size = sizeof(Class);
// Space used by embedded tables.
- if (has_embedded_vtable) {
- size = RoundUp(size + sizeof(uint32_t), pointer_size);
- size += pointer_size; // size of pointer to IMT
- size += num_vtable_entries * VTableEntrySize(pointer_size);
+ if (has_embedded_tables) {
+ const uint32_t embedded_imt_size = kImtSize * ImTableEntrySize(pointer_size);
+ const uint32_t embedded_vtable_size = num_vtable_entries * VTableEntrySize(pointer_size);
+ size = RoundUp(size + sizeof(uint32_t) /* embedded vtable len */, pointer_size) +
+ embedded_imt_size + embedded_vtable_size;
}
// Space used by reference statics.
@@ -978,9 +990,18 @@ inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
+inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ // Round up since the embedded imt and vtable must be pointer-size aligned on 64-bit targets.
+ // Add 32 bits for embedded vtable length.
+ return MemberOffset(
+ RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
+}
+
inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) {
CheckPointerSize(pointer_size);
- return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + pointer_size);
+ return MemberOffset(EmbeddedImTableOffset(pointer_size).Uint32Value() +
+ kImtSize * ImTableEntrySize(pointer_size));
}
inline void Class::CheckPointerSize(size_t pointer_size) {
@@ -1065,7 +1086,7 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
dest->SetDexCacheStrings(new_strings);
}
// Fix up embedded tables.
- if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
+ if (!IsTemp() && ShouldHaveEmbeddedImtAndVTable<kVerifyNone, kReadBarrierOption>()) {
for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
ArtMethod* new_method = visitor(method);
@@ -1073,9 +1094,16 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
}
}
- }
- if (!IsTemp() && ShouldHaveImt<kVerifyNone, kReadBarrierOption>()) {
- dest->SetImt(visitor(GetImt(pointer_size)), pointer_size);
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ ArtMethod* method = GetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
+ pointer_size);
+ ArtMethod* new_method = visitor(method);
+ if (method != new_method) {
+ dest->SetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
+ new_method,
+ pointer_size);
+ }
+ }
}
}
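
The class-inl.h hunks re-introduce the fixed-size embedded IMT between the 32-bit vtable length and the embedded vtable. A stand-alone sketch of the offset math above; kImtSize = 64 and the base offset are assumptions (the real IMT_SIZE comes from the build, see class.h below):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Same rounding ART uses for power-of-two alignments.
    constexpr std::size_t RoundUp(std::size_t x, std::size_t n) { return (x + n - 1) & ~(n - 1); }

    int main() {
      const std::size_t kImtSize = 64;               // assumed value of IMT_SIZE
      const std::size_t pointer_size = 8;            // 64-bit target
      const std::size_t vtable_length_offset = 112;  // placeholder for the real field offset

      // EmbeddedImTableOffset: skip the 32-bit vtable length, then pointer-align.
      const std::size_t imt_offset = RoundUp(vtable_length_offset + sizeof(uint32_t), pointer_size);
      // EmbeddedVTableOffset: the vtable follows kImtSize pointer-sized IMT entries.
      const std::size_t vtable_offset = imt_offset + kImtSize * pointer_size;

      std::cout << "imt @ " << imt_offset << ", vtable @ " << vtable_offset << "\n";  // 120, 632
    }
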
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9c77d3814c..b4a23badba 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -914,7 +914,13 @@ const DexFile::TypeList* Class::GetInterfaceTypeList() {
return GetDexFile().GetInterfacesList(*class_def);
}
-void Class::PopulateEmbeddedVTable(size_t pointer_size) {
+void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize],
+ size_t pointer_size) {
+ for (size_t i = 0; i < kImtSize; i++) {
+ auto method = methods[i];
+ DCHECK(method != nullptr);
+ SetEmbeddedImTableEntry(i, method, pointer_size);
+ }
PointerArray* table = GetVTableDuringLinking();
CHECK(table != nullptr) << PrettyClass(this);
const size_t table_length = table->GetLength();
@@ -961,7 +967,7 @@ class ReadBarrierOnNativeRootsVisitor {
class CopyClassVisitor {
public:
CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
- size_t copy_bytes, ImTable* imt,
+ size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize],
size_t pointer_size)
: self_(self), orig_(orig), new_length_(new_length),
copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
@@ -973,8 +979,7 @@ class CopyClassVisitor {
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
- h_new_class_obj->PopulateEmbeddedVTable(pointer_size_);
- h_new_class_obj->SetImt(imt_, pointer_size_);
+ h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_);
h_new_class_obj->SetClassSize(new_length_);
// Visit all of the references to make sure there is no from space references in the native
// roots.
@@ -987,13 +992,13 @@ class CopyClassVisitor {
Handle<mirror::Class>* const orig_;
const size_t new_length_;
const size_t copy_bytes_;
- ImTable* imt_;
+ ArtMethod* const (&imt_)[mirror::Class::kImtSize];
const size_t pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
};
Class* Class::CopyOf(Thread* self, int32_t new_length,
- ImTable* imt, size_t pointer_size) {
+ ArtMethod* const (&imt)[mirror::Class::kImtSize], size_t pointer_size) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
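
PopulateEmbeddedImtAndVTable, CopyClassVisitor, and CopyOf now take ArtMethod* const (&)[kImtSize], a reference to a fixed-size array: the element count is part of the type, so the compiler rejects arrays of the wrong length at every call site. A minimal illustration of the syntax (not ART code; int* stands in for ArtMethod*):

    #include <cstddef>
    #include <iostream>

    constexpr std::size_t kImtSize = 64;  // assumed value of IMT_SIZE

    // Accepts only arrays of exactly kImtSize pointers.
    void CheckAllSlotsFilled(int* const (&methods)[kImtSize]) {
      for (std::size_t i = 0; i < kImtSize; ++i) {
        if (methods[i] == nullptr) {
          std::cout << "slot " << i << " empty\n";
        }
      }
    }

    int main() {
      static int stub = 0;
      int* imt[kImtSize];
      for (std::size_t i = 0; i < kImtSize; ++i) {
        imt[i] = &stub;             // every slot filled, as the DCHECK above expects
      }
      CheckAllSlotsFilled(imt);     // compiles: exactly kImtSize elements
      // int* wrong[8] = {};
      // CheckAllSlotsFilled(wrong);  // would not compile: length mismatch
    }
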
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 2adf54ab86..5235a3e8df 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -22,7 +22,6 @@
#include "class_flags.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
-#include "imtable.h"
#include "invoke_type.h"
#include "modifiers.h"
#include "object.h"
@@ -34,6 +33,10 @@
#include "thread.h"
#include "utils.h"
+#ifndef IMT_SIZE
+#error IMT_SIZE not defined
+#endif
+
namespace art {
class ArtField;
@@ -63,6 +66,11 @@ class MANAGED Class FINAL : public Object {
// 2 ref instance fields.]
static constexpr uint32_t kClassWalkSuper = 0xC0000000;
+ // Interface method table size. Increasing this value reduces the chance of two interface methods
+ // colliding in the interface method table but increases the size of classes that implement
+ // (non-marker) interfaces.
+ static constexpr size_t kImtSize = IMT_SIZE;
+
// Class Status
//
// kStatusRetired: Class that's temporarily used till class linking time
@@ -343,7 +351,7 @@ class MANAGED Class FINAL : public Object {
// be replaced with a class with the right size for embedded imt/vtable.
bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
Status s = GetStatus();
- return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
+ return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable();
}
String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name.
@@ -549,7 +557,7 @@ class MANAGED Class FINAL : public Object {
SHARED_REQUIRES(Locks::mutator_lock_);
// Compute how many bytes would be used by a class with the given elements.
- static uint32_t ComputeClassSize(bool has_embedded_vtable,
+ static uint32_t ComputeClassSize(bool has_embedded_tables,
uint32_t num_vtable_entries,
uint32_t num_8bit_static_fields,
uint32_t num_16bit_static_fields,
@@ -822,29 +830,28 @@ class MANAGED Class FINAL : public Object {
return MemberOffset(sizeof(Class));
}
- static MemberOffset ImtPtrOffset(size_t pointer_size) {
- return MemberOffset(
- RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
- }
-
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) {
- return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>() &&
- GetIfTable<kVerifyFlags, kReadBarrierOption>() != nullptr &&
- !IsArrayClass<kVerifyFlags, kReadBarrierOption>();
- }
-
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveEmbeddedVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
}
bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
+
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
@@ -854,10 +861,6 @@ class MANAGED Class FINAL : public Object {
void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
- ImTable* GetImt(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- void SetImt(ImTable* imt, size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -867,7 +870,7 @@ class MANAGED Class FINAL : public Object {
inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void PopulateEmbeddedVTable(size_t pointer_size)
+ void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
@@ -1192,7 +1195,7 @@ class MANAGED Class FINAL : public Object {
void AssertInitializedOrInitializingInThread(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_);
- Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
+ Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -1319,7 +1322,10 @@ class MANAGED Class FINAL : public Object {
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
+
+ static MemberOffset EmbeddedImTableOffset(size_t pointer_size);
static MemberOffset EmbeddedVTableOffset(size_t pointer_size);
+
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
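
class.h now refuses to build unless IMT_SIZE is supplied, turning the IMT size into a build-time knob instead of a hard-coded constant. A minimal reproduction of the pattern; the value 64 in the compile command is only an assumed example:

    // Compile with the size injected by the build, e.g.:
    //   c++ -DIMT_SIZE=64 -c imt_size_demo.cc
    #ifndef IMT_SIZE
    #error IMT_SIZE not defined
    #endif

    #include <cstddef>

    static constexpr std::size_t kImtSize = IMT_SIZE;
    static_assert(kImtSize > 0, "IMT must have at least one slot");
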
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 8c7c966102..46be5e6c80 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -33,7 +33,6 @@
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
#include "os.h"
-#include "profiler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index bb7b40828e..d55e373419 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -28,7 +28,6 @@
#include "compiler_filter.h"
#include "oat_file.h"
#include "os.h"
-#include "profiler.h"
namespace art {
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index e3cc77f2c0..a1d3ed9241 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "oat_file_assistant.h"
-
#include <algorithm>
#include <fstream>
#include <string>
@@ -29,8 +27,10 @@
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
+#include "dex2oat_environment_test.h"
#include "gc/space/image_space.h"
#include "mem_map.h"
+#include "oat_file_assistant.h"
#include "oat_file_manager.h"
#include "os.h"
#include "scoped_thread_state_change.h"
@@ -39,52 +39,11 @@
namespace art {
-class OatFileAssistantTest : public CommonRuntimeTest {
+class OatFileAssistantTest : public Dex2oatEnvironmentTest {
public:
- virtual void SetUp() {
+ virtual void SetUp() OVERRIDE {
ReserveImageSpace();
- CommonRuntimeTest::SetUp();
-
- // Create a scratch directory to work from.
- scratch_dir_ = android_data_ + "/OatFileAssistantTest";
- ASSERT_EQ(0, mkdir(scratch_dir_.c_str(), 0700));
-
- // Create a subdirectory in scratch for odex files.
- odex_oat_dir_ = scratch_dir_ + "/oat";
- ASSERT_EQ(0, mkdir(odex_oat_dir_.c_str(), 0700));
-
- odex_dir_ = odex_oat_dir_ + "/" + std::string(GetInstructionSetString(kRuntimeISA));
- ASSERT_EQ(0, mkdir(odex_dir_.c_str(), 0700));
-
- // Verify the environment is as we expect
- uint32_t checksum;
- std::string error_msg;
- ASSERT_TRUE(OS::FileExists(GetSystemImageFile().c_str()))
- << "Expected pre-compiled boot image to be at: " << GetSystemImageFile();
- ASSERT_TRUE(OS::FileExists(GetDexSrc1().c_str()))
- << "Expected dex file to be at: " << GetDexSrc1();
- ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
- << "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
- ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, &error_msg))
- << "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
- ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
- << "Expected dex file to be at: " << GetDexSrc2();
-
- // GetMultiDexSrc2 should have the same primary dex checksum as
- // GetMultiDexSrc1, but a different secondary dex checksum.
- static constexpr bool kVerifyChecksum = true;
- std::vector<std::unique_ptr<const DexFile>> multi1;
- ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(),
- GetMultiDexSrc1().c_str(), kVerifyChecksum, &error_msg, &multi1)) << error_msg;
- ASSERT_GT(multi1.size(), 1u);
-
- std::vector<std::unique_ptr<const DexFile>> multi2;
- ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(),
- GetMultiDexSrc2().c_str(), kVerifyChecksum, &error_msg, &multi2)) << error_msg;
- ASSERT_GT(multi2.size(), 1u);
-
- ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
- ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
+ Dex2oatEnvironmentTest::SetUp();
}
// Pre-Relocate the image to a known non-zero offset so we don't have to
@@ -108,17 +67,6 @@ class OatFileAssistantTest : public CommonRuntimeTest {
return Exec(argv, error_msg);
}
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
- // options->push_back(std::make_pair("-verbose:oat", nullptr));
-
- // Set up the image location.
- options->push_back(std::make_pair("-Ximage:" + GetImageLocation(),
- nullptr));
- // Make sure compilercallbacks are not set so that relocation will be
- // enabled.
- callbacks_.reset();
- }
-
virtual void PreRuntimeCreate() {
std::string error_msg;
ASSERT_TRUE(PreRelocateImage(&error_msg)) << error_msg;
@@ -126,94 +74,10 @@ class OatFileAssistantTest : public CommonRuntimeTest {
UnreserveImageSpace();
}
- virtual void PostRuntimeCreate() {
+ virtual void PostRuntimeCreate() OVERRIDE {
ReserveImageSpace();
}
- virtual void TearDown() {
- ClearDirectory(odex_dir_.c_str());
- ASSERT_EQ(0, rmdir(odex_dir_.c_str()));
-
- ClearDirectory(odex_oat_dir_.c_str());
- ASSERT_EQ(0, rmdir(odex_oat_dir_.c_str()));
-
- ClearDirectory(scratch_dir_.c_str());
- ASSERT_EQ(0, rmdir(scratch_dir_.c_str()));
-
- CommonRuntimeTest::TearDown();
- }
-
- void Copy(std::string src, std::string dst) {
- std::ifstream src_stream(src, std::ios::binary);
- std::ofstream dst_stream(dst, std::ios::binary);
-
- dst_stream << src_stream.rdbuf();
- }
-
- // Returns the directory where the pre-compiled core.art can be found.
- // TODO: We should factor out this into common tests somewhere rather than
- // re-hardcoding it here (This was copied originally from the elf writer
- // test).
- std::string GetImageDirectory() {
- if (IsHost()) {
- const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != nullptr);
- return std::string(host_dir) + "/framework";
- } else {
- return std::string("/data/art-test");
- }
- }
-
- std::string GetImageLocation() {
- return GetImageDirectory() + "/core.art";
- }
-
- std::string GetSystemImageFile() {
- return GetImageDirectory() + "/" + GetInstructionSetString(kRuntimeISA)
- + "/core.art";
- }
-
- bool GetCachedImageFile(/*out*/std::string* image, std::string* error_msg) {
- std::string cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA), true);
- return GetDalvikCacheFilename(GetImageLocation().c_str(), cache.c_str(), image, error_msg);
- }
-
- std::string GetDexSrc1() {
- return GetTestDexFileName("Main");
- }
-
- // Returns the path to a dex file equivalent to GetDexSrc1, but with the dex
- // file stripped.
- std::string GetStrippedDexSrc1() {
- return GetTestDexFileName("MainStripped");
- }
-
- std::string GetMultiDexSrc1() {
- return GetTestDexFileName("MultiDex");
- }
-
- // Returns the path to a multidex file equivalent to GetMultiDexSrc2, but
- // with the contents of the secondary dex file changed.
- std::string GetMultiDexSrc2() {
- return GetTestDexFileName("MultiDexModifiedSecondary");
- }
-
- std::string GetDexSrc2() {
- return GetTestDexFileName("Nested");
- }
-
- // Scratch directory, for dex and odex files (oat files will go in the
- // dalvik cache).
- std::string GetScratchDir() {
- return scratch_dir_;
- }
-
- // Odex directory is the subdirectory in the scratch directory where odex
- // files should be located.
- std::string GetOdexDir() {
- return odex_dir_;
- }
-
// Generate a non-PIC odex file for the purposes of the test.
// The generated odex file will be un-relocated.
void GenerateOdexForTest(const std::string& dex_location,
@@ -334,9 +198,6 @@ class OatFileAssistantTest : public CommonRuntimeTest {
image_reservation_.clear();
}
- std::string scratch_dir_;
- std::string odex_oat_dir_;
- std::string odex_dir_;
std::vector<std::unique_ptr<MemMap>> image_reservation_;
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index eac5b43ff2..595a47bb36 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -176,8 +176,13 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.WithType<unsigned int>()
.IntoKey(M::JITInvokeTransitionWeight)
.Define("-Xjitsaveprofilinginfo")
- .WithValue(true)
- .IntoKey(M::JITSaveProfilingInfo)
+ .WithType<ProfileSaverOptions>()
+ .AppendValues()
+ .IntoKey(M::ProfileSaverOpts)
+ .Define("-Xps-_") // profile saver options -Xps-<key>:<value>
+ .WithType<ProfileSaverOptions>()
+ .AppendValues()
+ .IntoKey(M::ProfileSaverOpts) // NOTE: Appends into same key as -Xjitsaveprofilinginfo
.Define("-XX:HspaceCompactForOOMMinIntervalMs=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::HSpaceCompactForOOMMinIntervalsMs)
@@ -244,14 +249,6 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
{"wallclock", TraceClockSource::kWall},
{"dualclock", TraceClockSource::kDual}})
.IntoKey(M::ProfileClock)
- .Define("-Xenable-profiler")
- .WithType<TestProfilerOptions>()
- .AppendValues()
- .IntoKey(M::ProfilerOpts) // NOTE: Appends into same key as -Xprofile-*
- .Define("-Xprofile-_") // -Xprofile-<key>:<value>
- .WithType<TestProfilerOptions>()
- .AppendValues()
- .IntoKey(M::ProfilerOpts) // NOTE: Appends into same key as -Xenable-profiler
.Define("-Xcompiler:_")
.WithType<std::string>()
.IntoKey(M::Compiler)
@@ -690,17 +687,13 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xmethod-trace\n");
UsageMessage(stream, " -Xmethod-trace-file:filename");
UsageMessage(stream, " -Xmethod-trace-file-size:integervalue\n");
- UsageMessage(stream, " -Xenable-profiler\n");
- UsageMessage(stream, " -Xprofile-filename:filename\n");
- UsageMessage(stream, " -Xprofile-period:integervalue\n");
- UsageMessage(stream, " -Xprofile-duration:integervalue\n");
- UsageMessage(stream, " -Xprofile-interval:integervalue\n");
- UsageMessage(stream, " -Xprofile-backoff:doublevalue\n");
- UsageMessage(stream, " -Xprofile-start-immediately\n");
- UsageMessage(stream, " -Xprofile-top-k-threshold:doublevalue\n");
- UsageMessage(stream, " -Xprofile-top-k-change-threshold:doublevalue\n");
- UsageMessage(stream, " -Xprofile-type:{method,stack}\n");
- UsageMessage(stream, " -Xprofile-max-stack-depth:integervalue\n");
+ UsageMessage(stream, " -Xps-min-save-period-ms:integervalue\n");
+ UsageMessage(stream, " -Xps-save-resolved-classes-delay-ms:integervalue\n");
+ UsageMessage(stream, " -Xps-startup-method-samples:integervalue\n");
+ UsageMessage(stream, " -Xps-min-methods-to-save:integervalue\n");
+ UsageMessage(stream, " -Xps-min-classes-to-save:integervalue\n");
+ UsageMessage(stream, " -Xps-min-notification-before-wake:integervalue\n");
+ UsageMessage(stream, " -Xps-max-notification-before-wake:integervalue\n");
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 5974fb6a6e..1f5beb9984 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -26,7 +26,7 @@
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "arch/instruction_set.h"
-#include "profiler_options.h"
+#include "jit/profile_saver_options.h"
#include "runtime_options.h"
namespace art {
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
deleted file mode 100644
index 6a77a9ed83..0000000000
--- a/runtime/profiler.cc
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "profiler.h"
-
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <sys/uio.h>
-
-#include <fstream>
-
-#include "art_method-inl.h"
-#include "base/stl_util.h"
-#include "base/time_utils.h"
-#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
-#include "common_throws.h"
-#include "dex_file-inl.h"
-#include "instrumentation.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "os.h"
-#include "scoped_thread_state_change.h"
-#include "ScopedLocalRef.h"
-#include "thread.h"
-#include "thread_list.h"
-#include "utils.h"
-
-#include "entrypoints/quick/quick_entrypoints.h"
-
-namespace art {
-
-BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
-pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
-volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;
-
-// TODO: this profiler runs regardless of the state of the machine. Maybe we should use the
-// wakelock or something to modify the run characteristics. This can be done when we
-// have some performance data after it's been used for a while.
-
-// Walk through the method within depth of max_depth_ on the Java stack
-class BoundedStackVisitor : public StackVisitor {
- public:
- BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack,
- Thread* thread,
- uint32_t max_depth)
- SHARED_REQUIRES(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- stack_(stack),
- max_depth_(max_depth),
- depth_(0) {}
-
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m->IsRuntimeMethod()) {
- return true;
- }
- uint32_t dex_pc_ = GetDexPc();
- stack_->push_back(std::make_pair(m, dex_pc_));
- ++depth_;
- if (depth_ < max_depth_) {
- return true;
- } else {
- return false;
- }
- }
-
- private:
- std::vector<std::pair<ArtMethod*, uint32_t>>* const stack_;
- const uint32_t max_depth_;
- uint32_t depth_;
-
- DISALLOW_COPY_AND_ASSIGN(BoundedStackVisitor);
-};
-
-// This is called from either a thread list traversal or from a checkpoint. Regardless
-// of which caller, the mutator lock must be held.
-static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
- BackgroundMethodSamplingProfiler* profiler =
- reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
- const ProfilerOptions profile_options = profiler->GetProfilerOptions();
- switch (profile_options.GetProfileType()) {
- case kProfilerMethod: {
- ArtMethod* method = thread->GetCurrentMethod(nullptr);
- if ((false) && method == nullptr) {
- LOG(INFO) << "No current method available";
- std::ostringstream os;
- thread->Dump(os);
- std::string data(os.str());
- LOG(INFO) << data;
- }
- profiler->RecordMethod(method);
- break;
- }
- case kProfilerBoundedStack: {
- std::vector<InstructionLocation> stack;
- uint32_t max_depth = profile_options.GetMaxStackDepth();
- BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
- bounded_stack_visitor.WalkStack();
- profiler->RecordStack(stack);
- break;
- }
- default:
- LOG(INFO) << "This profile type is not implemented.";
- }
-}
-
-// A closure that is called by the thread checkpoint code.
-class SampleCheckpoint FINAL : public Closure {
- public:
- explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
- profiler_(profiler) {}
-
- void Run(Thread* thread) OVERRIDE {
- Thread* self = Thread::Current();
- if (thread == nullptr) {
- LOG(ERROR) << "Checkpoint with nullptr thread";
- return;
- }
-
- // Grab the mutator lock (shared access).
- ScopedObjectAccess soa(self);
-
- // Grab a sample.
- GetSample(thread, this->profiler_);
-
- // And finally tell the barrier that we're done.
- this->profiler_->GetBarrier().Pass(self);
- }
-
- private:
- BackgroundMethodSamplingProfiler* const profiler_;
-};
-
-bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
- MutexLock mu(self, *Locks::profiler_lock_);
- return shutting_down_;
-}
-
-void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
- Runtime* runtime = Runtime::Current();
- BackgroundMethodSamplingProfiler* profiler =
- reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
-
- // Add a random delay for the first time run so that we don't hammer the CPU
- // with all profiles running at the same time.
- const int kRandomDelayMaxSecs = 30;
- const double kMaxBackoffSecs = 24*60*60; // Max backoff time.
-
- srand(MicroTime() * getpid());
- int startup_delay = rand() % kRandomDelayMaxSecs; // random delay for startup.
-
-
- CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsAotCompiler()));
-
- Thread* self = Thread::Current();
-
- double backoff = 1.0;
- while (true) {
- if (ShuttingDown(self)) {
- break;
- }
-
- {
- // wait until we need to run another profile
- uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;
-
- // Add a startup delay to prevent all the profiles running at once.
- delay_secs += startup_delay;
-
- // Immediate startup for benchmarking?
- if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
- delay_secs = 0;
- }
-
- startup_delay = 0;
-
- VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
- MutexLock mu(self, profiler->wait_lock_);
- profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
- // We were either signaled by Stop or timedout, in either case ignore the timed out result.
-
- // Expand the backoff by its coefficient, but don't go beyond the max.
- backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
- }
-
- if (ShuttingDown(self)) {
- break;
- }
-
-
- uint64_t start_us = MicroTime();
- uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
- uint64_t now_us = start_us;
-
- VLOG(profiler) << "Starting profiling run now for "
- << PrettyDuration((end_us - start_us) * 1000);
-
- SampleCheckpoint check_point(profiler);
-
- size_t valid_samples = 0;
- while (now_us < end_us) {
- if (ShuttingDown(self)) {
- break;
- }
-
- usleep(profiler->options_.GetIntervalUs()); // Non-interruptible sleep.
-
- ThreadList* thread_list = runtime->GetThreadList();
-
- profiler->profiler_barrier_->Init(self, 0);
- size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);
-
- // All threads are suspended, nothing to do.
- if (barrier_count == 0) {
- now_us = MicroTime();
- continue;
- }
-
- valid_samples += barrier_count;
-
- ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
-
- // Wait for the barrier to be crossed by all runnable threads. This wait
- // is done with a timeout so that we can detect problems with the checkpoint
- // running code. We should never see this.
- const uint32_t kWaitTimeoutMs = 10000;
-
- // Wait for all threads to pass the barrier.
- bool timed_out = profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
-
- // We should never get a timeout. If we do, it suggests a problem with the checkpoint
- // code. Crash the process in this case.
- CHECK(!timed_out);
-
- // Update the current time.
- now_us = MicroTime();
- }
-
- if (valid_samples > 0) {
- // After the profile has been taken, write it out.
- ScopedObjectAccess soa(self); // Acquire the mutator lock.
- uint32_t size = profiler->WriteProfile();
- VLOG(profiler) << "Profile size: " << size;
- }
- }
-
- LOG(INFO) << "Profiler shutdown";
- runtime->DetachCurrentThread();
- return nullptr;
-}
-
-// Write out the profile file if we are generating a profile.
-uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
- std::string full_name = output_filename_;
- VLOG(profiler) << "Saving profile to " << full_name;
-
- int fd = open(full_name.c_str(), O_RDWR);
- if (fd < 0) {
- // Open failed.
- LOG(ERROR) << "Failed to open profile file " << full_name;
- return 0;
- }
-
- // Lock the file for exclusive access. This will block if another process is using
- // the file.
- int err = flock(fd, LOCK_EX);
- if (err < 0) {
- LOG(ERROR) << "Failed to lock profile file " << full_name;
- return 0;
- }
-
- // Read the previous profile.
- profile_table_.ReadPrevious(fd, options_.GetProfileType());
-
- // Move back to the start of the file.
- lseek(fd, 0, SEEK_SET);
-
- // Format the profile output and write to the file.
- std::ostringstream os;
- uint32_t num_methods = DumpProfile(os);
- std::string data(os.str());
- const char *p = data.c_str();
- size_t length = data.length();
- size_t full_length = length;
- do {
- int n = ::write(fd, p, length);
- p += n;
- length -= n;
- } while (length > 0);
-
- // Truncate the file to the new length.
- if (ftruncate(fd, full_length) == -1) {
- LOG(ERROR) << "Failed to truncate profile file " << full_name;
- }
-
- // Now unlock the file, allowing another process in.
- err = flock(fd, LOCK_UN);
- if (err < 0) {
- LOG(ERROR) << "Failed to unlock profile file " << full_name;
- }
-
- // Done, close the file.
- ::close(fd);
-
- // Clean the profile for the next time.
- CleanProfile();
-
- return num_methods;
-}
-
-bool BackgroundMethodSamplingProfiler::Start(
- const std::string& output_filename, const ProfilerOptions& options) {
- if (!options.IsEnabled()) {
- return false;
- }
-
- CHECK(!output_filename.empty());
-
- Thread* self = Thread::Current();
- {
- MutexLock mu(self, *Locks::profiler_lock_);
- // Don't start two profiler threads.
- if (profiler_ != nullptr) {
- return true;
- }
- }
-
- LOG(INFO) << "Starting profiler using output file: " << output_filename
- << " and options: " << options;
- {
- MutexLock mu(self, *Locks::profiler_lock_);
- profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);
-
- CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
- reinterpret_cast<void*>(profiler_)),
- "Profiler thread");
- }
- return true;
-}
-
-
-
-void BackgroundMethodSamplingProfiler::Stop() {
- BackgroundMethodSamplingProfiler* profiler = nullptr;
- pthread_t profiler_pthread = 0U;
- {
- MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
- CHECK(!shutting_down_);
- profiler = profiler_;
- shutting_down_ = true;
- profiler_pthread = profiler_pthread_;
- }
-
- // Now wake up the sampler thread if it sleeping.
- {
- MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
- profiler->period_condition_.Signal(Thread::Current());
- }
- // Wait for the sample thread to stop.
- CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");
-
- {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- profiler_ = nullptr;
- }
- delete profiler;
-}
-
-
-void BackgroundMethodSamplingProfiler::Shutdown() {
- Stop();
-}
-
-BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
- const std::string& output_filename, const ProfilerOptions& options)
- : output_filename_(output_filename),
- options_(options),
- wait_lock_("Profile wait lock"),
- period_condition_("Profile condition", wait_lock_),
- profile_table_(wait_lock_),
- profiler_barrier_(new Barrier(0)) {
- // Populate the filtered_methods set.
- // This is empty right now, but to add a method, do this:
- //
- // filtered_methods_.insert("void java.lang.Object.wait(long, int)");
-}
-
-// Filter out methods the profiler doesn't want to record.
-// We require mutator lock since some statistics will be updated here.
-bool BackgroundMethodSamplingProfiler::ProcessMethod(ArtMethod* method) {
- if (method == nullptr) {
- profile_table_.NullMethod();
- // Don't record a null method.
- return false;
- }
-
- mirror::Class* cls = method->GetDeclaringClass();
- if (cls != nullptr) {
- if (cls->GetClassLoader() == nullptr) {
- // Don't include things in the boot
- profile_table_.BootMethod();
- return false;
- }
- }
-
- bool is_filtered = false;
-
- if (strcmp(method->GetName(), "<clinit>") == 0) {
- // always filter out class init
- is_filtered = true;
- }
-
- // Filter out methods by name if there are any.
- if (!is_filtered && filtered_methods_.size() > 0) {
- std::string method_full_name = PrettyMethod(method);
-
- // Don't include specific filtered methods.
- is_filtered = filtered_methods_.count(method_full_name) != 0;
- }
- return !is_filtered;
-}
-
-// A method has been hit, record its invocation in the method map.
-// The mutator_lock must be held (shared) when this is called.
-void BackgroundMethodSamplingProfiler::RecordMethod(ArtMethod* method) {
- // Add to the profile table unless it is filtered out.
- if (ProcessMethod(method)) {
- profile_table_.Put(method);
- }
-}
-
-// Record the current bounded stack into sampling results.
-void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
- if (stack.size() == 0) {
- return;
- }
- // Get the method on top of the stack. We use this method to perform filtering.
- ArtMethod* method = stack.front().first;
- if (ProcessMethod(method)) {
- profile_table_.PutStack(stack);
- }
-}
-
-// Clean out any recordings for the method traces.
-void BackgroundMethodSamplingProfiler::CleanProfile() {
- profile_table_.Clear();
-}
-
-uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
- return profile_table_.Write(os, options_.GetProfileType());
-}
-
-// Profile Table.
-// This holds a mapping of ArtMethod* to a count of how many times a sample
-// hit it at the top of the stack.
-ProfileSampleResults::ProfileSampleResults(Mutex& lock)
- : lock_(lock),
- num_samples_(0U),
- num_null_methods_(0U),
- num_boot_methods_(0U),
- previous_num_samples_(0U),
- previous_num_null_methods_(0U),
- previous_num_boot_methods_(0U) {
- for (int i = 0; i < kHashSize; i++) {
- table[i] = nullptr;
- }
- method_context_table = nullptr;
- stack_trie_root_ = nullptr;
-}
-
-ProfileSampleResults::~ProfileSampleResults() {
- Clear();
-}
-
-// Add a method to the profile table. If it's the first time the method
-// has been seen, add it with count=1, otherwise increment the count.
-void ProfileSampleResults::Put(ArtMethod* method) {
- MutexLock mu(Thread::Current(), lock_);
- uint32_t index = Hash(method);
- if (table[index] == nullptr) {
- table[index] = new Map();
- }
- Map::iterator i = table[index]->find(method);
- if (i == table[index]->end()) {
- (*table[index])[method] = 1;
- } else {
- i->second++;
- }
- num_samples_++;
-}
-
-// Add a bounded stack to the profile table. Only the count of the method on
-// top of the frame will be increased.
-void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
- MutexLock mu(Thread::Current(), lock_);
- ScopedObjectAccess soa(Thread::Current());
- if (stack_trie_root_ == nullptr) {
- // The root of the stack trie is a dummy node so that we don't have to maintain
- // a collection of tries.
- stack_trie_root_ = new StackTrieNode();
- }
-
- StackTrieNode* current = stack_trie_root_;
- if (stack.size() == 0) {
- current->IncreaseCount();
- return;
- }
-
- for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
- iter != stack.rend(); ++iter) {
- InstructionLocation inst_loc = *iter;
- ArtMethod* method = inst_loc.first;
- if (method == nullptr) {
- // skip null method
- continue;
- }
- uint32_t dex_pc = inst_loc.second;
- uint32_t method_idx = method->GetDexMethodIndex();
- const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- MethodReference method_ref(dex_file, method_idx);
- StackTrieNode* child = current->FindChild(method_ref, dex_pc);
- if (child != nullptr) {
- current = child;
- } else {
- uint32_t method_size = 0;
- const DexFile::CodeItem* codeitem = method->GetCodeItem();
- if (codeitem != nullptr) {
- method_size = codeitem->insns_size_in_code_units_;
- }
- StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
- current->AppendChild(new_node);
- current = new_node;
- }
- }
-
- if (current != stack_trie_root_ && current->GetCount() == 0) {
- // Insert into the method_context table.
- if (method_context_table == nullptr) {
- method_context_table = new MethodContextMap();
- }
- MethodReference method = current->GetMethod();
- MethodContextMap::iterator i = method_context_table->find(method);
- if (i == method_context_table->end()) {
- TrieNodeSet* node_set = new TrieNodeSet();
- node_set->insert(current);
- (*method_context_table)[method] = node_set;
- } else {
- TrieNodeSet* node_set = i->second;
- node_set->insert(current);
- }
- }
- current->IncreaseCount();
- num_samples_++;
-}
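A minimal standalone sketch (not part of the removed source) of the trie-insertion idea PutStack() uses above: the bounded stack is walked from the outermost caller down to the top-of-stack method, so contexts that share callers share trie prefixes, and only the node for the top-of-stack method gets its count bumped. All names here are hypothetical.

#include <cstdint>
#include <map>
#include <memory>
#include <utility>
#include <vector>

// Each frame is a hypothetical (method_id, dex_pc) pair.
using Frame = std::pair<uint32_t, uint32_t>;

struct TrieNode {
  std::map<Frame, std::unique_ptr<TrieNode>> children;
  uint32_t count = 0;
};

void InsertBoundedStack(TrieNode* root, const std::vector<Frame>& stack) {
  TrieNode* current = root;  // Dummy root, as in the code above.
  // stack.front() is the top of the stack, so iterate in reverse to start
  // from the outermost caller.
  for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
    std::unique_ptr<TrieNode>& child = current->children[*it];
    if (child == nullptr) {
      child = std::make_unique<TrieNode>();  // First sighting of this frame here.
    }
    current = child.get();
  }
  ++current->count;  // Only the top-of-stack node's count is increased.
}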
-
-// Write the profile table to the output stream. Also merge with the previous profile.
-uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
- ScopedObjectAccess soa(Thread::Current());
- num_samples_ += previous_num_samples_;
- num_null_methods_ += previous_num_null_methods_;
- num_boot_methods_ += previous_num_boot_methods_;
-
- VLOG(profiler) << "Profile: "
- << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
- os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
- uint32_t num_methods = 0;
- if (type == kProfilerMethod) {
- for (int i = 0 ; i < kHashSize; i++) {
- Map *map = table[i];
- if (map != nullptr) {
- for (const auto &meth_iter : *map) {
- ArtMethod *method = meth_iter.first;
- std::string method_name = PrettyMethod(method);
-
- const DexFile::CodeItem* codeitem = method->GetCodeItem();
- uint32_t method_size = 0;
- if (codeitem != nullptr) {
- method_size = codeitem->insns_size_in_code_units_;
- }
- uint32_t count = meth_iter.second;
-
- // Merge this profile entry with one from a previous run (if present). Also
- // remove the previous entry.
- PreviousProfile::iterator pi = previous_.find(method_name);
- if (pi != previous_.end()) {
- count += pi->second.count_;
- previous_.erase(pi);
- }
- os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
- ++num_methods;
- }
- }
- }
- } else if (type == kProfilerBoundedStack) {
- if (method_context_table != nullptr) {
- for (const auto &method_iter : *method_context_table) {
- MethodReference method = method_iter.first;
- TrieNodeSet* node_set = method_iter.second;
- std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
- uint32_t method_size = 0;
- uint32_t total_count = 0;
- PreviousContextMap new_context_map;
- for (const auto &trie_node_i : *node_set) {
- StackTrieNode* node = trie_node_i;
- method_size = node->GetMethodSize();
- uint32_t count = node->GetCount();
- uint32_t dexpc = node->GetDexPC();
- total_count += count;
-
- StackTrieNode* current = node->GetParent();
- // We walk backward through the trie to retrieve context and dex_pc entries until we reach the dummy root.
- // The format of the context is "method_1@pc_1@method_2@pc_2@..."
- std::vector<std::string> context_vector;
- while (current != nullptr && current->GetParent() != nullptr) {
- context_vector.push_back(StringPrintf("%s@%u",
- PrettyMethod(current->GetMethod().dex_method_index, *(current->GetMethod().dex_file)).c_str(),
- current->GetDexPC()));
- current = current->GetParent();
- }
- std::string context_sig = Join(context_vector, '@');
- new_context_map[std::make_pair(dexpc, context_sig)] = count;
- }
-
- PreviousProfile::iterator pi = previous_.find(method_name);
- if (pi != previous_.end()) {
- total_count += pi->second.count_;
- PreviousContextMap* previous_context_map = pi->second.context_map_;
- if (previous_context_map != nullptr) {
- for (const auto &context_i : *previous_context_map) {
- uint32_t count = context_i.second;
- PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
- if (ci == new_context_map.end()) {
- new_context_map[context_i.first] = count;
- } else {
- ci->second += count;
- }
- }
- }
- delete previous_context_map;
- previous_.erase(pi);
- }
- // We write out profile data with dex pc and context information in the following format:
- // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
- // (A concrete example follows this function.)
- std::vector<std::string> context_count_vector;
- for (const auto &context_i : new_context_map) {
- context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
- context_i.second, context_i.first.second.c_str()));
- }
- os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
- method_size, Join(context_count_vector, '#').c_str());
- ++num_methods;
- }
- }
- }
-
- // Now we write out the remaining previous methods.
- for (const auto &pi : previous_) {
- if (type == kProfilerMethod) {
- os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
- } else if (type == kProfilerBoundedStack) {
- os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
- PreviousContextMap* previous_context_map = pi.second.context_map_;
- if (previous_context_map != nullptr) {
- std::vector<std::string> context_count_vector;
- for (const auto &context_i : *previous_context_map) {
- context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
- context_i.second, context_i.first.second.c_str()));
- }
- os << Join(context_count_vector, '#');
- }
- os << "]\n";
- }
- ++num_methods;
- }
- return num_methods;
-}
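To make the two serialization formats above concrete, here is a hedged example with made-up method names, counts, and pcs; the first printf emits the kProfilerMethod record format, the second the kProfilerBoundedStack format. An empty context field (the "5:30:" entry) means the sampled method sat directly under the dummy trie root.

#include <cstdio>

int main() {
  // kProfilerMethod record: method/count/size.
  std::printf("%s/%u/%u\n", "void com.example.Foo.bar()", 42u, 128u);
  // kProfilerBoundedStack record: method/total_count/size/[pc:count:context#...].
  std::printf("%s/%u/%u/[%s]\n", "void com.example.Foo.bar()", 42u, 128u,
              "5:30:#9:12:void com.example.Foo.baz()@7");
  return 0;
}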
-
-void ProfileSampleResults::Clear() {
- num_samples_ = 0;
- num_null_methods_ = 0;
- num_boot_methods_ = 0;
- for (int i = 0; i < kHashSize; i++) {
- delete table[i];
- table[i] = nullptr;
- }
- if (stack_trie_root_ != nullptr) {
- stack_trie_root_->DeleteChildren();
- delete stack_trie_root_;
- stack_trie_root_ = nullptr;
- if (method_context_table != nullptr) {
- delete method_context_table;
- method_context_table = nullptr;
- }
- }
- for (auto &pi : previous_) {
- if (pi.second.context_map_ != nullptr) {
- delete pi.second.context_map_;
- pi.second.context_map_ = nullptr;
- }
- }
- previous_.clear();
-}
-
-uint32_t ProfileSampleResults::Hash(ArtMethod* method) {
- return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
-}
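A standalone sketch of the bucketing above, under the assumption (not stated in the source) that ArtMethod* values are at least 8-byte aligned, which would be why the low three bits are shifted away before taking the result modulo the prime table size. The 32-bit truncation mirrors the PointerToLowMemUInt32() helper used above.

#include <cstdint>

constexpr int kHashSize = 17;  // Mirrors the table size above.

uint32_t BucketFor(const void* method) {
  // Assumption: the pointer is at least 8-byte aligned, so bits 0-2 carry
  // no information and are dropped before bucketing.
  const uint32_t low = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
  return (low >> 3) % kHashSize;
}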
-
- // Read a single line into the given string. Returns true if everything is OK, false
-// on EOF or error.
-static bool ReadProfileLine(int fd, std::string& line) {
- char buf[4];
- line.clear();
- while (true) {
- int n = read(fd, buf, 1); // TODO: could speed this up but is it worth it?
- if (n != 1) {
- return false;
- }
- if (buf[0] == '\n') {
- break;
- }
- line += buf[0];
- }
- return true;
-}
-
-void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
- // Reset counters.
- previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;
-
- std::string line;
-
- // The first line contains summary information.
- if (!ReadProfileLine(fd, line)) {
- return;
- }
- std::vector<std::string> summary_info;
- Split(line, '/', &summary_info);
- if (summary_info.size() != 3) {
- // Bad summary info. It should be count/nullcount/bootcount.
- return;
- }
- previous_num_samples_ = strtoul(summary_info[0].c_str(), nullptr, 10);
- previous_num_null_methods_ = strtoul(summary_info[1].c_str(), nullptr, 10);
- previous_num_boot_methods_ = strtoul(summary_info[2].c_str(), nullptr, 10);
-
- // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by '/'.
- while (true) {
- if (!ReadProfileLine(fd, line)) {
- break;
- }
- std::vector<std::string> info;
- Split(line, '/', &info);
- if (info.size() != 3 && info.size() != 4) {
- // Malformed.
- break;
- }
- std::string methodname = info[0];
- uint32_t total_count = strtoul(info[1].c_str(), nullptr, 10);
- uint32_t size = strtoul(info[2].c_str(), nullptr, 10);
- PreviousContextMap* context_map = nullptr;
- if (type == kProfilerBoundedStack && info.size() == 4) {
- context_map = new PreviousContextMap();
- std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
- std::vector<std::string> context_count_pairs;
- Split(context_counts_str, '#', &context_count_pairs);
- for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
- std::vector<std::string> context_count;
- Split(context_count_pairs[i], ':', &context_count);
- if (context_count.size() == 2) {
- // Handles the situation when the profile file doesn't contain context information.
- uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
- uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
- (*context_map)[std::make_pair(dexpc, "")] = count;
- } else {
- // Handles the situation when the profile file contains context information.
- uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
- uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
- std::string context = context_count[2];
- (*context_map)[std::make_pair(dexpc, context)] = count;
- }
- }
- }
- previous_[methodname] = PreviousValue(total_count, size, context_map);
- }
-}
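A minimal, self-contained sketch of the same record parsing using only the STL: Split() above is ART's helper, while SplitOn() below is a hypothetical stand-in. It accepts both the 3-field method format and the 4-field bounded-stack format, stripping the surrounding brackets before splitting the context entries.

#include <cstdint>
#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> SplitOn(const std::string& s, char sep) {
  std::vector<std::string> out;
  std::istringstream in(s);
  std::string piece;
  while (std::getline(in, piece, sep)) {
    out.push_back(piece);
  }
  return out;
}

bool ParseRecord(const std::string& line) {
  std::vector<std::string> info = SplitOn(line, '/');
  if (info.size() != 3 && info.size() != 4) {
    return false;  // Malformed record.
  }
  const uint32_t count = static_cast<uint32_t>(std::strtoul(info[1].c_str(), nullptr, 10));
  const uint32_t size = static_cast<uint32_t>(std::strtoul(info[2].c_str(), nullptr, 10));
  (void)count;
  (void)size;
  if (info.size() == 4) {
    if (info[3].size() < 2) {
      return false;  // Needs at least the "[" and "]" delimiters.
    }
    // Strip "[" and "]", then split the "pc:count:context" entries on '#'.
    const std::string contexts = info[3].substr(1, info[3].size() - 2);
    for (const std::string& entry : SplitOn(contexts, '#')) {
      std::vector<std::string> fields = SplitOn(entry, ':');
      // fields[0] = dex pc, fields[1] = count; fields[2] is the context and is
      // absent when the profile was written without context information.
      (void)fields;
    }
  }
  return true;
}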
-
-bool ProfileFile::LoadFile(const std::string& fileName) {
- LOG(VERBOSE) << "reading profile file " << fileName;
- struct stat st;
- int err = stat(fileName.c_str(), &st);
- if (err == -1) {
- LOG(VERBOSE) << "not found";
- return false;
- }
- if (st.st_size == 0) {
- return false; // Empty profiles are invalid.
- }
- std::ifstream in(fileName.c_str());
- if (!in) {
- LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
- LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
- LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
- LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
- LOG(VERBOSE) << "errno: " << errno;
- return false;
- }
- // The first line contains summary information.
- std::string line;
- std::getline(in, line);
- if (in.eof()) {
- return false;
- }
- std::vector<std::string> summary_info;
- Split(line, '/', &summary_info);
- if (summary_info.size() != 3) {
- // Bad summary info. It should be total/null/boot.
- return false;
- }
- // This is the number of hits in all profiled methods (without null or boot methods).
- uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);
-
- // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by '/'.
- // Store the info in descending order of sample count.
- typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
- ProfileSet countSet;
- while (!in.eof()) {
- std::getline(in, line);
- if (in.eof()) {
- break;
- }
- std::vector<std::string> info;
- Split(line, '/', &info);
- if (info.size() != 3 && info.size() != 4) {
- // Malformed.
- return false;
- }
- int count = atoi(info[1].c_str());
- countSet.insert(std::make_pair(-count, info));
- }
-
- uint32_t curTotalCount = 0;
- ProfileSet::iterator end = countSet.end();
- const ProfileData* prevData = nullptr;
- for (ProfileSet::iterator it = countSet.begin(); it != end ; it++) {
- const std::string& methodname = it->second[0];
- uint32_t count = -it->first;
- uint32_t size = strtoul(it->second[2].c_str(), nullptr, 10);
- double usedPercent = (count * 100.0) / total_count;
-
- curTotalCount += count;
- // Methods with the same count should be part of the same top K percentage bucket.
- double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
- ? prevData->GetTopKUsedPercentage()
- : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);
-
- // Add it to the profile map.
- ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
- profile_map_[methodname] = curData;
- // Point prevData at the entry stored in the map; taking the address of the
- // loop-local curData would leave it dangling on the next iteration.
- prevData = &profile_map_[methodname];
- }
- return true;
-}
-
-bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
- ProfileMap::iterator i = profile_map_.find(method_name);
- if (i == profile_map_.end()) {
- return false;
- }
- *data = i->second;
- return true;
-}
-
-bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
- ProfileMap::iterator end = profile_map_.end();
- for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
- if (it->second.GetTopKUsedPercentage() < topKPercentage) {
- topKSamples.insert(it->first);
- }
- }
- return true;
-}
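A worked example (hypothetical counts) of the cumulative top-K computation in LoadFile() and its use by GetTopKSamples(): with three methods totalling 100 samples, the running percentages are 50, 80, and 100, so a 90% threshold selects the first two.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // (count, name) pairs already sorted by descending count, as LoadFile() does.
  const std::vector<std::pair<unsigned, const char*>> methods = {
      {50, "A"}, {30, "B"}, {20, "C"}};
  const unsigned total = 100;
  unsigned running = 0;
  for (const auto& m : methods) {
    running += m.first;
    const double top_k = 100.0 * running / total;  // 50.0, then 80.0, then 100.0.
    std::printf("%s cumulative=%.1f%%\n", m.second, top_k);
  }
  // GetTopKSamples(out, 90.0) keeps A and B: their cumulative percentages
  // (50.0 and 80.0) are below the 90.0 threshold; C's 100.0 is not.
  return 0;
}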
-
-StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
- if (children_.size() == 0) {
- return nullptr;
- }
- // Create a dummy node for searching.
- StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
- std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
- delete node;
- return (i == children_.end()) ? nullptr : *i;
-}
-
-void StackTrieNode::DeleteChildren() {
- for (auto &child : children_) {
- if (child != nullptr) {
- child->DeleteChildren();
- delete child;
- }
- }
-}
-
-} // namespace art
diff --git a/runtime/profiler.h b/runtime/profiler.h
deleted file mode 100644
index bd29f711c0..0000000000
--- a/runtime/profiler.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_PROFILER_H_
-#define ART_RUNTIME_PROFILER_H_
-
-#include <memory>
-#include <ostream>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "barrier.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "globals.h"
-#include "instrumentation.h"
-#include "profiler_options.h"
-#include "os.h"
-#include "safe_map.h"
-#include "method_reference.h"
-
-namespace art {
-
-namespace mirror {
- class Class;
-} // namespace mirror
-class ArtMethod;
-class Thread;
-
-typedef std::pair<ArtMethod*, uint32_t> InstructionLocation;
-
-// This class stores the sampled bounded stacks in a trie structure. A path through the trie
-// represents a particular calling context; the method on top of the sampled stack corresponds
-// to a leaf or an internal node of the trie (never the root, which is a dummy node).
-class StackTrieNode {
- public:
- StackTrieNode(MethodReference method, uint32_t dex_pc, uint32_t method_size,
- StackTrieNode* parent) :
- parent_(parent), method_(method), dex_pc_(dex_pc),
- count_(0), method_size_(method_size) {
- }
- StackTrieNode() : parent_(nullptr), method_(nullptr, 0),
- dex_pc_(0), count_(0), method_size_(0) {
- }
- StackTrieNode* GetParent() { return parent_; }
- MethodReference GetMethod() { return method_; }
- uint32_t GetCount() { return count_; }
- uint32_t GetDexPC() { return dex_pc_; }
- uint32_t GetMethodSize() { return method_size_; }
- void AppendChild(StackTrieNode* child) { children_.insert(child); }
- StackTrieNode* FindChild(MethodReference method, uint32_t dex_pc);
- void DeleteChildren();
- void IncreaseCount() { ++count_; }
-
- private:
- // Comparator for stack trie node.
- struct StackTrieNodeComparator {
- bool operator()(StackTrieNode* node1, StackTrieNode* node2) const {
- MethodReference mr1 = node1->GetMethod();
- MethodReference mr2 = node2->GetMethod();
- if (mr1.dex_file == mr2.dex_file) {
- if (mr1.dex_method_index == mr2.dex_method_index) {
- return node1->GetDexPC() < node2->GetDexPC();
- } else {
- return mr1.dex_method_index < mr2.dex_method_index;
- }
- } else {
- return mr1.dex_file < mr2.dex_file;
- }
- }
- };
-
- std::set<StackTrieNode*, StackTrieNodeComparator> children_;
- StackTrieNode* parent_;
- MethodReference method_;
- uint32_t dex_pc_;
- uint32_t count_;
- uint32_t method_size_;
-};
-
-//
-// This class holds all the results for all runs of the profiler. It also
-// counts the number of null methods (where we can't determine the method) and
-// the number of methods in the boot path (where we have already compiled the method).
-//
-// This object is an internal profiler object and uses the same locking as the profiler
-// itself.
-class ProfileSampleResults {
- public:
- explicit ProfileSampleResults(Mutex& lock);
- ~ProfileSampleResults();
-
- void Put(ArtMethod* method) REQUIRES(!lock_);
- void PutStack(const std::vector<InstructionLocation>& stack_dump) REQUIRES(!lock_);
- uint32_t Write(std::ostream &os, ProfileDataType type);
- void ReadPrevious(int fd, ProfileDataType type);
- void Clear();
- uint32_t GetNumSamples() { return num_samples_; }
- void NullMethod() { ++num_null_methods_; }
- void BootMethod() { ++num_boot_methods_; }
-
- private:
- uint32_t Hash(ArtMethod* method);
- static constexpr int kHashSize = 17;
- Mutex& lock_; // Reference to the main profiler lock - we don't need two of them.
- uint32_t num_samples_; // Total number of samples taken.
- uint32_t num_null_methods_; // Number of samples where we don't know the method.
- uint32_t num_boot_methods_; // Number of samples in the boot path.
-
- typedef std::map<ArtMethod*, uint32_t> Map; // Map of method to its sample count.
- Map *table[kHashSize];
-
- typedef std::set<StackTrieNode*> TrieNodeSet;
- // Map from a method hit by the profiler to the set of stack trie nodes for that method.
- typedef std::map<MethodReference, TrieNodeSet*, MethodReferenceComparator> MethodContextMap;
- MethodContextMap *method_context_table;
- StackTrieNode* stack_trie_root_; // Root of the trie that stores sampled stack information.
-
- // Map from <pc, context> to counts.
- typedef std::map<std::pair<uint32_t, std::string>, uint32_t> PreviousContextMap;
- struct PreviousValue {
- PreviousValue() : count_(0), method_size_(0), context_map_(nullptr) {}
- PreviousValue(uint32_t count, uint32_t method_size, PreviousContextMap* context_map)
- : count_(count), method_size_(method_size), context_map_(context_map) {}
- uint32_t count_;
- uint32_t method_size_;
- PreviousContextMap* context_map_;
- };
-
- typedef std::map<std::string, PreviousValue> PreviousProfile;
- PreviousProfile previous_;
- uint32_t previous_num_samples_;
- uint32_t previous_num_null_methods_; // Number of samples where we don't know the method.
- uint32_t previous_num_boot_methods_; // Number of samples in the boot path.
-};
-
-//
-// The BackgroundMethodSamplingProfiler runs in a thread. Most of the time it is sleeping but
-// occasionally wakes up and counts the number of times a method is called. Each time
-// it ticks, it looks at the current method and records it in the ProfileSampleResults
-// table.
-//
-// The timing is controlled by a number of variables:
-// 1. Period: the time between sampling runs.
-// 2. Interval: the time between each sample in a run.
-// 3. Duration: the duration of a run.
-//
-// So the profiler thread sleeps for the 'period' time, then wakes up and runs for the
-// 'duration'. The run consists of a series of samples, each of which is 'interval' microseconds
-// apart. At the end of a run, it writes the results table to a file and goes back to sleep.
-// (A standalone timing sketch follows this class.)
-
-class BackgroundMethodSamplingProfiler {
- public:
- // Start a profile thread with the user-supplied arguments.
- // Returns true if the profile was started or if it was already running. Returns false otherwise.
- static bool Start(const std::string& output_filename, const ProfilerOptions& options)
- REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
- !Locks::profiler_lock_);
-
- // NO_THREAD_SAFETY_ANALYSIS for a static function calling into a member function that excludes a lock.
- static void Stop() REQUIRES(!Locks::profiler_lock_, !wait_lock_)
- NO_THREAD_SAFETY_ANALYSIS;
- // NO_THREAD_SAFETY_ANALYSIS for a static function calling into a member function that excludes a lock.
- static void Shutdown() REQUIRES(!Locks::profiler_lock_) NO_THREAD_SAFETY_ANALYSIS;
-
- void RecordMethod(ArtMethod *method) SHARED_REQUIRES(Locks::mutator_lock_);
- void RecordStack(const std::vector<InstructionLocation>& stack)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool ProcessMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- const ProfilerOptions& GetProfilerOptions() const { return options_; }
-
- Barrier& GetBarrier() {
- return *profiler_barrier_;
- }
-
- private:
- explicit BackgroundMethodSamplingProfiler(
- const std::string& output_filename, const ProfilerOptions& options);
-
- // The sampling interval in microseconds is passed as an argument.
- // NO_THREAD_SAFETY_ANALYSIS for a static function calling into a member function that excludes a lock.
- static void* RunProfilerThread(void* arg) REQUIRES(!Locks::profiler_lock_)
- NO_THREAD_SAFETY_ANALYSIS;
-
- uint32_t WriteProfile() SHARED_REQUIRES(Locks::mutator_lock_);
-
- void CleanProfile();
- uint32_t DumpProfile(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
- static bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
-
- static BackgroundMethodSamplingProfiler* profiler_ GUARDED_BY(Locks::profiler_lock_);
-
- // We need to shut the sample thread down at exit. Setting this to true will do that.
- static volatile bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
-
- // Sampling thread, non-zero when sampling.
- static pthread_t profiler_pthread_;
-
- // Some measure of the number of samples that are significant.
- static constexpr uint32_t kSignificantSamples = 10;
-
- // The name of the file where profile data will be written.
- std::string output_filename_;
- // The options used to start the profiler.
- const ProfilerOptions& options_;
-
-
- // Profile condition support.
- Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- ConditionVariable period_condition_ GUARDED_BY(wait_lock_);
-
- ProfileSampleResults profile_table_;
-
- std::unique_ptr<Barrier> profiler_barrier_;
-
- // Set of methods to be filtered out. This will probably be rare because
- // most of the methods we want to be filtered reside in the boot path and
- // are automatically filtered.
- typedef std::set<std::string> FilteredMethods;
- FilteredMethods filtered_methods_;
-
- DISALLOW_COPY_AND_ASSIGN(BackgroundMethodSamplingProfiler);
-};
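A standalone timing sketch of the period/interval/duration relationship described in the class comment above. The helper names are hypothetical, and this is a sketch of the documented behavior, not the removed implementation.

#include <chrono>
#include <thread>

void OneProfilerCycle(std::chrono::seconds period,
                      std::chrono::seconds duration,
                      std::chrono::microseconds interval) {
  std::this_thread::sleep_for(period);  // 1. Sleep between sampling runs.
  const auto run_end = std::chrono::steady_clock::now() + duration;
  while (std::chrono::steady_clock::now() < run_end) {  // 2. One run.
    // TakeSample();  // Hypothetical: record the current method of each thread.
    std::this_thread::sleep_for(interval);  // 3. Samples are 'interval' apart.
  }
  // WriteResultsToFile();  // Hypothetical: dump the table, then sleep again.
}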
-
-//
-// Contains profile data generated from previous runs of the program and stored
-// in a file. It is used to determine whether to compile a particular method or not.
-class ProfileFile {
- public:
- class ProfileData {
- public:
- ProfileData() : count_(0), method_size_(0), used_percent_(0), top_k_used_percentage_(0) {}
- ProfileData(const std::string& method_name, uint32_t count, uint32_t method_size,
- double used_percent, double top_k_used_percentage) :
- method_name_(method_name), count_(count), method_size_(method_size),
- used_percent_(used_percent), top_k_used_percentage_(top_k_used_percentage) {
- // TODO: currently method_size_ is unused
- UNUSED(method_size_);
- }
-
- double GetUsedPercent() const { return used_percent_; }
- uint32_t GetCount() const { return count_; }
- double GetTopKUsedPercentage() const { return top_k_used_percentage_; }
-
- private:
- std::string method_name_; // Method name.
- uint32_t count_; // Number of times it has been called.
- uint32_t method_size_; // Size of the method in dex instructions.
- double used_percent_; // Percentage of the total samples attributed to this method.
- double top_k_used_percentage_; // The cumulative percentage of samples covered by this
- // method and all methods with higher sample counts (used for top-K selection).
- };
-
- public:
- // Loads profile data from the given file. The new data are merged with any existing data.
- // Returns true if the file was loaded successfully and false otherwise.
- bool LoadFile(const std::string& filename);
-
- // Computes the group of methods that comprises top_k_percentage of the total used methods.
- bool GetTopKSamples(std::set<std::string>& top_k_methods, double top_k_percentage);
-
- // If the given method has an entry in the profile table it updates the data
- // and returns true. Otherwise returns false and leaves the data unchanged.
- bool GetProfileData(ProfileData* data, const std::string& method_name);
-
- private:
- // Profile data is stored in a map, indexed by the full method name.
- typedef std::map<std::string, ProfileData> ProfileMap;
- ProfileMap profile_map_;
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_PROFILER_H_
diff --git a/runtime/profiler_options.h b/runtime/profiler_options.h
deleted file mode 100644
index 1db2f0508c..0000000000
--- a/runtime/profiler_options.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_PROFILER_OPTIONS_H_
-#define ART_RUNTIME_PROFILER_OPTIONS_H_
-
-#include <string>
-#include <ostream>
-
-namespace art {
-
-enum ProfileDataType {
- kProfilerMethod, // Method only
- kProfilerBoundedStack, // Methods with Dex PC on top of the stack
-};
-std::ostream& operator<<(std::ostream& os, const ProfileDataType& rhs);
-
-class ProfilerOptions {
- public:
- static constexpr bool kDefaultEnabled = false;
- static constexpr uint32_t kDefaultPeriodS = 10;
- static constexpr uint32_t kDefaultDurationS = 20;
- static constexpr uint32_t kDefaultIntervalUs = 500;
- static constexpr double kDefaultBackoffCoefficient = 2.0;
- static constexpr bool kDefaultStartImmediately = false;
- static constexpr double kDefaultTopKThreshold = 90.0;
- static constexpr double kDefaultChangeInTopKThreshold = 10.0;
- static constexpr ProfileDataType kDefaultProfileData = kProfilerMethod;
- static constexpr uint32_t kDefaultMaxStackDepth = 3;
-
- ProfilerOptions() :
- enabled_(kDefaultEnabled),
- period_s_(kDefaultPeriodS),
- duration_s_(kDefaultDurationS),
- interval_us_(kDefaultIntervalUs),
- backoff_coefficient_(kDefaultBackoffCoefficient),
- start_immediately_(kDefaultStartImmediately),
- top_k_threshold_(kDefaultTopKThreshold),
- top_k_change_threshold_(kDefaultChangeInTopKThreshold),
- profile_type_(kDefaultProfileData),
- max_stack_depth_(kDefaultMaxStackDepth) {}
-
- ProfilerOptions(bool enabled,
- uint32_t period_s,
- uint32_t duration_s,
- uint32_t interval_us,
- double backoff_coefficient,
- bool start_immediately,
- double top_k_threshold,
- double top_k_change_threshold,
- ProfileDataType profile_type,
- uint32_t max_stack_depth):
- enabled_(enabled),
- period_s_(period_s),
- duration_s_(duration_s),
- interval_us_(interval_us),
- backoff_coefficient_(backoff_coefficient),
- start_immediately_(start_immediately),
- top_k_threshold_(top_k_threshold),
- top_k_change_threshold_(top_k_change_threshold),
- profile_type_(profile_type),
- max_stack_depth_(max_stack_depth) {}
-
- bool IsEnabled() const {
- return enabled_;
- }
-
- uint32_t GetPeriodS() const {
- return period_s_;
- }
-
- uint32_t GetDurationS() const {
- return duration_s_;
- }
-
- uint32_t GetIntervalUs() const {
- return interval_us_;
- }
-
- double GetBackoffCoefficient() const {
- return backoff_coefficient_;
- }
-
- bool GetStartImmediately() const {
- return start_immediately_;
- }
-
- double GetTopKThreshold() const {
- return top_k_threshold_;
- }
-
- double GetTopKChangeThreshold() const {
- return top_k_change_threshold_;
- }
-
- ProfileDataType GetProfileType() const {
- return profile_type_;
- }
-
- uint32_t GetMaxStackDepth() const {
- return max_stack_depth_;
- }
-
- private:
- friend std::ostream & operator<<(std::ostream &os, const ProfilerOptions& po) {
- os << "enabled=" << po.enabled_
- << ", period_s=" << po.period_s_
- << ", duration_s=" << po.duration_s_
- << ", interval_us=" << po.interval_us_
- << ", backoff_coefficient=" << po.backoff_coefficient_
- << ", start_immediately=" << po.start_immediately_
- << ", top_k_threshold=" << po.top_k_threshold_
- << ", top_k_change_threshold=" << po.top_k_change_threshold_
- << ", profile_type=" << po.profile_type_
- << ", max_stack_depth=" << po.max_stack_depth_;
- return os;
- }
-
- friend class ParsedOptions;
-
- // Whether or not applications should be profiled.
- bool enabled_;
- // Generate profile every n seconds.
- uint32_t period_s_;
- // Run profile for n seconds.
- uint32_t duration_s_;
- // Microseconds between samples.
- uint32_t interval_us_;
- // Coefficient for exponential backoff.
- double backoff_coefficient_;
- // Whether the profile should start upon app startup or be delayed by some random offset.
- bool start_immediately_;
- // Top K% of samples that are considered relevant when deciding if the app should be recompiled.
- double top_k_threshold_;
- // How much the top K% samples needs to change in order for the app to be recompiled.
- double top_k_change_threshold_;
- // The type of profile data dumped to the disk.
- ProfileDataType profile_type_;
- // The max depth of the stack collected by the profiler.
- uint32_t max_stack_depth_;
-};
-
-} // namespace art
-
-
-#endif // ART_RUNTIME_PROFILER_OPTIONS_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 40e1b1363a..21cd2aa2c9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -118,7 +118,6 @@
#include "oat_file_manager.h"
#include "os.h"
#include "parsed_options.h"
-#include "profiler.h"
#include "jit/profile_saver.h"
#include "quick/quick_method_frame_info.h"
#include "reflection.h"
@@ -628,17 +627,6 @@ bool Runtime::Start() {
VLOG(startup) << "Runtime::Start exiting";
finished_starting_ = true;
- if (profiler_options_.IsEnabled() && !profile_output_filename_.empty()) {
- // User has asked for a profile using -Xenable-profiler.
- // Create the profile file if it doesn't exist.
- int fd = open(profile_output_filename_.c_str(), O_RDWR|O_CREAT|O_EXCL, 0660);
- if (fd >= 0) {
- close(fd);
- } else if (errno != EEXIST) {
- LOG(WARNING) << "Failed to access the profile file. Profiler disabled.";
- }
- }
-
if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
Trace::Start(trace_config_->trace_file.c_str(),
@@ -1197,26 +1185,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
Trace::TraceOutputMode::kFile;
}
- {
- auto&& profiler_options = runtime_options.ReleaseOrDefault(Opt::ProfilerOpts);
- profile_output_filename_ = profiler_options.output_file_name_;
-
- // TODO: Don't do this, just change ProfilerOptions to include the output file name?
- ProfilerOptions other_options(
- profiler_options.enabled_,
- profiler_options.period_s_,
- profiler_options.duration_s_,
- profiler_options.interval_us_,
- profiler_options.backoff_coefficient_,
- profiler_options.start_immediately_,
- profiler_options.top_k_threshold_,
- profiler_options.top_k_change_threshold_,
- profiler_options.profile_type_,
- profiler_options.max_stack_depth_);
-
- profiler_options_ = other_options;
- }
-
// TODO: move this to just be an Trace::Start argument
Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));
@@ -1759,7 +1727,6 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
return;
}
- profile_output_filename_ = profile_output_filename;
jit_->StartProfileSaver(profile_output_filename,
code_paths,
foreign_dex_profile_path,
@@ -2010,9 +1977,4 @@ bool Runtime::UseJitCompilation() const {
return (jit_ != nullptr) && jit_->UseJitCompilation();
}
-// Returns true if profile saving is enabled. GetJit() will not be null in this case.
-bool Runtime::SaveProfileInfo() const {
- return (jit_ != nullptr) && jit_->SaveProfilingInfo();
-}
-
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index b7f377ddf9..afa8e4818b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -36,7 +36,6 @@
#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
-#include "profiler_options.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
#include "safe_map.h"
@@ -192,10 +191,6 @@ class Runtime {
return image_location_;
}
- const ProfilerOptions& GetProfilerOptions() const {
- return profiler_options_;
- }
-
// Starts a runtime, which may cause threads to be started and code to run.
bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
@@ -455,8 +450,6 @@ class Runtime {
// Returns true if JIT compilations are enabled. GetJit() will not be null in this case.
bool UseJitCompilation() const;
- // Returns true if profile saving is enabled. GetJit() will not be null in this case.
- bool SaveProfileInfo() const;
void PreZygoteFork();
bool InitZygote();
@@ -782,9 +775,6 @@ class Runtime {
const bool is_running_on_memory_tool_;
- std::string profile_output_filename_;
- ProfilerOptions profiler_options_;
-
std::unique_ptr<TraceConfig> trace_config_;
instrumentation::Instrumentation instrumentation_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 635ff51697..31206b5a64 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -75,7 +75,6 @@ RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight)
RUNTIME_OPTIONS_KEY (unsigned int, JITInvokeTransitionWeight)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
-RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
MsToNs(100 * 1000)) // 100s
@@ -105,7 +104,7 @@ RUNTIME_OPTIONS_KEY (std::string, MethodTraceFile, "/data
RUNTIME_OPTIONS_KEY (unsigned int, MethodTraceFileSize, 10 * MB)
RUNTIME_OPTIONS_KEY (Unit, MethodTraceStreaming)
RUNTIME_OPTIONS_KEY (TraceClockSource, ProfileClock, kDefaultTraceClockSource) // -Xprofile:
-RUNTIME_OPTIONS_KEY (TestProfilerOptions, ProfilerOpts) // -Xenable-profiler, -Xprofile-*
+RUNTIME_OPTIONS_KEY (ProfileSaverOptions, ProfileSaverOpts) // -Xjitsaveprofilinginfo, -Xps-*
RUNTIME_OPTIONS_KEY (std::string, Compiler)
RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
CompilerOptions) // -Xcompiler-option ...
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index ab69d4f1cb..5fcb86e3f9 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -29,8 +29,8 @@
#include "jit/jit_code_cache.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
-#include "profiler_options.h"
#include "arch/instruction_set.h"
+#include "jit/profile_saver_options.h"
#include "verifier/verify_mode.h"
#include <stdio.h>
#include <stdarg.h>
@@ -41,7 +41,6 @@ class CompilerCallbacks;
class DexFile;
struct XGcOption;
struct BackgroundGcOption;
-struct TestProfilerOptions;
#define DECLARE_KEY(Type, Name) static const Key<Type> Name
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index d98f82a541..3fd66a7bcb 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -201,12 +201,22 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
<< " state=" << old_state_and_flags.as_struct.state;
} else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
// Wait while our suspend count is non-zero.
- MutexLock mu(this, *Locks::thread_suspend_count_lock_);
+
+ // We pass null to the MutexLock as we may be in a situation where the
+ // runtime is shutting down. Guarding ourselves from that situation
+ // requires taking the shutdown lock, which is undesirable here.
+ Thread* thread_to_pass = nullptr;
+ if (kIsDebugBuild && !IsDaemon()) {
+ // We know we can make our debug locking checks on non-daemon threads,
+ // so re-enable them on debug builds.
+ thread_to_pass = this;
+ }
+ MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
// Re-check when Thread::resume_cond_ is notified.
- Thread::resume_cond_->Wait(this);
+ Thread::resume_cond_->Wait(thread_to_pass);
old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 9b6a20ec59..ab2462535c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1060,6 +1060,8 @@ class Thread {
return tlsPtr_.mterp_alt_ibase;
}
+ // Notify that a signal is being handled. This is to protect us from doing recursive
+ // NPE handling after a SIGSEGV.
void NoteSignalBeingHandled() {
if (tls32_.handling_signal_) {
LOG(FATAL) << "Detected signal while processing a signal";
diff --git a/test/478-checker-clinit-check-pruning/expected.txt b/test/478-checker-clinit-check-pruning/expected.txt
index 7de097f666..6f73b656ed 100644
--- a/test/478-checker-clinit-check-pruning/expected.txt
+++ b/test/478-checker-clinit-check-pruning/expected.txt
@@ -10,3 +10,4 @@ Main$ClassWithClinit9's static initializer
Main$ClassWithClinit10's static initializer
Main$ClassWithClinit11's static initializer
Main$ClassWithClinit12's static initializer
+Main$ClassWithClinit13's static initializer
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index 79935134b4..6fc12f138c 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -16,6 +16,8 @@
public class Main {
+ static boolean doThrow = false;
+
/*
* Ensure an inlined static invoke explicitly triggers the
* initialization check of the called method's declaring class, and
@@ -310,12 +312,12 @@ public class Main {
/// CHECK-START: void Main.constClassAndInvokeStatic(java.lang.Iterable) liveness (before)
/// CHECK-NOT: ClinitCheck
- static void constClassAndInvokeStatic(Iterable it) {
+ static void constClassAndInvokeStatic(Iterable<?> it) {
$opt$inline$ignoreClass(ClassWithClinit7.class);
ClassWithClinit7.someStaticMethod(it);
}
- static void $opt$inline$ignoreClass(Class c) {
+ static void $opt$inline$ignoreClass(Class<?> c) {
}
static class ClassWithClinit7 {
@@ -324,7 +326,7 @@ public class Main {
}
// Note: not inlined from constClassAndInvokeStatic() but fully inlined from main().
- static void someStaticMethod(Iterable it) {
+ static void someStaticMethod(Iterable<?> it) {
// We're not inlining invoke-interface at the moment.
it.iterator();
}
@@ -341,7 +343,7 @@ public class Main {
/// CHECK-START: void Main.sgetAndInvokeStatic(java.lang.Iterable) liveness (before)
/// CHECK-NOT: ClinitCheck
- static void sgetAndInvokeStatic(Iterable it) {
+ static void sgetAndInvokeStatic(Iterable<?> it) {
$opt$inline$ignoreInt(ClassWithClinit8.value);
ClassWithClinit8.someStaticMethod(it);
}
@@ -356,7 +358,7 @@ public class Main {
}
// Note: not inlined from sgetAndInvokeStatic() but fully inlined from main().
- static void someStaticMethod(Iterable it) {
+ static void someStaticMethod(Iterable<?> it) {
// We're not inlining invoke-interface at the moment.
it.iterator();
}
@@ -372,7 +374,7 @@ public class Main {
/// CHECK: ClinitCheck
/// CHECK: InvokeStaticOrDirect clinit_check:none
- static void constClassSgetAndInvokeStatic(Iterable it) {
+ static void constClassSgetAndInvokeStatic(Iterable<?> it) {
$opt$inline$ignoreClass(ClassWithClinit9.class);
$opt$inline$ignoreInt(ClassWithClinit9.value);
ClassWithClinit9.someStaticMethod(it);
@@ -385,7 +387,7 @@ public class Main {
}
// Note: not inlined from constClassSgetAndInvokeStatic() but fully inlined from main().
- static void someStaticMethod(Iterable it) {
+ static void someStaticMethod(Iterable<?> it) {
// We're not inlining invoke-interface at the moment.
it.iterator();
}
@@ -403,12 +405,12 @@ public class Main {
/// CHECK-START: void Main.inlinedInvokeStaticViaNonStatic(java.lang.Iterable) liveness (before)
/// CHECK-NOT: ClinitCheck
- static void inlinedInvokeStaticViaNonStatic(Iterable it) {
+ static void inlinedInvokeStaticViaNonStatic(Iterable<?> it) {
inlinedInvokeStaticViaNonStaticHelper(null);
inlinedInvokeStaticViaNonStaticHelper(it);
}
- static void inlinedInvokeStaticViaNonStaticHelper(Iterable it) {
+ static void inlinedInvokeStaticViaNonStaticHelper(Iterable<?> it) {
ClassWithClinit10.inlinedForNull(it);
}
@@ -418,7 +420,7 @@ public class Main {
System.out.println("Main$ClassWithClinit10's static initializer");
}
- static void inlinedForNull(Iterable it) {
+ static void inlinedForNull(Iterable<?> it) {
if (it != null) {
// We're not inlining invoke-interface at the moment.
it.iterator();
@@ -443,7 +445,7 @@ public class Main {
/// CHECK-START: void Main.inlinedInvokeStaticViaStatic(java.lang.Iterable) liveness (before)
/// CHECK-NOT: ClinitCheck
- static void inlinedInvokeStaticViaStatic(Iterable it) {
+ static void inlinedInvokeStaticViaStatic(Iterable<?> it) {
ClassWithClinit11.callInlinedForNull(it);
}
@@ -453,11 +455,11 @@ public class Main {
System.out.println("Main$ClassWithClinit11's static initializer");
}
- static void callInlinedForNull(Iterable it) {
+ static void callInlinedForNull(Iterable<?> it) {
inlinedForNull(it);
}
- static void inlinedForNull(Iterable it) {
+ static void inlinedForNull(Iterable<?> it) {
// We're not inlining invoke-interface at the moment.
it.iterator();
}
@@ -475,7 +477,7 @@ public class Main {
/// CHECK-START: void Main.inlinedInvokeStaticViaStaticTwice(java.lang.Iterable) liveness (before)
/// CHECK-NOT: ClinitCheck
- static void inlinedInvokeStaticViaStaticTwice(Iterable it) {
+ static void inlinedInvokeStaticViaStaticTwice(Iterable<?> it) {
ClassWithClinit12.callInlinedForNull(null);
ClassWithClinit12.callInlinedForNull(it);
}
@@ -486,11 +488,11 @@ public class Main {
System.out.println("Main$ClassWithClinit12's static initializer");
}
- static void callInlinedForNull(Iterable it) {
+ static void callInlinedForNull(Iterable<?> it) {
inlinedForNull(it);
}
- static void inlinedForNull(Iterable it) {
+ static void inlinedForNull(Iterable<?> it) {
if (it != null) {
// We're not inlining invoke-interface at the moment.
it.iterator();
@@ -498,6 +500,28 @@ public class Main {
}
}
+ static class ClassWithClinit13 {
+ static {
+ System.out.println("Main$ClassWithClinit13's static initializer");
+ }
+
+ public static void $inline$forwardToGetIterator(Iterable<?> it) {
+ $noinline$getIterator(it);
+ }
+
+ public static void $noinline$getIterator(Iterable<?> it) {
+ // We're not inlining invoke-interface at the moment.
+ it.iterator();
+ }
+ }
+
+ // TODO: Write checker statements.
+ static Object $noinline$testInliningAndNewInstance(Iterable<?> it) {
+ if (doThrow) { throw new Error(); }
+ ClassWithClinit13.$inline$forwardToGetIterator(it);
+ return new ClassWithClinit13();
+ }
+
// TODO: Add a test for the case of a static method whose declaring
// class type index is not available (i.e. when `storage_index`
// equals `DexFile::kDexNoIndex` in
@@ -517,5 +541,6 @@ public class Main {
inlinedInvokeStaticViaNonStatic(it);
inlinedInvokeStaticViaStatic(it);
inlinedInvokeStaticViaStaticTwice(it);
+ $noinline$testInliningAndNewInstance(it);
}
}
diff --git a/test/600-verifier-fails/expected.txt b/test/600-verifier-fails/expected.txt
index 8399969a2d..eaa0c933c4 100644
--- a/test/600-verifier-fails/expected.txt
+++ b/test/600-verifier-fails/expected.txt
@@ -2,3 +2,4 @@ passed A
passed B
passed C
passed D
+passed E
diff --git a/test/600-verifier-fails/src/Main.java b/test/600-verifier-fails/src/Main.java
index 64c3d5c16a..fa25d58e43 100644
--- a/test/600-verifier-fails/src/Main.java
+++ b/test/600-verifier-fails/src/Main.java
@@ -38,7 +38,6 @@ public class Main {
test("B");
test("C");
test("D");
- // TODO: enable again
- // test("E");
+ test("E");
}
}