Reduce interpret-only compile time.
Before:
39.04user 5.18system 0:29.24elapsed 151%CPU (0avgtext+0avgdata 164176maxresident)k
38.87user 5.16system 0:29.14elapsed 151%CPU (0avgtext+0avgdata 164144maxresident)k
After:
36.26user 3.25system 0:27.00elapsed 146%CPU (0avgtext+0avgdata 162592maxresident)k
36.25user 3.28system 0:26.28elapsed 150%CPU (0avgtext+0avgdata 162688maxresident)k
Disabled implicit stack protection for the compiler; this reduces page faults.
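
For context, a minimal sketch of the mechanism being skipped (hypothetical
names, not ART's actual thread setup code): implicit stack protection makes
a guard region at the stack end inaccessible, so an overflow traps instead
of being caught by an explicit per-call check. Presumably the per-thread
setup of that region is where the page-fault cost comes from, which the
compiler no longer pays.

  #include <sys/mman.h>
  #include <cstddef>

  // Assumed single 4 KiB guard page; stack_begin must be page-aligned.
  static constexpr size_t kGuardSize = 4096;

  // Make the lowest page of the thread's stack inaccessible. Any read or
  // write into it raises SIGSEGV, which a fault handler can translate
  // into a StackOverflowError.
  bool InstallStackGuard(void* stack_begin) {
    return mprotect(stack_begin, kGuardSize, PROT_NONE) == 0;
  }
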
Added support for not timing every method compilation and verification, since
NanoTime is slow and adds ~2 seconds of real time. Timing is still enabled for
now because people want to know which methods are slow to compile.
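
The verifier change follows a compile-time gating pattern: read the clock
only when a constexpr flag is set, so builds that don't want the timing drop
both NanoTime() calls entirely. A self-contained sketch of the pattern
(std::chrono standing in for ART's NanoTime, threshold as in the diff):

  #include <chrono>
  #include <cstdint>
  #include <cstdio>

  static constexpr bool kIsDebugBuild = false;  // stand-in for ART's constant
  static constexpr bool kTimeVerifyMethod = !kIsDebugBuild;

  // Monotonic nanoseconds, standing in for ART's NanoTime().
  static uint64_t NanoTime() {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
  }

  static void VerifyMethodTimed() {
    // With kTimeVerifyMethod false, the compiler can fold away both clock
    // reads and the branch below, so verification pays nothing per method.
    uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
    // ... method verification would run here ...
    if (kTimeVerifyMethod) {
      uint64_t duration_ns = NanoTime() - start_ns;
      if (duration_ns > 100ull * 1000 * 1000) {  // 100 ms, as in the diff
        std::printf("slow verification: %llu ns\n",
                    static_cast<unsigned long long>(duration_ns));
      }
    }
  }

  int main() {
    VerifyMethodTimed();
    return 0;
  }
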
Bug: 16853450
Change-Id: I349ffb3f36db8c437137387aa6914dc17d743f09
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 42c7cca..c206e9f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -189,7 +189,8 @@
}
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
- if (!Runtime::Current()->IsZygote()) {
+ const bool is_zygote = Runtime::Current()->IsZygote();
+ if (!is_zygote) {
// Background compaction is currently not supported for command line runs.
if (background_collector_type_ != foreground_collector_type_) {
VLOG(heap) << "Disabling background compaction for non zygote";
@@ -222,6 +223,8 @@
requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+- nonmoving space (non_moving_space_capacity)+-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+ +-????????????????????????????????????????????+-
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+-main alloc space / bump space 1 (capacity_) +-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+-????????????????????????????????????????????+-
@@ -236,7 +239,6 @@
// from the main space.
// This is not the case if we support homogeneous compaction or have a moving background
// collector type.
- const bool is_zygote = Runtime::Current()->IsZygote();
bool separate_non_moving_space = is_zygote ||
support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
IsMovingGc(background_collector_type_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d525c28..f432c9c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -539,7 +539,9 @@
#endif
// Set stack_end_ to the bottom of the stack, saving space for stack overflows
- bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+
+ Runtime* runtime = Runtime::Current();
+ bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsCompiler();
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 6f9680f..ef6b343 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -47,6 +47,7 @@
namespace art {
namespace verifier {
+static constexpr bool kTimeVerifyMethod = !kIsDebugBuild;
static constexpr bool gDebugVerify = false;
// TODO: Add a constant to method_verifier to turn on verbose logging?
@@ -252,7 +253,7 @@
bool allow_soft_failures,
bool need_precise_constants) {
MethodVerifier::FailureKind result = kNoFailure;
- uint64_t start_ns = NanoTime();
+ uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
method_idx, method, method_access_flags, true, allow_soft_failures,
@@ -280,10 +281,12 @@
}
result = kHardFailure;
}
- uint64_t duration_ns = NanoTime() - start_ns;
- if (duration_ns > MsToNs(100) && !kIsDebugBuild) {
- LOG(WARNING) << "Verification of " << PrettyMethod(method_idx, *dex_file)
- << " took " << PrettyDuration(duration_ns);
+ if (kTimeVerifyMethod) {
+ uint64_t duration_ns = NanoTime() - start_ns;
+ if (duration_ns > MsToNs(100)) {
+ LOG(WARNING) << "Verification of " << PrettyMethod(method_idx, *dex_file)
+ << " took " << PrettyDuration(duration_ns);
+ }
}
return result;
}