Add option to tune sample collection based on thread sensitivity
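
When the process is in a jank-perceptible state and the current thread is
flagged as sensitive by the framework hook (e.g. the UI thread), each sample
is multiplied by a priority thread weight, so methods running on such threads
reach the JIT warmup/hot/OSR thresholds sooner. The weight defaults to
max(compile_threshold / 2000, 1) and can be set explicitly with
-Xjitprithreadweight:N; for example, with a hypothetical weight of 20:

  dalvikvm -Xjitprithreadweight:20 ...

The sensitive-thread hook previously owned by Monitor moves to Thread so the
JIT can query it as well.
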
Bug: 28065407
Bug: 27865109
Change-Id: Icdb89f8f8874a41c07e73185523d18e8956620d3
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 1da1181..0af72ce 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -665,6 +665,10 @@
} else {
countdown_value = jit::kJitCheckForOSR;
}
+ if (jit::Jit::ShouldUsePriorityThreadWeight()) {
+ int32_t priority_thread_weight = cache->PriorityThreadWeight();
+ countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
+ }
}
/*
* The actual hotness threshold may exceed the range of our int16_t countdown value. This is
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4db9f71..3344346 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -86,9 +86,29 @@
}
}
+ if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
+ jit_options->priority_thread_weight_ =
+ *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
+ if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
+ LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
+ } else if (jit_options->priority_thread_weight_ == 0) {
+ LOG(FATAL) << "Priority thread weight cannot be 0.";
+ }
+ } else {
+ jit_options->priority_thread_weight_ =
+ std::max(jit_options->compile_threshold_ / 2000, static_cast<size_t>(1));
+ }
+
return jit_options;
}
+bool Jit::ShouldUsePriorityThreadWeight() {
+ // TODO(calin): Verify that IsSensitiveThread covers only the cases we are interested in.
+ // In particular, if apps can set StrictMode policies for any of their threads, we will
+ // need another way to track sensitive threads.
+ return Runtime::Current()->InJankPerceptibleProcessState() && Thread::IsSensitiveThread();
+}
+
void Jit::DumpInfo(std::ostream& os) {
code_cache_->Dump(os);
cumulative_timings_.Dump(os);
@@ -267,9 +287,13 @@
void Jit::CreateInstrumentationCache(size_t compile_threshold,
size_t warmup_threshold,
- size_t osr_threshold) {
+ size_t osr_threshold,
+ uint16_t priority_thread_weight) {
instrumentation_cache_.reset(
- new jit::JitInstrumentationCache(compile_threshold, warmup_threshold, osr_threshold));
+ new jit::JitInstrumentationCache(compile_threshold,
+ warmup_threshold,
+ osr_threshold,
+ priority_thread_weight));
}
void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 76f540d..e212366 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -48,7 +48,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void CreateInstrumentationCache(size_t compile_threshold,
size_t warmup_threshold,
- size_t osr_threshold);
+ size_t osr_threshold,
+ uint16_t priority_thread_weight);
void CreateThreadPool();
const JitCodeCache* GetCodeCache() const {
return code_cache_.get();
@@ -101,6 +102,9 @@
// Return whether we can invoke JIT code for `method`.
bool CanInvokeCompiledCode(ArtMethod* method);
+ // Return whether the runtime should use a priority thread weight when sampling.
+ static bool ShouldUsePriorityThreadWeight();
+
// If an OSR compiled version is available for `method`,
// and `dex_pc + dex_pc_offset` is an entry point of that compiled
// version, this method will jump to the compiled code, let it run,
@@ -154,6 +158,9 @@
size_t GetOsrThreshold() const {
return osr_threshold_;
}
+ uint16_t GetPriorityThreadWeight() const {
+ return priority_thread_weight_;
+ }
size_t GetCodeCacheInitialCapacity() const {
return code_cache_initial_capacity_;
}
@@ -187,6 +194,7 @@
size_t compile_threshold_;
size_t warmup_threshold_;
size_t osr_threshold_;
+ uint16_t priority_thread_weight_;
bool dump_info_on_shutdown_;
bool save_profiling_info_;
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index d2180c7..b2c0c20 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -82,10 +82,12 @@
JitInstrumentationCache::JitInstrumentationCache(uint16_t hot_method_threshold,
uint16_t warm_method_threshold,
- uint16_t osr_method_threshold)
+ uint16_t osr_method_threshold,
+ uint16_t priority_thread_weight)
: hot_method_threshold_(hot_method_threshold),
warm_method_threshold_(warm_method_threshold),
osr_method_threshold_(osr_method_threshold),
+ priority_thread_weight_(priority_thread_weight),
listener_(this) {
}
@@ -145,8 +147,13 @@
DCHECK_GT(warm_method_threshold_, 0);
DCHECK_GT(hot_method_threshold_, warm_method_threshold_);
DCHECK_GT(osr_method_threshold_, hot_method_threshold_);
+ DCHECK_GE(priority_thread_weight_, 1);
+ DCHECK_LE(priority_thread_weight_, hot_method_threshold_);
int32_t starting_count = method->GetCounter();
+ if (Jit::ShouldUsePriorityThreadWeight()) {
+ count *= priority_thread_weight_;
+ }
int32_t new_count = starting_count + count; // int32 here to avoid wrap-around
if (starting_count < warm_method_threshold_) {
if (new_count >= warm_method_threshold_) {
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 7ffd4eb..d0545f8 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -99,7 +99,8 @@
public:
JitInstrumentationCache(uint16_t hot_method_threshold,
uint16_t warm_method_threshold,
- uint16_t osr_method_threshold);
+ uint16_t osr_method_threshold,
+ uint16_t priority_thread_weight);
void AddSamples(Thread* self, ArtMethod* method, uint16_t samples)
SHARED_REQUIRES(Locks::mutator_lock_);
void CreateThreadPool();
@@ -117,6 +118,10 @@
return warm_method_threshold_;
}
+ size_t PriorityThreadWeight() const {
+ return priority_thread_weight_;
+ }
+
// Wait until there are no more pending compilation tasks.
void WaitForCompilationToFinish(Thread* self);
@@ -124,6 +129,7 @@
uint16_t hot_method_threshold_;
uint16_t warm_method_threshold_;
uint16_t osr_method_threshold_;
+ uint16_t priority_thread_weight_;
JitInstrumentationListener listener_;
std::unique_ptr<ThreadPool> thread_pool_;
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index a262c7a..6290cb2 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -67,19 +67,10 @@
* at any given time.
*/
-bool (*Monitor::is_sensitive_thread_hook_)() = nullptr;
uint32_t Monitor::lock_profiling_threshold_ = 0;
-bool Monitor::IsSensitiveThread() {
- if (is_sensitive_thread_hook_ != nullptr) {
- return (*is_sensitive_thread_hook_)();
- }
- return false;
-}
-
-void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)()) {
+void Monitor::Init(uint32_t lock_profiling_threshold) {
lock_profiling_threshold_ = lock_profiling_threshold;
- is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 707d0f1..ae9b3cc 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -55,8 +55,7 @@
~Monitor();
- static bool IsSensitiveThread();
- static void Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)());
+ static void Init(uint32_t lock_profiling_threshold);
// Return the thread id of the lock owner or 0 when there is no owner.
static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
@@ -240,7 +239,6 @@
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
- static bool (*is_sensitive_thread_hook_)();
static uint32_t lock_profiling_threshold_;
Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 82ef2d8..671cb60 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -66,7 +66,7 @@
cp = EventLogWriteString(cp, procName, len);
// Emit the sensitive thread ("main thread") status, 5 bytes.
- cp = EventLogWriteInt(cp, Monitor::IsSensitiveThread());
+ cp = EventLogWriteInt(cp, Thread::IsSensitiveThread());
// Emit self thread name string, <= 37 bytes.
std::string thread_name;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 48c91f6..c8d4291 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -169,6 +169,9 @@
.Define("-Xjitosrthreshold:_")
.WithType<unsigned int>()
.IntoKey(M::JITOsrThreshold)
+ .Define("-Xjitprithreadweight:_")
+ .WithType<unsigned int>()
+ .IntoKey(M::JITPriorityThreadWeight)
.Define("-Xjitsaveprofilinginfo")
.WithValue(true)
.IntoKey(M::JITSaveProfilingInfo)
@@ -702,6 +705,7 @@
UsageMessage(stream, " -Xjitmaxsize:N\n");
UsageMessage(stream, " -Xjitwarmupthreshold:integervalue\n");
UsageMessage(stream, " -Xjitosrthreshold:integervalue\n");
+ UsageMessage(stream, " -Xjitprithreadweight:integervalue\n");
UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0a65b6b..37bb4c1 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -903,8 +903,8 @@
oat_file_manager_ = new OatFileManager;
- Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
- runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
+ Thread::SetSensitiveThreadHook(runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
+ Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold));
boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
@@ -1924,7 +1924,8 @@
if (jit_.get() != nullptr) {
jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold(),
jit_options_->GetWarmupThreshold(),
- jit_options_->GetOsrThreshold());
+ jit_options_->GetOsrThreshold(),
+ jit_options_->GetPriorityThreadWeight());
jit_->CreateThreadPool();
// Notify native debugger about the classes already loaded before the creation of the jit.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 6a50ffa..6433c33 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -71,6 +71,7 @@
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold)
+RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 57ccabc..e015833 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -88,6 +88,8 @@
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
+bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
+
static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
// For implicit overflow checks we reserve an extra piece of memory at the bottom
diff --git a/runtime/thread.h b/runtime/thread.h
index 3123c71..2218b5a 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1097,6 +1097,13 @@
return debug_disallow_read_barrier_;
}
+ static bool IsSensitiveThread() {
+ if (is_sensitive_thread_hook_ != nullptr) {
+ return (*is_sensitive_thread_hook_)();
+ }
+ return false;
+ }
+
private:
explicit Thread(bool daemon);
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
@@ -1172,6 +1179,10 @@
ALWAYS_INLINE void PassActiveSuspendBarriers()
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+ static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
+ is_sensitive_thread_hook_ = is_sensitive_thread_hook;
+ }
+
// 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
// change from being Suspended to Runnable without a suspend request occurring.
union PACKED(4) StateAndFlags {
@@ -1214,6 +1225,10 @@
// their suspend count is > 0.
static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+ // Hook supplied by the framework; returns true when StrictMode events are
+ // traced for the current thread.
+ static bool (*is_sensitive_thread_hook_)();
+
/***********************************************************************************************/
// Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
// pointer size differences. To encourage shorter encoding, more frequently used values appear