author Vladimir Marko <vmarko@google.com> 2017-11-09 17:18:05 +0000
committer Vladimir Marko <vmarko@google.com> 2017-11-14 11:40:19 +0000
commit 103fa6ed700f4efca4ccf17eeef3fef231ceb7b5 (patch)
tree 6642f46c648c795bd3f6097e9503bbdba9f9a0fe
parent f4580f43bcb74061ba8b7847ec65f2fe160f1751 (diff)
ART: Destroy unprocessed tasks in TaskProcessor.
In the past I've seen valgrind complain about leaked tasks in the
TaskProcessor, so make sure we clean up properly. Also hold the mutex
and condition variable directly in the object instead of allocating
separate objects on the heap.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: If0d4ac39bf3d8e2aa9b465186d4fa7c3cb746718
-rw-r--r--  runtime/gc/task_processor.cc | 39
-rw-r--r--  runtime/gc/task_processor.h  | 20
2 files changed, 32 insertions(+), 27 deletions(-)
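
Before the diff itself, here is a minimal standalone sketch of the ownership pattern this change moves to: the mutex and condition variable become plain members instead of heap allocations, and the destructor finalizes any tasks that were queued but never processed. It uses std::mutex and std::condition_variable as stand-ins for ART's own Mutex and ConditionVariable types, and the Task and SketchTaskProcessor names are made up purely for illustration.

// Sketch only: std::mutex / std::condition_variable stand in for ART's
// Mutex / ConditionVariable; Task and SketchTaskProcessor are hypothetical.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <set>

struct Task {
  virtual ~Task() {}
  virtual void Finalize() = 0;  // typically "delete this" for heap-allocated tasks
};

class SketchTaskProcessor {
 public:
  SketchTaskProcessor() = default;

  // Tasks still queued at destruction are finalized instead of leaked,
  // which is what the valgrind report in the commit message was about.
  // No locking here: nothing else may touch the object while it is destroyed.
  ~SketchTaskProcessor() {
    if (!tasks_.empty()) {
      std::fprintf(stderr, "Finalizing %zu unprocessed tasks.\n", tasks_.size());
      for (Task* task : tasks_) {
        task->Finalize();
      }
    }
  }

  void AddTask(Task* task) {
    std::lock_guard<std::mutex> lock(lock_);
    tasks_.insert(task);
    cond_.notify_one();
  }

 private:
  // Held by value, mirroring the switch away from Mutex* and
  // std::unique_ptr<ConditionVariable> members.
  std::mutex lock_;
  std::condition_variable cond_;
  std::multiset<Task*> tasks_;
};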
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index e928644054..64e8322a0d 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -23,32 +23,37 @@ namespace art {
namespace gc {
TaskProcessor::TaskProcessor()
- : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false),
+ : lock_("Task processor lock", kReferenceProcessorLock),
+ cond_("Task processor condition", lock_),
+ is_running_(false),
running_thread_(nullptr) {
- // Piggyback off the reference processor lock level.
- cond_.reset(new ConditionVariable("Task processor condition", *lock_));
}
TaskProcessor::~TaskProcessor() {
- delete lock_;
+ if (!tasks_.empty()) {
+ LOG(WARNING) << "TaskProcessor: Finalizing " << tasks_.size() << " unprocessed tasks.";
+ for (HeapTask* task : tasks_) {
+ task->Finalize();
+ }
+ }
}
void TaskProcessor::AddTask(Thread* self, HeapTask* task) {
ScopedThreadStateChange tsc(self, kWaitingForTaskProcessor);
- MutexLock mu(self, *lock_);
+ MutexLock mu(self, lock_);
tasks_.insert(task);
- cond_->Signal(self);
+ cond_.Signal(self);
}
HeapTask* TaskProcessor::GetTask(Thread* self) {
ScopedThreadStateChange tsc(self, kWaitingForTaskProcessor);
- MutexLock mu(self, *lock_);
+ MutexLock mu(self, lock_);
while (true) {
if (tasks_.empty()) {
if (!is_running_) {
return nullptr;
}
- cond_->Wait(self); // Empty queue, wait until we are signalled.
+ cond_.Wait(self); // Empty queue, wait until we are signalled.
} else {
// Non empty queue, look at the top element and see if we are ready to run it.
const uint64_t current_time = NanoTime();
@@ -61,18 +66,18 @@ HeapTask* TaskProcessor::GetTask(Thread* self) {
return task;
}
DCHECK_GT(target_time, current_time);
- // Wait untl we hit the target run time.
+ // Wait until we hit the target run time.
const uint64_t delta_time = target_time - current_time;
const uint64_t ms_delta = NsToMs(delta_time);
const uint64_t ns_delta = delta_time - MsToNs(ms_delta);
- cond_->TimedWait(self, static_cast<int64_t>(ms_delta), static_cast<int32_t>(ns_delta));
+ cond_.TimedWait(self, static_cast<int64_t>(ms_delta), static_cast<int32_t>(ns_delta));
}
}
UNREACHABLE();
}
void TaskProcessor::UpdateTargetRunTime(Thread* self, HeapTask* task, uint64_t new_target_time) {
- MutexLock mu(self, *lock_);
+ MutexLock mu(self, lock_);
// Find the task.
auto range = tasks_.equal_range(task);
for (auto it = range.first; it != range.second; ++it) {
@@ -85,7 +90,7 @@ void TaskProcessor::UpdateTargetRunTime(Thread* self, HeapTask* task, uint64_t n
// If we became the first task then we may need to signal since we changed the task that we
// are sleeping on.
if (*tasks_.begin() == task) {
- cond_->Signal(self);
+ cond_.Signal(self);
}
return;
}
@@ -94,24 +99,24 @@ void TaskProcessor::UpdateTargetRunTime(Thread* self, HeapTask* task, uint64_t n
}
bool TaskProcessor::IsRunning() const {
- MutexLock mu(Thread::Current(), *lock_);
+ MutexLock mu(Thread::Current(), lock_);
return is_running_;
}
Thread* TaskProcessor::GetRunningThread() const {
- MutexLock mu(Thread::Current(), *lock_);
+ MutexLock mu(Thread::Current(), lock_);
return running_thread_;
}
void TaskProcessor::Stop(Thread* self) {
- MutexLock mu(self, *lock_);
+ MutexLock mu(self, lock_);
is_running_ = false;
running_thread_ = nullptr;
- cond_->Broadcast(self);
+ cond_.Broadcast(self);
}
void TaskProcessor::Start(Thread* self) {
- MutexLock mu(self, *lock_);
+ MutexLock mu(self, lock_);
is_running_ = true;
running_thread_ = self;
}
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index e40fa06319..f6b5607037 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -54,17 +54,17 @@ class TaskProcessor {
public:
TaskProcessor();
virtual ~TaskProcessor();
- void AddTask(Thread* self, HeapTask* task) REQUIRES(!*lock_);
- HeapTask* GetTask(Thread* self) REQUIRES(!*lock_);
- void Start(Thread* self) REQUIRES(!*lock_);
+ void AddTask(Thread* self, HeapTask* task) REQUIRES(!lock_);
+ HeapTask* GetTask(Thread* self) REQUIRES(!lock_);
+ void Start(Thread* self) REQUIRES(!lock_);
// Stop tells the RunAllTasks to finish up the remaining tasks as soon as
// possible then return.
- void Stop(Thread* self) REQUIRES(!*lock_);
- void RunAllTasks(Thread* self) REQUIRES(!*lock_);
- bool IsRunning() const REQUIRES(!*lock_);
+ void Stop(Thread* self) REQUIRES(!lock_);
+ void RunAllTasks(Thread* self) REQUIRES(!lock_);
+ bool IsRunning() const REQUIRES(!lock_);
void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
- REQUIRES(!*lock_);
- Thread* GetRunningThread() const REQUIRES(!*lock_);
+ REQUIRES(!lock_);
+ Thread* GetRunningThread() const REQUIRES(!lock_);
private:
class CompareByTargetRunTime {
@@ -74,9 +74,9 @@ class TaskProcessor {
}
};
- mutable Mutex* lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ConditionVariable cond_ GUARDED_BY(lock_);
bool is_running_ GUARDED_BY(lock_);
- std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
Thread* running_thread_ GUARDED_BY(lock_);
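
For context on where leaked tasks could come from: the declarations above suggest a worker loop roughly like the sketch below. RunAllTasks itself is not part of this diff, and HeapTask::Run taking a Thread* is an assumption here. Tasks that never reach such a loop before the processor goes away are exactly the ones the new destructor now finalizes.

// Sketch only: an assumed worker loop over the declared API; HeapTask::Run
// with this signature is not shown in this diff.
void DrainTasks(TaskProcessor* processor, Thread* self) {
  processor->Start(self);
  // GetTask() blocks until a task is due and returns nullptr once Stop()
  // has been called and the queue has been emptied.
  while (HeapTask* task = processor->GetTask(self)) {
    task->Run(self);
    task->Finalize();
  }
}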