Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/heap-inl.h | 22
-rw-r--r--  runtime/gc/heap.cc    | 58
-rw-r--r--  runtime/gc/heap.h     |  2
3 files changed, 69 insertions, 13 deletions
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3101c68599..9d2f6d1238 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -48,11 +48,20 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
}
// Need to check that we aren't the large object allocator since the large object allocation
// code path includes this function. If we didn't check, we would have an infinite loop.
+ mirror::Object* obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
- return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
- pre_fence_visitor);
+ obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
+ pre_fence_visitor);
+ if (obj != nullptr) {
+ return obj;
+ } else {
+ // There should be an OOM exception, since we are retrying, clear it.
+ self->ClearException();
+ }
+ // If the large object allocation failed, try to use the normal spaces (main space,
+ // non moving space). This can happen if there is significant virtual address space
+ // fragmentation.
}
- mirror::Object* obj;
AllocationTimer alloc_timer(this, &obj);
size_t bytes_allocated;
size_t usable_size;
@@ -171,10 +180,13 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
}
template <bool kInstrumented, typename PreFenceVisitor>
-inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
+inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor) {
- return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
+ // Save and restore the class in case it moves.
+ StackHandleScope<1> hs(self);
+ auto klass_wrapper = hs.NewHandleWrapper(klass);
+ return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
kAllocatorTypeLOS,
pre_fence_visitor);
}
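
Note on the heap-inl.h hunks above: AllocLargeObject now takes mirror::Class** and wraps it in a StackHandleScope handle because the large-object attempt can trigger a GC that moves the class; the caller's pointer must be updated before the retry falls back to the normal spaces (after clearing the pending OOM exception). Below is a minimal sketch of that write-back-handle idea; the names (Object, HandleWrapper, VisitRoot) are simplified stand-ins for illustration, not ART's real Handle/StackHandleScope API.

    // Sketch only: models how a handle lets a moving GC relocate an object
    // while the caller's raw pointer is automatically fixed up afterwards.
    struct Object {};  // stand-in for mirror::Object / mirror::Class

    class HandleWrapper {
     public:
      // Remembers the address of a caller-owned root pointer.
      explicit HandleWrapper(Object** root) : root_(root), obj_(*root) {}
      // Writes the (possibly relocated) address back when the scope ends.
      ~HandleWrapper() { *root_ = obj_; }
      // A moving GC would call this to report the object's new address.
      void VisitRoot(Object* moved_to) { obj_ = moved_to; }

     private:
      Object** const root_;  // caller's pointer, updated on destruction
      Object* obj_;          // current, possibly relocated, address
    };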
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0cceaa4467..0fd0a9ff52 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -365,6 +365,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
uint8_t* heap_end = continuous_spaces_.back()->Limit();
size_t heap_capacity = heap_end - heap_begin;
// Remove the main backup space since it slows down the GC to have unused extra spaces.
+ // TODO: Avoid needing to do this.
if (main_space_backup_.get() != nullptr) {
RemoveSpace(main_space_backup_.get());
}
@@ -977,6 +978,22 @@ void Heap::DoPendingTransitionOrTrim() {
Trim();
}
+class TrimIndirectReferenceTableClosure : public Closure {
+ public:
+ explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
+ }
+ virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ ATRACE_BEGIN("Trimming reference table");
+ thread->GetJniEnv()->locals.Trim();
+ ATRACE_END();
+ barrier_->Pass(Thread::Current());
+ }
+
+ private:
+ Barrier* const barrier_;
+};
+
+
void Heap::Trim() {
Thread* self = Thread::Current();
{
@@ -998,6 +1015,19 @@ void Heap::Trim() {
WaitForGcToCompleteLocked(kGcCauseTrim, self);
collector_type_running_ = kCollectorTypeHeapTrim;
}
+ // Trim reference tables.
+ {
+ ScopedObjectAccess soa(self);
+ JavaVMExt* vm = soa.Vm();
+ // Trim globals indirect reference table.
+ vm->TrimGlobals();
+ // Trim locals indirect reference tables.
+ Barrier barrier(0);
+ TrimIndirectReferenceTableClosure closure(&barrier);
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ barrier.Increment(self, barrier_count);
+ }
uint64_t start_ns = NanoTime();
// Trim the managed spaces.
uint64_t total_alloc_space_allocated = 0;
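
Note on the Trim() hunk above: the globals table is trimmed directly, while each thread's locals table is trimmed by running a closure on every thread via RunCheckpoint and waiting on a Barrier until all threads have passed. The sketch below shows that checkpoint-plus-barrier pattern in a self-contained form; it uses std::thread and a hand-rolled counting barrier rather than ART's Barrier/ThreadList types, so all names are illustrative assumptions.

    // Sketch of the checkpoint + barrier pattern used in Heap::Trim().
    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <thread>
    #include <vector>

    class CountingBarrier {
     public:
      // Each checkpointed thread calls Pass() once when its work is done.
      void Pass() {
        std::lock_guard<std::mutex> lock(mu_);
        ++passed_;
        cv_.notify_all();
      }
      // The initiating thread blocks until `count` threads have passed.
      void WaitFor(size_t count) {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [&] { return passed_ >= count; });
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      size_t passed_ = 0;
    };

    int main() {
      CountingBarrier barrier;
      std::vector<std::thread> workers;
      const size_t kThreads = 4;
      for (size_t i = 0; i < kThreads; ++i) {
        workers.emplace_back([&barrier] {
          // Per-thread work: in the real code, trim this thread's local
          // indirect reference table.
          barrier.Pass();
        });
      }
      barrier.WaitFor(kThreads);  // wait for every checkpoint to run
      for (auto& t : workers) t.join();
      return 0;
    }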
@@ -1571,6 +1601,8 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const uint64_t space_size_before_compaction = from_space->Size();
AddSpace(to_space);
+ // Make sure that we will have enough room to copy.
+ CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
// Leave as prot read so that we can still run ROSAlloc verification on this space.
from_space->GetMemMap()->Protect(PROT_READ);
@@ -1689,8 +1721,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
RemoveSpace(temp_space_);
temp_space_ = nullptr;
mem_map->Protect(PROT_READ | PROT_WRITE);
- CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
- mem_map->Size());
+ CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
+ std::min(mem_map->Size(), growth_limit_), mem_map->Size());
mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
@@ -1703,9 +1735,9 @@ void Heap::TransitionCollector(CollectorType collector_type) {
if (kIsDebugBuild && kUseRosAlloc) {
mem_map->Protect(PROT_READ | PROT_WRITE);
}
- main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
- mem_map->Size(), mem_map->Size(),
- name, true));
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(
+ mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
+ mem_map->Size(), name, true));
if (kIsDebugBuild && kUseRosAlloc) {
mem_map->Protect(PROT_NONE);
}
@@ -1947,7 +1979,8 @@ void Heap::PreZygoteFork() {
MemMap* mem_map = main_space_->ReleaseMemMap();
RemoveSpace(main_space_);
space::Space* old_main_space = main_space_;
- CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
+ CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
+ mem_map->Size());
delete old_main_space;
AddSpace(main_space_);
} else {
@@ -2959,7 +2992,18 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
void Heap::ClearGrowthLimit() {
growth_limit_ = capacity_;
- non_moving_space_->ClearGrowthLimit();
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsMallocSpace()) {
+ gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
+ malloc_space->ClearGrowthLimit();
+ malloc_space->SetFootprintLimit(malloc_space->Capacity());
+ }
+ }
+  // The main space backup isn't kept in continuous_spaces_ for performance reasons, so clear its limits here.
+ if (main_space_backup_.get() != nullptr) {
+ main_space_backup_->ClearGrowthLimit();
+ main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
+ }
}
void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
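
Note on the remaining heap.cc hunks: they all enforce one invariant. When the main malloc space is re-created from its mem map (collector transition, zygote fork), its initial usable size is clamped to growth_limit_ rather than the full map size, and ClearGrowthLimit later lifts both the growth limit and the allocator footprint limit on every malloc space, including the backup space that is not in continuous_spaces_. The sketch below models that bookkeeping with an illustrative struct; SpaceLimits and the helper names are assumptions for clarity, not ART types.

    // Illustrative only: the three sizes the hunks above keep consistent.
    #include <algorithm>
    #include <cstddef>

    struct SpaceLimits {
      size_t capacity;         // size of the backing mem map
      size_t growth_limit;     // cap currently imposed on the space
      size_t footprint_limit;  // how far the allocator may actually grow
    };

    // A re-created main space never starts larger than the heap growth limit.
    inline size_t InitialSpaceSize(size_t mem_map_size, size_t heap_growth_limit) {
      return std::min(mem_map_size, heap_growth_limit);
    }

    // Clearing the growth limit lifts both limits back to full capacity.
    inline void ClearGrowthLimit(SpaceLimits* space) {
      space->growth_limit = space->capacity;
      space->footprint_limit = space->capacity;
    }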
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 69a573ef98..4e1a0ff242 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -654,7 +654,7 @@ class Heap {
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
- mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
+ mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);