summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author Hans Boehm <hboehm@google.com> 2016-01-28 17:19:15 -0800
committer Hans Boehm <hboehm@google.com> 2016-01-28 17:19:15 -0800
commit b0171b9573c446724c10c86d41887d0133590b6c (patch)
tree 6c75d0329a34bcde360dd5fa78d989a0023147af
parent 4047c5b3b00f015b81cb52da0cda545d6a3820c8 (diff)
Do not use atomic increment in allocation as fence.
A sequentially consistent fetch_and_add implemented with ARM v8 acquire release operations is not a fence. Don't use it as one. The result may also be somewhat faster, since a sequentially consistent increment requires more fencing than needed for the increment. Bug: 16377103 Change-Id: I5b1add098d3488aa755f140612e54521b80aa749
-rw-r--r-- runtime/atomic.h    4
-rw-r--r-- runtime/gc/heap-inl.h    4
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 0faa3c69c6..d4a7f37bc6 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -278,6 +278,10 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
return this->fetch_add(value, std::memory_order_seq_cst); // Return old_value.
}
+ T FetchAndAddRelaxed(const T value) {
+ return this->fetch_add(value, std::memory_order_relaxed); // Return old_value.
+ }
+
T FetchAndSubSequentiallyConsistent(const T value) {
return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value.
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index d1ab587aea..f437830e11 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -145,9 +145,9 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
+ QuasiAtomic::ThreadFenceForConstructor();
new_num_bytes_allocated = static_cast<size_t>(
- num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_tl_bulk_allocated))
- + bytes_tl_bulk_allocated;
+ num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated)) + bytes_tl_bulk_allocated;
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
CHECK_LE(obj->SizeOf(), usable_size);