From 8994c180f34edd4133f0ca10170b03fadd081017 Mon Sep 17 00:00:00 2001
From: Jeff Sharkey
Date: Fri, 11 Sep 2020 12:07:10 -0600
Subject: Collect Parcel statistics using std::atomics.

Instead of taking a heavyweight pthread_mutex_t to update the size and
count statistics, this change shifts to faster std::atomic values, an
approach already pioneered in hardware::Parcel.

The Parcel benchmarks referenced below show a ~10% performance
improvement in contended cases. Note that this is in addition to the
improvements recently made by the shift to a linked-list pooling design
and the removal of advisory native-allocation updates. Here are the
combined improvements of all three changes, expressed as time per
iteration relative to baseline; this works out to ~2x throughput in the
1-thread case and ~9x in the 16-thread case:

                1 thread    4 threads    16 threads
    Combined    50.48%      15.58%       11.41%

Bug: 165032569
Test: ./frameworks/base/libs/hwui/tests/scripts/prep_generic.sh little && atest CorePerfTests:android.os.ParcelObtainPerfTest
Change-Id: I436e70cdfd06e747d5c8fcc0ddd6ecf92737cf9c
Merged-In: I436e70cdfd06e747d5c8fcc0ddd6ecf92737cf9c
---
 libs/binder/Parcel.cpp | 43 ++++++++++++-------------------------------
 1 file changed, 12 insertions(+), 31 deletions(-)

diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index a9c19b3d86..40dd09b180 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -77,9 +77,8 @@ namespace android {
 // many things compile this into prebuilts on the stack
 static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);
 
-static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
-static size_t gParcelGlobalAllocSize = 0;
-static size_t gParcelGlobalAllocCount = 0;
+static std::atomic<size_t> gParcelGlobalAllocCount;
+static std::atomic<size_t> gParcelGlobalAllocSize;
 
 static size_t gMaxFds = 0;
 
@@ -275,17 +274,11 @@ Parcel::~Parcel()
 }
 
 size_t Parcel::getGlobalAllocSize() {
-    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-    size_t size = gParcelGlobalAllocSize;
-    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
-    return size;
+    return gParcelGlobalAllocSize.load();
 }
 
 size_t Parcel::getGlobalAllocCount() {
-    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-    size_t count = gParcelGlobalAllocCount;
-    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
-    return count;
+    return gParcelGlobalAllocCount.load();
 }
 
 const uint8_t* Parcel::data() const
@@ -2630,16 +2623,8 @@ void Parcel::freeDataNoInit()
         releaseObjects();
         if (mData) {
             LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
-            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-            if (mDataCapacity <= gParcelGlobalAllocSize) {
-                gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
-            } else {
-                gParcelGlobalAllocSize = 0;
-            }
-            if (gParcelGlobalAllocCount > 0) {
-                gParcelGlobalAllocCount--;
-            }
-            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
+            gParcelGlobalAllocSize -= mDataCapacity;
+            gParcelGlobalAllocCount--;
             free(mData);
         }
         if (mObjects) free(mObjects);
@@ -2685,13 +2670,15 @@ status_t Parcel::restartWrite(size_t desired)
     if (data) {
         LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this,
                 mDataCapacity, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
-        gParcelGlobalAllocSize += desired;
-        gParcelGlobalAllocSize -= mDataCapacity;
+        if (mDataCapacity > desired) {
+            gParcelGlobalAllocSize -= (mDataCapacity - desired);
+        } else {
+            gParcelGlobalAllocSize += (desired - mDataCapacity);
+        }
+
         if (!mData) {
             gParcelGlobalAllocCount++;
         }
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
         mData = data;
         mDataCapacity = desired;
     }
 
@@ -2779,10 +2766,8 @@ status_t Parcel::continueWrite(size_t desired)
             mOwner = nullptr;
 
             LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
-            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
             gParcelGlobalAllocSize += desired;
             gParcelGlobalAllocCount++;
-            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
 
             mData = data;
             mObjects = objects;
@@ -2830,10 +2815,8 @@ status_t Parcel::continueWrite(size_t desired)
             if (data) {
                 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this,
                         mDataCapacity, desired);
-                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                 gParcelGlobalAllocSize += desired;
                 gParcelGlobalAllocSize -= mDataCapacity;
-                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                 mData = data;
                 mDataCapacity = desired;
             } else {
@@ -2865,10 +2848,8 @@ status_t Parcel::continueWrite(size_t desired)
         }
 
         LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
-        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
         gParcelGlobalAllocSize += desired;
         gParcelGlobalAllocCount++;
-        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
 
         mData = data;
         mDataSize = mDataPos = 0;
--
cgit v1.2.3-59-g8ed1b
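
Note on the pattern (editorial sketch, not part of the patch): the change
above replaces mutex-guarded counters with atomic read-modify-write
operations. Below is a minimal standalone C++ illustration of the
before/after patterns; all names here (gStatsLock, addLocked, addAtomic)
are illustrative and do not appear in Parcel.cpp.

    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    // Before: every statistics update serializes all threads through
    // one lock; under contention, waiters sleep in the kernel.
    std::mutex gStatsLock;
    std::size_t gAllocSizeLocked = 0;

    void addLocked(std::size_t bytes) {
        std::lock_guard<std::mutex> guard(gStatsLock);
        gAllocSizeLocked += bytes;
    }

    // After: operator+= on an integral std::atomic is an atomic
    // fetch_add, a single lock-free read-modify-write operation.
    std::atomic<std::size_t> gAllocSizeAtomic{0};

    void addAtomic(std::size_t bytes) {
        gAllocSizeAtomic += bytes;  // indivisible update, no lock taken
    }

    int main() {
        addLocked(512);
        addAtomic(512);
        std::printf("%zu %zu\n", gAllocSizeLocked, gAllocSizeAtomic.load());
        return 0;
    }

Two details are visible in the diff itself. First, freeDataNoInit() drops
the old clamp-at-zero logic, so an unbalanced decrement would now wrap the
unsigned counters rather than saturate at zero. Second, restartWrite()
replaces the "+= desired; -= mDataCapacity;" pair, which was previously
atomic as a unit under the lock, with a single adjustment in the correct
direction, so concurrent readers never observe a transiently inflated
size.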