diff options
| author | 2020-09-11 12:07:10 -0600 | |
|---|---|---|
| committer | 2020-10-29 20:00:09 +0000 | |
| commit | 8994c180f34edd4133f0ca10170b03fadd081017 (patch) | |
| tree | 334ed5293a6489cd88baf30ba564007f633fe655 | |
| parent | 3d67213a5af32a14398d943a8a6fae11c9a5dfd5 (diff) | |
Collect Parcel statistics using std::atomics.
Instead of using a heavy pthread_mutex_t for updating size and count
statistics, this change shifts to using faster std::atomic values,
an approach that was already pioneered over in hardware::Parcel.
The Parcel benchmarks referenced below are showing a ~10% perf
improvement for contended cases. Note that this is in addition to
the improvements recently made with the shift to a linked-list
pooling design, and removal of advisory native allocation updates.
Here's the combined improvements of all three changes together;
~2x throughput for the 1-thread case, and ~9x throughput for the
16-thread case.
|          | 1 thread | 4 threads | 16 threads |
|----------|----------|-----------|------------|
| Combined | 50.48%   | 15.58%    | 11.41%     |
Bug: 165032569
Test: ./frameworks/base/libs/hwui/tests/scripts/prep_generic.sh little && atest CorePerfTests:android.os.ParcelObtainPerfTest
Change-Id: I436e70cdfd06e747d5c8fcc0ddd6ecf92737cf9c
Merged-In: I436e70cdfd06e747d5c8fcc0ddd6ecf92737cf9c
| -rw-r--r-- | libs/binder/Parcel.cpp | 43 |
1 files changed, 12 insertions, 31 deletions
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp index a9c19b3d86..40dd09b180 100644 --- a/libs/binder/Parcel.cpp +++ b/libs/binder/Parcel.cpp @@ -77,9 +77,8 @@ namespace android { // many things compile this into prebuilts on the stack static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120); -static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER; -static size_t gParcelGlobalAllocSize = 0; -static size_t gParcelGlobalAllocCount = 0; +static std::atomic<size_t> gParcelGlobalAllocCount; +static std::atomic<size_t> gParcelGlobalAllocSize; static size_t gMaxFds = 0; @@ -275,17 +274,11 @@ Parcel::~Parcel() } size_t Parcel::getGlobalAllocSize() { - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); - size_t size = gParcelGlobalAllocSize; - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); - return size; + return gParcelGlobalAllocSize.load(); } size_t Parcel::getGlobalAllocCount() { - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); - size_t count = gParcelGlobalAllocCount; - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); - return count; + return gParcelGlobalAllocCount.load(); } const uint8_t* Parcel::data() const @@ -2630,16 +2623,8 @@ void Parcel::freeDataNoInit() releaseObjects(); if (mData) { LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity); - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); - if (mDataCapacity <= gParcelGlobalAllocSize) { - gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity; - } else { - gParcelGlobalAllocSize = 0; - } - if (gParcelGlobalAllocCount > 0) { - gParcelGlobalAllocCount--; - } - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); + gParcelGlobalAllocSize -= mDataCapacity; + gParcelGlobalAllocCount--; free(mData); } if (mObjects) free(mObjects); @@ -2685,13 +2670,15 @@ status_t Parcel::restartWrite(size_t desired) if (data) { LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired); - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 
- gParcelGlobalAllocSize += desired; - gParcelGlobalAllocSize -= mDataCapacity; + if (mDataCapacity > desired) { + gParcelGlobalAllocSize -= (mDataCapacity - desired); + } else { + gParcelGlobalAllocSize += (desired - mDataCapacity); + } + if (!mData) { gParcelGlobalAllocCount++; } - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); mData = data; mDataCapacity = desired; } @@ -2779,10 +2766,8 @@ status_t Parcel::continueWrite(size_t desired) mOwner = nullptr; LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired); - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); gParcelGlobalAllocSize += desired; gParcelGlobalAllocCount++; - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); mData = data; mObjects = objects; @@ -2830,10 +2815,8 @@ status_t Parcel::continueWrite(size_t desired) if (data) { LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity, desired); - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); gParcelGlobalAllocSize += desired; gParcelGlobalAllocSize -= mDataCapacity; - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); mData = data; mDataCapacity = desired; } else { @@ -2865,10 +2848,8 @@ status_t Parcel::continueWrite(size_t desired) } LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired); - pthread_mutex_lock(&gParcelGlobalAllocSizeLock); gParcelGlobalAllocSize += desired; gParcelGlobalAllocCount++; - pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); mData = data; mDataSize = mDataPos = 0; |