path: root/libs/binder/RpcState.cpp
author Steven Moreland <smoreland@google.com> 2021-06-04 02:19:37 +0000
committer Steven Moreland <smoreland@google.com> 2021-06-08 02:09:41 +0000
commit d45be62a2bdd204d5d728aaa56653ea5e072cd9c (patch)
tree 465335e9616a7c73cf2e05bcc559da6874c0bdd5 /libs/binder/RpcState.cpp
parent c9d7b53c6b7e94592818d2552d57f18ce2722c9b (diff)
libbinder: RPC limit on oneway transactions
Add artificially large limits on oneway transactions: warn after 1000 pending transactions and invalidate the connection after 10000. These limits are fairly arbitrary, approximating the order of magnitude of average-sized binder transactions you can have before filling up the binder buffer (I originally did this analysis for HIDL passthrough mode, which uses 3000). However, I've raised the limit here because, in this case, we must actually terminate the RPC connection. If that happens, the situation is already quite bad, and the best we can hope for is to reset everything.

Unlike kernel binder, we can't return an error when the queue fills up, since we're processing this on the other side of the transaction. We must also do this type of analysis on the server side, regardless of (potentially better) logic on the client side, so that this code also guards against broken clients.

Fixes: 183140903
Test: binderRpcTest
Change-Id: Ibe1a854b526a7f862776df36d555cb346f01fde7
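To make the warn/terminate behavior described above concrete, here is a minimal standalone sketch of the same threshold pattern. The `AsyncTodoQueue` type, its constants, and `enqueue` are simplified stand-ins for illustration only; they are not the actual libbinder types, which appear in the diff below.

```cpp
#include <cstddef>
#include <cstdio>
#include <deque>
#include <string>

// Simplified illustration of the warn/terminate thresholds: warn once per
// 1000 pending entries past the warn level, and signal termination once the
// queue reaches the terminate level.
struct AsyncTodoQueue {
    static constexpr size_t kTerminateLevel = 10000; // connection is invalidated here
    static constexpr size_t kWarnLevel = 1000;       // start warning here
    static constexpr size_t kWarnPer = 1000;         // warn once per this many entries

    std::deque<std::string> pending;

    // Returns false when the connection should be terminated.
    bool enqueue(std::string transaction) {
        pending.push_back(std::move(transaction));
        const size_t numPending = pending.size();

        if (numPending >= kWarnLevel) {
            if (numPending >= kTerminateLevel) {
                fprintf(stderr, "ERROR: %zu pending oneway transactions. Terminating!\n",
                        numPending);
                return false;
            }
            if (numPending % kWarnPer == 0) {
                fprintf(stderr, "Warning: many oneway transactions built up (%zu)\n",
                        numPending);
            }
        }
        return true;
    }
};

int main() {
    AsyncTodoQueue queue;
    for (size_t i = 0; i < 10000; i++) {
        // In the real code, a false return corresponds to shutting down the session.
        if (!queue.enqueue("oneway")) break;
    }
    return 0;
}
```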
Diffstat (limited to 'libs/binder/RpcState.cpp')
-rw-r--r-- libs/binder/RpcState.cpp | 29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 7e731f3426..3113841d27 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -617,7 +617,7 @@ status_t RpcState::processTransactInternal(const base::unique_fd& fd, const sp<R
(void)session->shutdownAndWait(false);
replyStatus = BAD_VALUE;
} else if (transaction->flags & IBinder::FLAG_ONEWAY) {
- std::lock_guard<std::mutex> _l(mNodeMutex);
+ std::unique_lock<std::mutex> _l(mNodeMutex);
auto it = mNodeForAddress.find(addr);
if (it->second.binder.promote() != target) {
ALOGE("Binder became invalid during transaction. Bad client? %s",
@@ -626,16 +626,33 @@ status_t RpcState::processTransactInternal(const base::unique_fd& fd, const sp<R
} else if (transaction->asyncNumber != it->second.asyncNumber) {
// we need to process some other asynchronous transaction
// first
- // TODO(b/183140903): limit enqueues/detect overfill for bad client
- // TODO(b/183140903): detect when an object is deleted when it still has
- // pending async transactions
it->second.asyncTodo.push(BinderNode::AsyncTodo{
.ref = target,
.data = std::move(transactionData),
.asyncNumber = transaction->asyncNumber,
});
- LOG_RPC_DETAIL("Enqueuing %" PRId64 " on %s", transaction->asyncNumber,
- addr.toString().c_str());
+
+ size_t numPending = it->second.asyncTodo.size();
+ LOG_RPC_DETAIL("Enqueuing %" PRId64 " on %s (%zu pending)",
+ transaction->asyncNumber, addr.toString().c_str(), numPending);
+
+ constexpr size_t kArbitraryOnewayCallTerminateLevel = 10000;
+ constexpr size_t kArbitraryOnewayCallWarnLevel = 1000;
+ constexpr size_t kArbitraryOnewayCallWarnPer = 1000;
+
+ if (numPending >= kArbitraryOnewayCallWarnLevel) {
+ if (numPending >= kArbitraryOnewayCallTerminateLevel) {
+ ALOGE("WARNING: %zu pending oneway transactions. Terminating!", numPending);
+ _l.unlock();
+ (void)session->shutdownAndWait(false);
+ return FAILED_TRANSACTION;
+ }
+
+ if (numPending % kArbitraryOnewayCallWarnPer == 0) {
+ ALOGW("Warning: many oneway transactions built up on %p (%zu)",
+ target.get(), numPending);
+ }
+ }
return OK;
}
}
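The commit message also mentions "(potentially better) logic on the client side." A client could, for example, cap how many oneway calls it lets pile up before blocking, so a well-behaved client never approaches the server's terminate level. The sketch below is a hypothetical client-side throttle to illustrate that idea; `OnewayThrottle`, `acquire`, and `release` are illustrative names and are not part of the libbinder API.

```cpp
#include <condition_variable>
#include <cstddef>
#include <mutex>

// Hypothetical client-side guard: limit the number of oneway calls a client
// allows to be outstanding at once.
class OnewayThrottle {
public:
    explicit OnewayThrottle(size_t maxPending) : mMaxPending(maxPending) {}

    // Block until there is room for another pending oneway call, then claim a slot.
    void acquire() {
        std::unique_lock<std::mutex> lock(mMutex);
        mCv.wait(lock, [this] { return mPending < mMaxPending; });
        mPending++;
    }

    // Release a slot once the client believes the call has been drained.
    void release() {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            if (mPending > 0) mPending--;
        }
        mCv.notify_one();
    }

private:
    const size_t mMaxPending;
    size_t mPending = 0;
    std::mutex mMutex;
    std::condition_variable mCv;
};
```

The catch, and the reason the server-side check in this change is needed regardless, is that oneway calls produce no reply, so a client has no reliable signal for when to call something like `release()`, and a broken or malicious client may not throttle at all.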