author     2021-06-04 02:19:37 +0000
committer  2021-06-08 02:09:41 +0000
commit     d45be62a2bdd204d5d728aaa56653ea5e072cd9c (patch)
tree       465335e9616a7c73cf2e05bcc559da6874c0bdd5
parent     c9d7b53c6b7e94592818d2552d57f18ce2722c9b (diff)
libbinder: RPC limit on oneway transactions
Add artificially large limits on oneway transactions: warn after 1000
pending transactions and invalidate the connection after 10000. This is
a fairly arbitrary limit, approximating the order of magnitude of
average-sized binder transactions you can have before filling up the
binder buffer (I originally did this analysis for HIDL passthrough
mode, which uses 3000). However, I've raised the limit here because
hitting it means we must actually terminate the RPC connection, and if
that happens we're already in quite a bad situation; the best we can
hope for is to reset everything. Unlike kernel binder, we can't return
an error when the queue fills up, since we're processing this on the
other side of the transaction. We must also do this analysis on the
server side, regardless of (potentially better) logic on the client
side, so that this code also guards against broken clients.
Fixes: 183140903
Test: binderRpcTest
Change-Id: Ibe1a854b526a7f862776df36d555cb346f01fde7
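The diff below is the actual change. As a rough illustration of the policy it
implements, here is a minimal standalone sketch of the warn/terminate pattern.
The names used here (PendingOnewayQueue, kWarnLevel, kWarnPer, kTerminateLevel)
are illustrative only, not the real RpcState members, and the queued payload is
simplified to a string.

// Standalone sketch of the oneway throttling policy described above.
// Names are hypothetical; this is not the actual libbinder code.
#include <cstdio>
#include <queue>
#include <string>

enum class EnqueueResult { kOk, kTerminateSession };

class PendingOnewayQueue {
public:
    EnqueueResult enqueue(std::string transactionData) {
        mTodo.push(std::move(transactionData));
        size_t numPending = mTodo.size();

        // Hard cap: past this point the only safe option is to drop the whole
        // session, since a oneway caller cannot be handed an error the way
        // kernel binder can return one at enqueue time.
        if (numPending >= kTerminateLevel) {
            fprintf(stderr, "too many pending oneway calls (%zu), terminating\n", numPending);
            return EnqueueResult::kTerminateSession;
        }

        // Soft cap: warn periodically so a misbehaving client shows up in
        // logs well before the session is torn down.
        if (numPending >= kWarnLevel && numPending % kWarnPer == 0) {
            fprintf(stderr, "warning: %zu oneway calls pending\n", numPending);
        }
        return EnqueueResult::kOk;
    }

private:
    // Same order of magnitude as the values in the change; the exact numbers
    // are arbitrary, chosen to approximate how many average-sized transactions
    // fit before a transport buffer fills up.
    static constexpr size_t kWarnLevel = 1000;
    static constexpr size_t kWarnPer = 1000;
    static constexpr size_t kTerminateLevel = 10000;

    std::queue<std::string> mTodo;
};

int main() {
    PendingOnewayQueue queue;
    for (size_t i = 0; i < 10001; i++) {
        if (queue.enqueue("payload") == EnqueueResult::kTerminateSession) {
            // In the real code, this is the point where the RPC session
            // would be shut down.
            return 1;
        }
    }
    return 0;
}

The key design point mirrors the commit message: once the hard cap is reached
there is no way to push an error back to the oneway caller, so the only
recourse is to drop the session entirely.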
-rw-r--r--  libs/binder/RpcState.cpp            | 29
-rw-r--r--  libs/binder/tests/binderRpcTest.cpp | 34
2 files changed, 57 insertions(+), 6 deletions(-)
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 7e731f3426..3113841d27 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -617,7 +617,7 @@ status_t RpcState::processTransactInternal(const base::unique_fd& fd, const sp<R
             (void)session->shutdownAndWait(false);
             replyStatus = BAD_VALUE;
         } else if (transaction->flags & IBinder::FLAG_ONEWAY) {
-            std::lock_guard<std::mutex> _l(mNodeMutex);
+            std::unique_lock<std::mutex> _l(mNodeMutex);
             auto it = mNodeForAddress.find(addr);
             if (it->second.binder.promote() != target) {
                 ALOGE("Binder became invalid during transaction. Bad client? %s",
@@ -626,16 +626,33 @@ status_t RpcState::processTransactInternal(const base::unique_fd& fd, const sp<R
             } else if (transaction->asyncNumber != it->second.asyncNumber) {
                 // we need to process some other asynchronous transaction
                 // first
-                // TODO(b/183140903): limit enqueues/detect overfill for bad client
-                // TODO(b/183140903): detect when an object is deleted when it still has
-                // pending async transactions
                 it->second.asyncTodo.push(BinderNode::AsyncTodo{
                         .ref = target,
                         .data = std::move(transactionData),
                         .asyncNumber = transaction->asyncNumber,
                 });
-                LOG_RPC_DETAIL("Enqueuing %" PRId64 " on %s", transaction->asyncNumber,
-                               addr.toString().c_str());
+
+                size_t numPending = it->second.asyncTodo.size();
+                LOG_RPC_DETAIL("Enqueuing %" PRId64 " on %s (%zu pending)",
+                               transaction->asyncNumber, addr.toString().c_str(), numPending);
+
+                constexpr size_t kArbitraryOnewayCallTerminateLevel = 10000;
+                constexpr size_t kArbitraryOnewayCallWarnLevel = 1000;
+                constexpr size_t kArbitraryOnewayCallWarnPer = 1000;
+
+                if (numPending >= kArbitraryOnewayCallWarnLevel) {
+                    if (numPending >= kArbitraryOnewayCallTerminateLevel) {
+                        ALOGE("WARNING: %zu pending oneway transactions. Terminating!", numPending);
+                        _l.unlock();
+                        (void)session->shutdownAndWait(false);
+                        return FAILED_TRANSACTION;
+                    }
+
+                    if (numPending % kArbitraryOnewayCallWarnPer == 0) {
+                        ALOGW("Warning: many oneway transactions built up on %p (%zu)",
+                              target.get(), numPending);
+                    }
+                }
                 return OK;
             }
         }
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index c84666047d..82f8a3e273 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -941,6 +941,40 @@ TEST_P(BinderRpc, OnewayCallQueueing) {
     for (auto& t : threads) t.join();
 }
 
+TEST_P(BinderRpc, OnewayCallExhaustion) {
+    constexpr size_t kNumClients = 2;
+    constexpr size_t kTooLongMs = 1000;
+
+    auto proc = createRpcTestSocketServerProcess(kNumClients /*threads*/, 2 /*sessions*/);
+
+    // Build up oneway calls on the second session to make sure it terminates
+    // and shuts down. The first session should be unaffected (proc destructor
+    // checks the first session).
+    auto iface = interface_cast<IBinderRpcTest>(proc.proc.sessions.at(1).root);
+
+    std::vector<std::thread> threads;
+    for (size_t i = 0; i < kNumClients; i++) {
+        // one of these threads will get stuck queueing a transaction once the
+        // socket fills up, the other will be able to fill up transactions on
+        // this object
+        threads.push_back(std::thread([&] {
+            while (iface->sleepMsAsync(kTooLongMs).isOk()) {
+            }
+        }));
+    }
+    for (auto& t : threads) t.join();
+
+    Status status = iface->sleepMsAsync(kTooLongMs);
+    EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+
+    // the second session should be shutdown in the other process by the time we
+    // are able to join above (it'll only be hung up once it finishes processing
+    // any pending commands). We need to erase this session from the record
+    // here, so that the destructor for our session won't check that this
+    // session is valid, but we still want it to test the other session.
+    proc.proc.sessions.erase(proc.proc.sessions.begin() + 1);
+}
+
 TEST_P(BinderRpc, Callbacks) {
     const static std::string kTestString = "good afternoon!";