From 5ae62560923bb111d3092d96a4ceb8cf3b8965d3 Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Thu, 10 Jun 2021 03:21:42 +0000 Subject: libbinder: RpcState pass connection, not fd In preparation for better nested commands logic, which requires RpcState to keep track of when transactions can be nested. Bug: 167966510 Test: binderRpcTest Change-Id: Ib1328136bf706c069e0b3c1b8e7c3416d4ff32a7 --- libs/binder/RpcSession.cpp | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'libs/binder/RpcSession.cpp') diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp index a759ae36c3..b2d1a1a869 100644 --- a/libs/binder/RpcSession.cpp +++ b/libs/binder/RpcSession.cpp @@ -108,7 +108,7 @@ sp RpcSession::getRootObject() { status_t status = ExclusiveConnection::find(sp::fromExisting(this), ConnectionUse::CLIENT, &connection); if (status != OK) return nullptr; - return state()->getRootObject(connection.fd(), sp::fromExisting(this)); + return state()->getRootObject(connection.get(), sp::fromExisting(this)); } status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) { @@ -116,7 +116,7 @@ status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) { status_t status = ExclusiveConnection::find(sp::fromExisting(this), ConnectionUse::CLIENT, &connection); if (status != OK) return status; - return state()->getMaxThreads(connection.fd(), sp::fromExisting(this), maxThreads); + return state()->getMaxThreads(connection.get(), sp::fromExisting(this), maxThreads); } bool RpcSession::shutdownAndWait(bool wait) { @@ -146,7 +146,7 @@ status_t RpcSession::transact(const sp& binder, uint32_t code, const Pa : ConnectionUse::CLIENT, &connection); if (status != OK) return status; - return state()->transact(connection.fd(), binder, code, data, + return state()->transact(connection.get(), binder, code, data, sp::fromExisting(this), reply, flags); } @@ -155,7 +155,7 @@ status_t RpcSession::sendDecStrong(const RpcAddress& address) { status_t status = ExclusiveConnection::find(sp::fromExisting(this), ConnectionUse::CLIENT_REFCOUNT, &connection); if (status != OK) return status; - return state()->sendDecStrong(connection.fd(), sp::fromExisting(this), address); + return state()->sendDecStrong(connection.get(), sp::fromExisting(this), address); } std::unique_ptr RpcSession::FdTrigger::make() { @@ -225,7 +225,7 @@ status_t RpcSession::readId() { ConnectionUse::CLIENT, &connection); if (status != OK) return status; - status = state()->getSessionId(connection.fd(), sp::fromExisting(this), &id); + status = state()->getSessionId(connection.get(), sp::fromExisting(this), &id); if (status != OK) return status; LOG_RPC_DETAIL("RpcSession %p has id %d", this, id); @@ -265,8 +265,7 @@ RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(base::unique_fd fd) { // be able to do nested calls (we can't only read from it) sp connection = assignServerToThisThread(std::move(fd)); - status_t status = - mState->readConnectionInit(connection->fd, sp::fromExisting(this)); + status_t status = mState->readConnectionInit(connection, sp::fromExisting(this)); return PreJoinSetupResult{ .connection = std::move(connection), @@ -279,7 +278,7 @@ void RpcSession::join(sp&& session, PreJoinSetupResult&& setupResult if (setupResult.status == OK) { while (true) { - status_t status = session->state()->getAndExecuteCommand(connection->fd, session, + status_t status = session->state()->getAndExecuteCommand(connection, session, RpcState::CommandType::ANY); if (status != OK) { LOG_RPC_DETAIL("Binder connection thread 
closing w/ status %s", @@ -454,8 +453,7 @@ bool RpcSession::addClientConnection(unique_fd fd) { mClientConnections.push_back(connection); } - status_t status = - mState->sendConnectionInit(connection->fd, sp::fromExisting(this)); + status_t status = mState->sendConnectionInit(connection, sp::fromExisting(this)); { std::lock_guard _l(mMutex); -- cgit v1.2.3-59-g8ed1b From c7d40135ed5fb30434c95f3058324b0af6916711 Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Thu, 10 Jun 2021 03:42:11 +0000 Subject: libbinder: RPC disallow nested oneway transactions Previously, nested transactions were accidentally allowed while processing oneway transactions. This changes things so that nested transactions are only explicitly allowed when a synchronous transaction is being processed (like how kernel binder is). Future considerations: this CL makes it more explicit that we allow refcount transactions as part of nested transactions. This is okay because 'drainCommands' will process these, but there might be some delay. We could make refcount behavior nicer if we always preferred using an active threadpool (if one is available) to process them. Bug: 167966510 Test: binderRpcTest Change-Id: Iaeb472896654ff4bcd75b20394f8f3230febaabf --- libs/binder/RpcSession.cpp | 24 +++++++++--- libs/binder/RpcState.cpp | 10 ++++- libs/binder/include/binder/RpcSession.h | 2 + libs/binder/tests/IBinderRpcTest.aidl | 1 + libs/binder/tests/binderRpcTest.cpp | 69 ++++++++++++++++++++------------- 5 files changed, 73 insertions(+), 33 deletions(-) (limited to 'libs/binder/RpcSession.cpp') diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp index b2d1a1a869..4a6362a1ab 100644 --- a/libs/binder/RpcSession.cpp +++ b/libs/binder/RpcSession.cpp @@ -541,13 +541,27 @@ status_t RpcSession::ExclusiveConnection::find(const sp& session, Co (session->mClientConnectionsOffset + 1) % session->mClientConnections.size(); } - // USE SERVING SOCKET (for nested transaction) - // - // asynchronous calls cannot be nested + // USE SERVING SOCKET (e.g. nested transaction) if (use != ConnectionUse::CLIENT_ASYNC) { + sp exclusiveServer; // server connections are always assigned to a thread - findConnection(tid, &exclusive, nullptr /*available*/, session->mServerConnections, - 0 /* index hint */); + findConnection(tid, &exclusiveServer, nullptr /*available*/, + session->mServerConnections, 0 /* index hint */); + + // asynchronous calls cannot be nested, we currently allow ref count + // calls to be nested (so that you can use this without having extra + // threads). Note 'drainCommands' is used so that these ref counts can't + // build up. 
+ if (exclusiveServer != nullptr) { + if (exclusiveServer->allowNested) { + // guaranteed to be processed as nested command + exclusive = exclusiveServer; + } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) { + // prefer available socket, but if we don't have one, don't + // wait for one + exclusive = exclusiveServer; + } + } } // if our thread is already using a connection, prioritize using that diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp index 5a2015691f..050f4fbeb3 100644 --- a/libs/binder/RpcState.cpp +++ b/libs/binder/RpcState.cpp @@ -633,6 +633,7 @@ processTransactInternalTailCall: // TODO(b/182939933): heap allocation just for lookup in mNodeForAddress, // maybe add an RpcAddress 'view' if the type remains 'heavy' auto addr = RpcAddress::fromRawEmbedded(&transaction->address); + bool oneway = transaction->flags & IBinder::FLAG_ONEWAY; status_t replyStatus = OK; sp target; @@ -661,7 +662,7 @@ processTransactInternalTailCall: addr.toString().c_str()); (void)session->shutdownAndWait(false); replyStatus = BAD_VALUE; - } else if (transaction->flags & IBinder::FLAG_ONEWAY) { + } else if (oneway) { std::unique_lock _l(mNodeMutex); auto it = mNodeForAddress.find(addr); if (it->second.binder.promote() != target) { @@ -718,7 +719,12 @@ processTransactInternalTailCall: data.markForRpc(session); if (target) { + bool origAllowNested = connection->allowNested; + connection->allowNested = !oneway; + replyStatus = target->transact(transaction->code, data, &reply, transaction->flags); + + connection->allowNested = origAllowNested; } else { LOG_RPC_DETAIL("Got special transaction %u", transaction->code); @@ -754,7 +760,7 @@ processTransactInternalTailCall: } } - if (transaction->flags & IBinder::FLAG_ONEWAY) { + if (oneway) { if (replyStatus != OK) { ALOGW("Oneway call failed with error: %d", replyStatus); } diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h index 27baa9d8d9..e40154bb4a 100644 --- a/libs/binder/include/binder/RpcSession.h +++ b/libs/binder/include/binder/RpcSession.h @@ -191,6 +191,8 @@ private: // whether this or another thread is currently using this fd to make // or receive transactions. 
std::optional exclusiveTid; + + bool allowNested = false; }; status_t readId(); diff --git a/libs/binder/tests/IBinderRpcTest.aidl b/libs/binder/tests/IBinderRpcTest.aidl index b0c8b2d8b3..9e1078870c 100644 --- a/libs/binder/tests/IBinderRpcTest.aidl +++ b/libs/binder/tests/IBinderRpcTest.aidl @@ -55,6 +55,7 @@ interface IBinderRpcTest { oneway void sleepMsAsync(int ms); void doCallback(IBinderRpcCallback callback, boolean isOneway, boolean delayed, @utf8InCpp String value); + oneway void doCallbackAsync(IBinderRpcCallback callback, boolean isOneway, boolean delayed, @utf8InCpp String value); void die(boolean cleanup); void scheduleShutdown(); diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp index a79295a792..69682b0ae5 100644 --- a/libs/binder/tests/binderRpcTest.cpp +++ b/libs/binder/tests/binderRpcTest.cpp @@ -214,7 +214,8 @@ public: if (delayed) { std::thread([=]() { ALOGE("Executing delayed callback: '%s'", value.c_str()); - (void)doCallback(callback, oneway, false, value); + Status status = doCallback(callback, oneway, false, value); + ALOGE("Delayed callback status: '%s'", status.toString8().c_str()); }).detach(); return Status::ok(); } @@ -226,6 +227,11 @@ public: return callback->sendCallback(value); } + Status doCallbackAsync(const sp& callback, bool oneway, bool delayed, + const std::string& value) override { + return doCallback(callback, oneway, delayed, value); + } + Status die(bool cleanup) override { if (cleanup) { exit(1); @@ -978,31 +984,42 @@ TEST_P(BinderRpc, OnewayCallExhaustion) { TEST_P(BinderRpc, Callbacks) { const static std::string kTestString = "good afternoon!"; - for (bool oneway : {true, false}) { - for (bool delayed : {true, false}) { - auto proc = createRpcTestSocketServerProcess(1, 1, 1); - auto cb = sp::make(); - - EXPECT_OK(proc.rootIface->doCallback(cb, oneway, delayed, kTestString)); - - using std::literals::chrono_literals::operator""s; - std::unique_lock _l(cb->mMutex); - cb->mCv.wait_for(_l, 1s, [&] { return !cb->mValues.empty(); }); - - EXPECT_EQ(cb->mValues.size(), 1) << "oneway: " << oneway << "delayed: " << delayed; - if (cb->mValues.empty()) continue; - EXPECT_EQ(cb->mValues.at(0), kTestString) - << "oneway: " << oneway << "delayed: " << delayed; - - // since we are severing the connection, we need to go ahead and - // tell the server to shutdown and exit so that waitpid won't hang - EXPECT_OK(proc.rootIface->scheduleShutdown()); - - // since this session has a reverse connection w/ a threadpool, we - // need to manually shut it down - EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true)); - - proc.expectAlreadyShutdown = true; + for (bool callIsOneway : {true, false}) { + for (bool callbackIsOneway : {true, false}) { + for (bool delayed : {true, false}) { + auto proc = createRpcTestSocketServerProcess(1, 1, 1); + auto cb = sp::make(); + + if (callIsOneway) { + EXPECT_OK(proc.rootIface->doCallbackAsync(cb, callbackIsOneway, delayed, + kTestString)); + } else { + EXPECT_OK( + proc.rootIface->doCallback(cb, callbackIsOneway, delayed, kTestString)); + } + + using std::literals::chrono_literals::operator""s; + std::unique_lock _l(cb->mMutex); + cb->mCv.wait_for(_l, 1s, [&] { return !cb->mValues.empty(); }); + + EXPECT_EQ(cb->mValues.size(), 1) + << "callIsOneway: " << callIsOneway + << " callbackIsOneway: " << callbackIsOneway << " delayed: " << delayed; + if (cb->mValues.empty()) continue; + EXPECT_EQ(cb->mValues.at(0), kTestString) + << "callIsOneway: " << callIsOneway + << " callbackIsOneway: " 
<< callbackIsOneway << " delayed: " << delayed; + + // since we are severing the connection, we need to go ahead and + // tell the server to shutdown and exit so that waitpid won't hang + EXPECT_OK(proc.rootIface->scheduleShutdown()); + + // since this session has a reverse connection w/ a threadpool, we + // need to manually shut it down + EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true)); + + proc.expectAlreadyShutdown = true; + } } } } -- cgit v1.2.3-59-g8ed1b From 19fc9f7991f27fd75b6a1d872e91d7377b89401d Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Thu, 10 Jun 2021 03:57:30 +0000 Subject: libbinder: RPC disambiguate server/client Now: - server: RpcServer - client: a client of an RpcServer - incoming: a thread processing commands (either as part of an RpcServer's sessions or back to a client which has a threadpool) - outgoing: a thread for sending commands (either to an RpcServer's sessions or back to a client which has a threadpool) Bug: 167966510 Test: binderRpcTest Change-Id: Iea286ab0ff6f9fb775994247003b8d29c999e10a --- libs/binder/RpcServer.cpp | 6 +-- libs/binder/RpcSession.cpp | 75 +++++++++++++++++---------------- libs/binder/RpcState.cpp | 14 +++--- libs/binder/RpcState.h | 6 +-- libs/binder/RpcWireFormat.h | 2 +- libs/binder/include/binder/RpcServer.h | 4 +- libs/binder/include/binder/RpcSession.h | 28 ++++++------ 7 files changed, 68 insertions(+), 67 deletions(-) (limited to 'libs/binder/RpcSession.cpp') diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp index 60be406f6f..ad377d3737 100644 --- a/libs/binder/RpcServer.cpp +++ b/libs/binder/RpcServer.cpp @@ -301,7 +301,7 @@ void RpcServer::establishConnection(sp&& server, base::unique_fd clie } if (reverse) { - LOG_ALWAYS_FATAL_IF(!session->addClientConnection(std::move(clientFd)), + LOG_ALWAYS_FATAL_IF(!session->addOutgoingConnection(std::move(clientFd)), "server state must already be initialized"); return; } @@ -350,7 +350,7 @@ bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) { return true; } -void RpcServer::onSessionLockedAllServerThreadsEnded(const sp& session) { +void RpcServer::onSessionLockedAllIncomingThreadsEnded(const sp& session) { auto id = session->mId; LOG_ALWAYS_FATAL_IF(id == std::nullopt, "Server sessions must be initialized with ID"); LOG_RPC_DETAIL("Dropping session %d", *id); @@ -362,7 +362,7 @@ void RpcServer::onSessionLockedAllServerThreadsEnded(const sp& sessi (void)mSessions.erase(it); } -void RpcServer::onSessionServerThreadEnded() { +void RpcServer::onSessionIncomingThreadEnded() { mShutdownCv.notify_all(); } diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp index 4a6362a1ab..3dbd11fd37 100644 --- a/libs/binder/RpcSession.cpp +++ b/libs/binder/RpcSession.cpp @@ -51,7 +51,7 @@ RpcSession::~RpcSession() { LOG_RPC_DETAIL("RpcSession destroyed %p", this); std::lock_guard _l(mMutex); - LOG_ALWAYS_FATAL_IF(mServerConnections.size() != 0, + LOG_ALWAYS_FATAL_IF(mIncomingConnections.size() != 0, "Should not be able to destroy a session with servers in use."); } @@ -61,10 +61,10 @@ sp RpcSession::make() { void RpcSession::setMaxThreads(size_t threads) { std::lock_guard _l(mMutex); - LOG_ALWAYS_FATAL_IF(!mClientConnections.empty() || !mServerConnections.empty(), + LOG_ALWAYS_FATAL_IF(!mOutgoingConnections.empty() || !mIncomingConnections.empty(), "Must set max threads before setting up connections, but has %zu client(s) " "and %zu server(s)", - mClientConnections.size(), mServerConnections.size()); + mOutgoingConnections.size(), 
mIncomingConnections.size()); mMaxThreads = threads; } @@ -100,7 +100,7 @@ bool RpcSession::addNullDebuggingClient() { return false; } - return addClientConnection(std::move(serverFd)); + return addOutgoingConnection(std::move(serverFd)); } sp RpcSession::getRootObject() { @@ -233,13 +233,13 @@ status_t RpcSession::readId() { return OK; } -void RpcSession::WaitForShutdownListener::onSessionLockedAllServerThreadsEnded( +void RpcSession::WaitForShutdownListener::onSessionLockedAllIncomingThreadsEnded( const sp& session) { (void)session; mShutdown = true; } -void RpcSession::WaitForShutdownListener::onSessionServerThreadEnded() { +void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() { mCv.notify_all(); } @@ -263,7 +263,7 @@ void RpcSession::preJoinThreadOwnership(std::thread thread) { RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(base::unique_fd fd) { // must be registered to allow arbitrary client code executing commands to // be able to do nested calls (we can't only read from it) - sp connection = assignServerToThisThread(std::move(fd)); + sp connection = assignIncomingConnectionToThisThread(std::move(fd)); status_t status = mState->readConnectionInit(connection, sp::fromExisting(this)); @@ -291,7 +291,7 @@ void RpcSession::join(sp&& session, PreJoinSetupResult&& setupResult statusToString(setupResult.status).c_str()); } - LOG_ALWAYS_FATAL_IF(!session->removeServerConnection(connection), + LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection), "bad state: connection object guaranteed to be in list"); sp listener; @@ -308,7 +308,7 @@ void RpcSession::join(sp&& session, PreJoinSetupResult&& setupResult session = nullptr; if (listener != nullptr) { - listener->onSessionServerThreadEnded(); + listener->onSessionIncomingThreadEnded(); } } @@ -319,9 +319,9 @@ wp RpcSession::server() { bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) { { std::lock_guard _l(mMutex); - LOG_ALWAYS_FATAL_IF(mClientConnections.size() != 0, + LOG_ALWAYS_FATAL_IF(mOutgoingConnections.size() != 0, "Must only setup session once, but already has %zu clients", - mClientConnections.size()); + mOutgoingConnections.size()); } if (!setupOneSocketConnection(addr, RPC_SESSION_ID_NEW, false /*reverse*/)) return false; @@ -427,7 +427,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, int32_t LOG_ALWAYS_FATAL_IF(!ownershipTransferred); return true; } else { - return addClientConnection(std::move(serverFd)); + return addOutgoingConnection(std::move(serverFd)); } } @@ -435,7 +435,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, int32_t return false; } -bool RpcSession::addClientConnection(unique_fd fd) { +bool RpcSession::addOutgoingConnection(unique_fd fd) { sp connection = sp::make(); { std::lock_guard _l(mMutex); @@ -450,7 +450,7 @@ bool RpcSession::addClientConnection(unique_fd fd) { connection->fd = std::move(fd); connection->exclusiveTid = gettid(); - mClientConnections.push_back(connection); + mOutgoingConnections.push_back(connection); } status_t status = mState->sendConnectionInit(connection, sp::fromExisting(this)); @@ -480,25 +480,26 @@ bool RpcSession::setForServer(const wp& server, const wp RpcSession::assignServerToThisThread(unique_fd fd) { +sp RpcSession::assignIncomingConnectionToThisThread(unique_fd fd) { std::lock_guard _l(mMutex); sp session = sp::make(); session->fd = std::move(fd); session->exclusiveTid = gettid(); - mServerConnections.push_back(session); + mIncomingConnections.push_back(session); 
return session; } -bool RpcSession::removeServerConnection(const sp& connection) { +bool RpcSession::removeIncomingConnection(const sp& connection) { std::lock_guard _l(mMutex); - if (auto it = std::find(mServerConnections.begin(), mServerConnections.end(), connection); - it != mServerConnections.end()) { - mServerConnections.erase(it); - if (mServerConnections.size() == 0) { + if (auto it = std::find(mIncomingConnections.begin(), mIncomingConnections.end(), connection); + it != mIncomingConnections.end()) { + mIncomingConnections.erase(it); + if (mIncomingConnections.size() == 0) { sp listener = mEventListener.promote(); if (listener) { - listener->onSessionLockedAllServerThreadsEnded(sp::fromExisting(this)); + listener->onSessionLockedAllIncomingThreadsEnded( + sp::fromExisting(this)); } } return true; @@ -523,11 +524,11 @@ status_t RpcSession::ExclusiveConnection::find(const sp& session, Co // CHECK FOR DEDICATED CLIENT SOCKET // // A server/looper should always use a dedicated connection if available - findConnection(tid, &exclusive, &available, session->mClientConnections, - session->mClientConnectionsOffset); + findConnection(tid, &exclusive, &available, session->mOutgoingConnections, + session->mOutgoingConnectionsOffset); // WARNING: this assumes a server cannot request its client to send - // a transaction, as mServerConnections is excluded below. + // a transaction, as mIncomingConnections is excluded below. // // Imagine we have more than one thread in play, and a single thread // sends a synchronous, then an asynchronous command. Imagine the @@ -537,29 +538,29 @@ status_t RpcSession::ExclusiveConnection::find(const sp& session, Co // command. So, we move to considering the second available thread // for subsequent calls. if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) { - session->mClientConnectionsOffset = - (session->mClientConnectionsOffset + 1) % session->mClientConnections.size(); + session->mOutgoingConnectionsOffset = (session->mOutgoingConnectionsOffset + 1) % + session->mOutgoingConnections.size(); } // USE SERVING SOCKET (e.g. nested transaction) if (use != ConnectionUse::CLIENT_ASYNC) { - sp exclusiveServer; + sp exclusiveIncoming; // server connections are always assigned to a thread - findConnection(tid, &exclusiveServer, nullptr /*available*/, - session->mServerConnections, 0 /* index hint */); + findConnection(tid, &exclusiveIncoming, nullptr /*available*/, + session->mIncomingConnections, 0 /* index hint */); // asynchronous calls cannot be nested, we currently allow ref count // calls to be nested (so that you can use this without having extra // threads). Note 'drainCommands' is used so that these ref counts can't // build up. - if (exclusiveServer != nullptr) { - if (exclusiveServer->allowNested) { + if (exclusiveIncoming != nullptr) { + if (exclusiveIncoming->allowNested) { // guaranteed to be processed as nested command - exclusive = exclusiveServer; + exclusive = exclusiveIncoming; } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) { // prefer available socket, but if we don't have one, don't // wait for one - exclusive = exclusiveServer; + exclusive = exclusiveIncoming; } } } @@ -575,16 +576,16 @@ status_t RpcSession::ExclusiveConnection::find(const sp& session, Co break; } - if (session->mClientConnections.size() == 0) { + if (session->mOutgoingConnections.size() == 0) { ALOGE("Session has no client connections. This is required for an RPC server to make " "any non-nested (e.g. 
oneway or on another thread) calls. Use: %d. Server " "connections: %zu", - static_cast(use), session->mServerConnections.size()); + static_cast(use), session->mIncomingConnections.size()); return WOULD_BLOCK; } LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...", - session->mClientConnections.size(), session->mServerConnections.size()); + session->mOutgoingConnections.size(), session->mIncomingConnections.size()); session->mAvailableConnectionCv.wait(_l); } session->mWaitingThreads--; diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp index 050f4fbeb3..8dd6dafd60 100644 --- a/libs/binder/RpcState.cpp +++ b/libs/binder/RpcState.cpp @@ -272,7 +272,7 @@ status_t RpcState::rpcRec(const sp& connection, status_t RpcState::sendConnectionInit(const sp& connection, const sp& session) { - RpcClientConnectionInit init{ + RpcOutgoingConnectionInit init{ .msg = RPC_CONNECTION_INIT_OKAY, }; return rpcSend(connection, session, "connection init", &init, sizeof(init)); @@ -280,7 +280,7 @@ status_t RpcState::sendConnectionInit(const sp& conne status_t RpcState::readConnectionInit(const sp& connection, const sp& session) { - RpcClientConnectionInit init; + RpcOutgoingConnectionInit init; if (status_t status = rpcRec(connection, session, "connection init", &init, sizeof(init)); status != OK) return status; @@ -470,7 +470,7 @@ status_t RpcState::waitForReply(const sp& connection, if (command.command == RPC_COMMAND_REPLY) break; - if (status_t status = processServerCommand(connection, session, command, CommandType::ANY); + if (status_t status = processCommand(connection, session, command, CommandType::ANY); status != OK) return status; } @@ -539,7 +539,7 @@ status_t RpcState::getAndExecuteCommand(const sp& con status != OK) return status; - return processServerCommand(connection, session, command, type); + return processCommand(connection, session, command, type); } status_t RpcState::drainCommands(const sp& connection, @@ -553,9 +553,9 @@ status_t RpcState::drainCommands(const sp& connection return OK; } -status_t RpcState::processServerCommand(const sp& connection, - const sp& session, const RpcWireHeader& command, - CommandType type) { +status_t RpcState::processCommand(const sp& connection, + const sp& session, const RpcWireHeader& command, + CommandType type) { IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull(); IPCThreadState::SpGuard spGuard{ .address = __builtin_frame_address(0), diff --git a/libs/binder/RpcState.h b/libs/binder/RpcState.h index 0dcbb22755..d306595d83 100644 --- a/libs/binder/RpcState.h +++ b/libs/binder/RpcState.h @@ -145,9 +145,9 @@ private: [[nodiscard]] status_t waitForReply(const sp& connection, const sp& session, Parcel* reply); - [[nodiscard]] status_t processServerCommand(const sp& connection, - const sp& session, - const RpcWireHeader& command, CommandType type); + [[nodiscard]] status_t processCommand(const sp& connection, + const sp& session, + const RpcWireHeader& command, CommandType type); [[nodiscard]] status_t processTransact(const sp& connection, const sp& session, const RpcWireHeader& command); diff --git a/libs/binder/RpcWireFormat.h b/libs/binder/RpcWireFormat.h index b5e5bc1e0f..92da856b34 100644 --- a/libs/binder/RpcWireFormat.h +++ b/libs/binder/RpcWireFormat.h @@ -43,7 +43,7 @@ struct RpcConnectionHeader { * transaction. The main use of this is in order to control the timing for when * a reverse connection is setup. 
*/ -struct RpcClientConnectionInit { +struct RpcOutgoingConnectionInit { char msg[4]; uint8_t reserved[4]; }; diff --git a/libs/binder/include/binder/RpcServer.h b/libs/binder/include/binder/RpcServer.h index 4e6934b276..fdcb3a88c6 100644 --- a/libs/binder/include/binder/RpcServer.h +++ b/libs/binder/include/binder/RpcServer.h @@ -155,8 +155,8 @@ private: friend sp; RpcServer(); - void onSessionLockedAllServerThreadsEnded(const sp& session) override; - void onSessionServerThreadEnded() override; + void onSessionLockedAllIncomingThreadsEnded(const sp& session) override; + void onSessionIncomingThreadEnded() override; static void establishConnection(sp&& server, base::unique_fd clientFd); bool setupSocketServer(const RpcSocketAddress& address); diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h index e40154bb4a..eaa86dda3e 100644 --- a/libs/binder/include/binder/RpcSession.h +++ b/libs/binder/include/binder/RpcSession.h @@ -170,14 +170,14 @@ private: class EventListener : public virtual RefBase { public: - virtual void onSessionLockedAllServerThreadsEnded(const sp& session) = 0; - virtual void onSessionServerThreadEnded() = 0; + virtual void onSessionLockedAllIncomingThreadsEnded(const sp& session) = 0; + virtual void onSessionIncomingThreadEnded() = 0; }; class WaitForShutdownListener : public EventListener { public: - void onSessionLockedAllServerThreadsEnded(const sp& session) override; - void onSessionServerThreadEnded() override; + void onSessionLockedAllIncomingThreadsEnded(const sp& session) override; + void onSessionIncomingThreadEnded() override; void waitForShutdown(std::unique_lock& lock); private: @@ -219,12 +219,12 @@ private: [[nodiscard]] bool setupSocketClient(const RpcSocketAddress& address); [[nodiscard]] bool setupOneSocketConnection(const RpcSocketAddress& address, int32_t sessionId, bool server); - [[nodiscard]] bool addClientConnection(base::unique_fd fd); + [[nodiscard]] bool addOutgoingConnection(base::unique_fd fd); [[nodiscard]] bool setForServer(const wp& server, const wp& eventListener, int32_t sessionId); - sp assignServerToThisThread(base::unique_fd fd); - [[nodiscard]] bool removeServerConnection(const sp& connection); + sp assignIncomingConnectionToThisThread(base::unique_fd fd); + [[nodiscard]] bool removeIncomingConnection(const sp& connection); enum class ConnectionUse { CLIENT, @@ -256,13 +256,13 @@ private: bool mReentrant = false; }; - // On the other side of a session, for each of mClientConnections here, there should - // be one of mServerConnections on the other side (and vice versa). + // On the other side of a session, for each of mOutgoingConnections here, there should + // be one of mIncomingConnections on the other side (and vice versa). // // For the simplest session, a single server with one client, you would // have: - // - the server has a single 'mServerConnections' and a thread listening on this - // - the client has a single 'mClientConnections' and makes calls to this + // - the server has a single 'mIncomingConnections' and a thread listening on this + // - the client has a single 'mOutgoingConnections' and makes calls to this // - here, when the client makes a call, the server can call back into it // (nested calls), but outside of this, the client will only ever read // calls from the server when it makes a call itself. 
@@ -288,9 +288,9 @@ private: std::condition_variable mAvailableConnectionCv; // for mWaitingThreads size_t mWaitingThreads = 0; // hint index into clients, ++ when sending an async transaction - size_t mClientConnectionsOffset = 0; - std::vector> mClientConnections; - std::vector> mServerConnections; + size_t mOutgoingConnectionsOffset = 0; + std::vector> mOutgoingConnections; + std::vector> mIncomingConnections; std::map mThreads; }; -- cgit v1.2.3-59-g8ed1b From 7b8bc4c6997c45c8f896e16537f7abc61fe3ac2e Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Thu, 10 Jun 2021 22:50:27 +0000 Subject: libbinder: RpcSession exposes sp This object guarantees a strong pointer IFF it is associated with a server. The wp<> return type here previously was for convenience (idk?) but users of it shouldn't be concerned with the underlying memory situation. Bug: 167966510 Test: binderRpcTest Change-Id: I6578c3a4246e1bd07f7697c11d4b56899b50245b --- libs/binder/RpcSession.cpp | 9 +++++++-- libs/binder/RpcState.cpp | 2 +- libs/binder/include/binder/RpcSession.h | 6 +++++- 3 files changed, 13 insertions(+), 4 deletions(-) (limited to 'libs/binder/RpcSession.cpp') diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp index 3dbd11fd37..f953a05c08 100644 --- a/libs/binder/RpcSession.cpp +++ b/libs/binder/RpcSession.cpp @@ -312,8 +312,13 @@ void RpcSession::join(sp&& session, PreJoinSetupResult&& setupResult } } -wp RpcSession::server() { - return mForServer; +sp RpcSession::server() { + RpcServer* unsafeServer = mForServer.unsafe_get(); + sp server = mForServer.promote(); + + LOG_ALWAYS_FATAL_IF((unsafeServer == nullptr) != (server == nullptr), + "wp<> is to avoid strong cycle only"); + return server; } bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) { diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp index 8dd6dafd60..967610977d 100644 --- a/libs/binder/RpcState.cpp +++ b/libs/binder/RpcState.cpp @@ -741,7 +741,7 @@ processTransactInternalTailCall: break; } default: { - sp server = session->server().promote(); + sp server = session->server(); if (server) { switch (transaction->code) { case RPC_SPECIAL_TRANSACT_GET_ROOT: { diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h index eaa86dda3e..218de205f5 100644 --- a/libs/binder/include/binder/RpcSession.h +++ b/libs/binder/include/binder/RpcSession.h @@ -118,7 +118,11 @@ public: ~RpcSession(); - wp server(); + /** + * Server if this session is created as part of a server (symmetrical to + * client servers). Otherwise, nullptr. + */ + sp server(); // internal only const std::unique_ptr& state() { return mState; } -- cgit v1.2.3-59-g8ed1b From 01a6bad2e1441c4ec89d6157dc663cb43c6d9cf9 Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Fri, 11 Jun 2021 00:59:20 +0000 Subject: libbinder: RPC session ID uses the long binder ID This is 'unguessable' (pending security review and constant time compare). Right now, it's unclear if we'll go with full TLS for on-device communication or use some other authentication scheme. However, this is being used similarly to TLS session tickets. 
Bug: 167966510 Test: binderRpcTest Change-Id: I4c5edd2de6cc3f6ae37b0815e7f45c7a08bac2b1 --- libs/binder/RpcServer.cpp | 34 +++++++++++++++++++++++---------- libs/binder/RpcSession.cpp | 22 ++++++++++----------- libs/binder/RpcState.cpp | 15 +++++---------- libs/binder/RpcState.h | 2 +- libs/binder/RpcWireFormat.h | 22 ++++++++++----------- libs/binder/include/binder/RpcAddress.h | 6 +----- libs/binder/include/binder/RpcServer.h | 4 ++-- libs/binder/include/binder/RpcSession.h | 9 ++++----- 8 files changed, 58 insertions(+), 56 deletions(-) (limited to 'libs/binder/RpcSession.cpp') diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp index ad377d3737..3599427f5a 100644 --- a/libs/binder/RpcServer.cpp +++ b/libs/binder/RpcServer.cpp @@ -270,14 +270,25 @@ void RpcServer::establishConnection(sp&& server, base::unique_fd clie return; } - if (header.sessionId == RPC_SESSION_ID_NEW) { + RpcAddress sessionId = RpcAddress::fromRawEmbedded(&header.sessionId); + + if (sessionId.isZero()) { if (reverse) { ALOGE("Cannot create a new session with a reverse connection, would leak"); return; } - LOG_ALWAYS_FATAL_IF(server->mSessionIdCounter >= INT32_MAX, "Out of session IDs"); - server->mSessionIdCounter++; + RpcAddress sessionId = RpcAddress::zero(); + size_t tries = 0; + do { + // don't block if there is some entropy issue + if (tries++ > 5) { + ALOGE("Cannot find new address: %s", sessionId.toString().c_str()); + return; + } + + sessionId = RpcAddress::random(true /*forServer*/); + } while (server->mSessions.end() != server->mSessions.find(sessionId)); session = RpcSession::make(); session->setMaxThreads(server->mMaxThreads); @@ -285,16 +296,17 @@ void RpcServer::establishConnection(sp&& server, base::unique_fd clie sp::fromExisting( static_cast( server.get())), - server->mSessionIdCounter)) { + sessionId)) { ALOGE("Failed to attach server to session"); return; } - server->mSessions[server->mSessionIdCounter] = session; + server->mSessions[sessionId] = session; } else { - auto it = server->mSessions.find(header.sessionId); + auto it = server->mSessions.find(sessionId); if (it == server->mSessions.end()) { - ALOGE("Cannot add thread, no record of session with ID %d", header.sessionId); + ALOGE("Cannot add thread, no record of session with ID %s", + sessionId.toString().c_str()); return; } session = it->second; @@ -353,12 +365,14 @@ bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) { void RpcServer::onSessionLockedAllIncomingThreadsEnded(const sp& session) { auto id = session->mId; LOG_ALWAYS_FATAL_IF(id == std::nullopt, "Server sessions must be initialized with ID"); - LOG_RPC_DETAIL("Dropping session %d", *id); + LOG_RPC_DETAIL("Dropping session with address %s", id->toString().c_str()); std::lock_guard _l(mLock); auto it = mSessions.find(*id); - LOG_ALWAYS_FATAL_IF(it == mSessions.end(), "Bad state, unknown session id %d", *id); - LOG_ALWAYS_FATAL_IF(it->second != session, "Bad state, session has id mismatch %d", *id); + LOG_ALWAYS_FATAL_IF(it == mSessions.end(), "Bad state, unknown session id %s", + id->toString().c_str()); + LOG_ALWAYS_FATAL_IF(it->second != session, "Bad state, session has id mismatch %s", + id->toString().c_str()); (void)mSessions.erase(it); } diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp index f953a05c08..931a876ac4 100644 --- a/libs/binder/RpcSession.cpp +++ b/libs/binder/RpcSession.cpp @@ -218,18 +218,17 @@ status_t RpcSession::readId() { LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client."); } - int32_t 
id; - ExclusiveConnection connection; status_t status = ExclusiveConnection::find(sp::fromExisting(this), ConnectionUse::CLIENT, &connection); if (status != OK) return status; - status = state()->getSessionId(connection.get(), sp::fromExisting(this), &id); + mId = RpcAddress::zero(); + status = state()->getSessionId(connection.get(), sp::fromExisting(this), + &mId.value()); if (status != OK) return status; - LOG_RPC_DETAIL("RpcSession %p has id %d", this, id); - mId = id; + LOG_RPC_DETAIL("RpcSession %p has id %s", this, mId->toString().c_str()); return OK; } @@ -329,7 +328,7 @@ bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) { mOutgoingConnections.size()); } - if (!setupOneSocketConnection(addr, RPC_SESSION_ID_NEW, false /*reverse*/)) return false; + if (!setupOneSocketConnection(addr, RpcAddress::zero(), false /*reverse*/)) return false; // TODO(b/189955605): we should add additional sessions dynamically // instead of all at once. @@ -366,7 +365,8 @@ bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) { return true; } -bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, int32_t id, bool reverse) { +bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const RpcAddress& id, + bool reverse) { for (size_t tries = 0; tries < 5; tries++) { if (tries > 0) usleep(10000); @@ -390,9 +390,9 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, int32_t return false; } - RpcConnectionHeader header{ - .sessionId = id, - }; + RpcConnectionHeader header{.options = 0}; + memcpy(&header.sessionId, &id.viewRawEmbedded(), sizeof(RpcWireAddress)); + if (reverse) header.options |= RPC_CONNECTION_OPTION_REVERSE; if (sizeof(header) != TEMP_FAILURE_RETRY(write(serverFd.get(), &header, sizeof(header)))) { @@ -469,7 +469,7 @@ bool RpcSession::addOutgoingConnection(unique_fd fd) { } bool RpcSession::setForServer(const wp& server, const wp& eventListener, - int32_t sessionId) { + const RpcAddress& sessionId) { LOG_ALWAYS_FATAL_IF(mForServer != nullptr); LOG_ALWAYS_FATAL_IF(server == nullptr); LOG_ALWAYS_FATAL_IF(mEventListener != nullptr); diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp index 15eec20d1d..fd2eff6870 100644 --- a/libs/binder/RpcState.cpp +++ b/libs/binder/RpcState.cpp @@ -369,7 +369,7 @@ status_t RpcState::getMaxThreads(const sp& connection } status_t RpcState::getSessionId(const sp& connection, - const sp& session, int32_t* sessionIdOut) { + const sp& session, RpcAddress* sessionIdOut) { Parcel data; data.markForRpc(session); Parcel reply; @@ -382,12 +382,7 @@ status_t RpcState::getSessionId(const sp& connection, return status; } - int32_t sessionId; - status = reply.readInt32(&sessionId); - if (status != OK) return status; - - *sessionIdOut = sessionId; - return OK; + return sessionIdOut->readFromParcel(reply); } status_t RpcState::transact(const sp& connection, @@ -767,9 +762,9 @@ processTransactInternalTailCall: } case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: { // for client connections, this should always report the value - // originally returned from the server - int32_t id = session->mId.value(); - replyStatus = reply.writeInt32(id); + // originally returned from the server, so this is asserting + // that it exists + replyStatus = session->mId.value().writeToParcel(&reply); break; } default: { diff --git a/libs/binder/RpcState.h b/libs/binder/RpcState.h index d306595d83..529dee534c 100644 --- a/libs/binder/RpcState.h +++ b/libs/binder/RpcState.h @@ -62,7 +62,7 @@ public: status_t 
getMaxThreads(const sp& connection, const sp& session, size_t* maxThreadsOut); status_t getSessionId(const sp& connection, - const sp& session, int32_t* sessionIdOut); + const sp& session, RpcAddress* sessionIdOut); [[nodiscard]] status_t transact(const sp& connection, const sp& address, uint32_t code, const Parcel& data, diff --git a/libs/binder/RpcWireFormat.h b/libs/binder/RpcWireFormat.h index 4bd8e36aad..2016483138 100644 --- a/libs/binder/RpcWireFormat.h +++ b/libs/binder/RpcWireFormat.h @@ -20,20 +20,26 @@ namespace android { #pragma clang diagnostic push #pragma clang diagnostic error "-Wpadded" -constexpr int32_t RPC_SESSION_ID_NEW = -1; - enum : uint8_t { RPC_CONNECTION_OPTION_REVERSE = 0x1, }; +constexpr uint64_t RPC_WIRE_ADDRESS_OPTION_CREATED = 1 << 0; // distinguish from '0' address +constexpr uint64_t RPC_WIRE_ADDRESS_OPTION_FOR_SERVER = 1 << 1; + +struct RpcWireAddress { + uint64_t options; + uint8_t address[32]; +}; + /** * This is sent to an RpcServer in order to request a new connection is created, * either as part of a new session or an existing session */ struct RpcConnectionHeader { - int32_t sessionId; + RpcWireAddress sessionId; uint8_t options; - uint8_t reserved[3]; + uint8_t reserved[7]; }; #define RPC_CONNECTION_INIT_OKAY "cci" @@ -89,14 +95,6 @@ struct RpcWireHeader { uint32_t reserved[2]; }; -constexpr uint64_t RPC_WIRE_ADDRESS_OPTION_CREATED = 1 << 0; // distinguish from '0' address -constexpr uint64_t RPC_WIRE_ADDRESS_OPTION_FOR_SERVER = 1 << 1; - -struct RpcWireAddress { - uint64_t options; - uint8_t address[32]; -}; - struct RpcWireTransaction { RpcWireAddress address; uint32_t code; diff --git a/libs/binder/include/binder/RpcAddress.h b/libs/binder/include/binder/RpcAddress.h index e856fa94ba..e428908c88 100644 --- a/libs/binder/include/binder/RpcAddress.h +++ b/libs/binder/include/binder/RpcAddress.h @@ -29,11 +29,7 @@ class Parcel; struct RpcWireAddress; /** - * This class represents an identifier of a binder object. - * - * The purpose of this class it to hide the ABI of an RpcWireAddress, and - * potentially allow us to change the size of it in the future (RpcWireAddress - * is PIMPL, essentially - although the type that is used here is not exposed). + * This class represents an identifier across an RPC boundary. 
*/ class RpcAddress { public: diff --git a/libs/binder/include/binder/RpcServer.h b/libs/binder/include/binder/RpcServer.h index fdcb3a88c6..c8d2857347 100644 --- a/libs/binder/include/binder/RpcServer.h +++ b/libs/binder/include/binder/RpcServer.h @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -171,8 +172,7 @@ private: std::map mConnectingThreads; sp mRootObject; wp mRootObjectWeak; - std::map> mSessions; - int32_t mSessionIdCounter = 0; + std::map> mSessions; std::unique_ptr mShutdownTrigger; std::condition_variable mShutdownCv; }; diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h index 218de205f5..1548e763b4 100644 --- a/libs/binder/include/binder/RpcSession.h +++ b/libs/binder/include/binder/RpcSession.h @@ -221,12 +221,12 @@ private: static void join(sp&& session, PreJoinSetupResult&& result); [[nodiscard]] bool setupSocketClient(const RpcSocketAddress& address); - [[nodiscard]] bool setupOneSocketConnection(const RpcSocketAddress& address, int32_t sessionId, - bool server); + [[nodiscard]] bool setupOneSocketConnection(const RpcSocketAddress& address, + const RpcAddress& sessionId, bool server); [[nodiscard]] bool addOutgoingConnection(base::unique_fd fd); [[nodiscard]] bool setForServer(const wp& server, const wp& eventListener, - int32_t sessionId); + const RpcAddress& sessionId); sp assignIncomingConnectionToThisThread(base::unique_fd fd); [[nodiscard]] bool removeIncomingConnection(const sp& connection); @@ -278,8 +278,7 @@ private: sp mShutdownListener; // used for client sessions wp mEventListener; // mForServer if server, mShutdownListener if client - // TODO(b/183988761): this shouldn't be guessable - std::optional mId; + std::optional mId; std::unique_ptr mShutdownTrigger; -- cgit v1.2.3-59-g8ed1b From b86e26b735ee912acf23c670398f2d88fdfe5994 Mon Sep 17 00:00:00 2001 From: Steven Moreland Date: Sat, 12 Jun 2021 00:35:58 +0000 Subject: libbinder: RPC skip init on /dev/null This started breaking the fuzzer, since we can't do a socket operation on /dev/null. Bug: N/A # yet! 
Test: fuzzer no longer crashes Change-Id: I881f63b85108ff488cb5798b1f0b96629b592329 --- libs/binder/RpcServer.cpp | 2 +- libs/binder/RpcSession.cpp | 11 +++++++---- libs/binder/include/binder/RpcSession.h | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'libs/binder/RpcSession.cpp') diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp index 3599427f5a..a8f3fa8f6f 100644 --- a/libs/binder/RpcServer.cpp +++ b/libs/binder/RpcServer.cpp @@ -313,7 +313,7 @@ void RpcServer::establishConnection(sp&& server, base::unique_fd clie } if (reverse) { - LOG_ALWAYS_FATAL_IF(!session->addOutgoingConnection(std::move(clientFd)), + LOG_ALWAYS_FATAL_IF(!session->addOutgoingConnection(std::move(clientFd), true), "server state must already be initialized"); return; } diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp index 931a876ac4..4f55eef2d1 100644 --- a/libs/binder/RpcSession.cpp +++ b/libs/binder/RpcSession.cpp @@ -100,7 +100,7 @@ bool RpcSession::addNullDebuggingClient() { return false; } - return addOutgoingConnection(std::move(serverFd)); + return addOutgoingConnection(std::move(serverFd), false); } sp RpcSession::getRootObject() { @@ -432,7 +432,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const Rp LOG_ALWAYS_FATAL_IF(!ownershipTransferred); return true; } else { - return addOutgoingConnection(std::move(serverFd)); + return addOutgoingConnection(std::move(serverFd), true); } } @@ -440,7 +440,7 @@ bool RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr, const Rp return false; } -bool RpcSession::addOutgoingConnection(unique_fd fd) { +bool RpcSession::addOutgoingConnection(unique_fd fd, bool init) { sp connection = sp::make(); { std::lock_guard _l(mMutex); @@ -458,7 +458,10 @@ bool RpcSession::addOutgoingConnection(unique_fd fd) { mOutgoingConnections.push_back(connection); } - status_t status = mState->sendConnectionInit(connection, sp::fromExisting(this)); + status_t status = OK; + if (init) { + mState->sendConnectionInit(connection, sp::fromExisting(this)); + } { std::lock_guard _l(mMutex); diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h index 1548e763b4..69c2a1a956 100644 --- a/libs/binder/include/binder/RpcSession.h +++ b/libs/binder/include/binder/RpcSession.h @@ -223,7 +223,7 @@ private: [[nodiscard]] bool setupSocketClient(const RpcSocketAddress& address); [[nodiscard]] bool setupOneSocketConnection(const RpcSocketAddress& address, const RpcAddress& sessionId, bool server); - [[nodiscard]] bool addOutgoingConnection(base::unique_fd fd); + [[nodiscard]] bool addOutgoingConnection(base::unique_fd fd, bool init); [[nodiscard]] bool setForServer(const wp& server, const wp& eventListener, const RpcAddress& sessionId); -- cgit v1.2.3-59-g8ed1b
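
Illustrative note (not part of the patches above): the "disallow nested oneway transactions" change boils down to a per-incoming-connection flag (allowNested) that is only set while a synchronous transaction is being processed, and that ExclusiveConnection::find() consults before reusing the serving connection. The sketch below is a simplified, hypothetical model of that rule; the function name mayUseServingConnection and the trimmed-down types are invented for illustration and do not exist in libbinder.

    // Simplified model of the connection-selection rule described in the
    // "RPC disallow nested oneway transactions" commit. Not libbinder code.
    #include <iostream>
    #include <optional>

    enum class Use { CLIENT, CLIENT_ASYNC, CLIENT_REFCOUNT };

    struct Connection {
        std::optional<int> exclusiveTid; // thread currently bound to this connection
        bool allowNested = false;        // true only while a synchronous transaction is in flight
    };

    // Returns true if the connection this thread is currently serving may carry
    // the outgoing call:
    // - oneway (async) calls are never nested
    // - other calls nest only while a synchronous transaction is being processed
    // - refcount commands may piggyback if no outgoing connection is available,
    //   relying on a later drainCommands() to process them
    bool mayUseServingConnection(const Connection& incoming, Use use, bool haveAvailableOutgoing) {
        if (use == Use::CLIENT_ASYNC) return false;
        if (incoming.allowNested) return true;
        if (use == Use::CLIENT_REFCOUNT && !haveAvailableOutgoing) return true;
        return false;
    }

    int main() {
        Connection serving;
        serving.allowNested = true;  // as set around target->transact() for synchronous calls
        std::cout << mayUseServingConnection(serving, Use::CLIENT, false) << "\n";          // 1
        serving.allowNested = false; // e.g. while handling a oneway transaction
        std::cout << mayUseServingConnection(serving, Use::CLIENT, false) << "\n";          // 0
        std::cout << mayUseServingConnection(serving, Use::CLIENT_REFCOUNT, false) << "\n"; // 1
    }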
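
Similarly, the "session ID uses the long binder ID" commit replaces the incrementing int32 session counter with a random, wide identifier and a bounded retry on collision. The sketch below is a minimal, self-contained approximation of that allocation loop, assuming a 32-byte identifier and an ordinary PRNG; it is not the RpcAddress implementation and the names are illustrative only.

    // Simplified sketch of server-side session-id allocation with a bounded
    // retry on collision, as described in the commit message. Not libbinder code.
    #include <array>
    #include <cstdint>
    #include <map>
    #include <random>

    using SessionId = std::array<uint8_t, 32>;

    SessionId randomId() {
        static std::mt19937_64 rng{std::random_device{}()};
        SessionId id{};
        for (auto& b : id) b = static_cast<uint8_t>(rng());
        return id;
    }

    // Picks an id not already present in 'sessions'; gives up after a few tries
    // so an entropy or bookkeeping problem cannot turn into an infinite loop.
    bool allocateSessionId(std::map<SessionId, int>& sessions, SessionId* out) {
        for (size_t tries = 0; tries < 5; tries++) {
            SessionId candidate = randomId();
            if (sessions.find(candidate) == sessions.end()) {
                sessions[candidate] = 1; // placeholder for the session object
                *out = candidate;
                return true;
            }
        }
        return false;
    }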