path: root/libs/binder/RpcState.cpp
author Steven Moreland <smoreland@google.com> 2021-04-24 01:54:26 +0000
committer Steven Moreland <smoreland@google.com> 2021-04-28 17:37:27 +0000
commit  f137de90de786d4d794362ef324ae29ef12b0085 (patch)
tree    8361674fd2a4cc3edcc7e7a88cd006eaa32469e8 /libs/binder/RpcState.cpp
parent  27f2ed6973774978dc123854fb389ad962ec08e5 (diff)
libbinder: finalize connect/server APIs
Before, you needed to manually setup the required number of sockets on the client and server sides of a connection and manually setup threads. Now, you configure the thread count on RpcServer and call join once, and on the client side, you connect once, and the connection figured out how many connections it will make. Now, we will be able to manage how these sockets/threads get setup without affecting any client code in various tests. So, a server looks like this: sp<RpcServer> server = RpcServer::make(); // still until we are ready to open this up server->iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction(); server->setMaxThreads(3 /* for example */); // call this for each client (currently this must be setup in // advance) sp<RpcConnection> connection = server->addClientConnection(); // other server types are supported if (!connection->setupInetServer(1234 /*some port*/)) .. error .. // process requests for each client server->join(); And a client looks like this: sp<RpcConnection> connection = RpcConnection::make(); if (!connection->setupInetClient(/*some IP address*/, 1234 /*some port*/)) .. error .. The above code will create 3 threads on the server serving 3 separate socket connections that the client can use to make up to 3 simultaneous sets of syncrhonous calls (this can't be shared because the sockets may be needed for binder socket calls). This means that each address (ip + port) in this case can server a single process. Future considerations: - if we wanted, we could dynamically setup this connection, so that extra threads and sockets are only created as needed. This would be at parity with binder, but also it opens up the possibility for later errors. TODOs are added in the code for this. - a single server should be able to share a threadpool between multiple clients. Currently a new threadpool is created for each client. - new client connections should be able to be setup dynamically. Currently, once the threadpool is started, we don't support making more connections, but we should. Bug: 185167543 Test: binderRpcTest Change-Id: I4c11ab64bf7c1c19ca67f6a1c4be21de52358a5c
Diffstat (limited to 'libs/binder/RpcState.cpp')
-rw-r--r--    libs/binder/RpcState.cpp    57
1 file changed, 42 insertions, 15 deletions
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index d9341369fa..6bfcc42469 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -248,6 +248,31 @@ sp<IBinder> RpcState::getRootObject(const base::unique_fd& fd,
     return reply.readStrongBinder();
 }
 
+status_t RpcState::getMaxThreads(const base::unique_fd& fd, const sp<RpcConnection>& connection,
+                                 size_t* maxThreads) {
+    Parcel data;
+    data.markForRpc(connection);
+    Parcel reply;
+
+    status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
+                               connection, &reply, 0);
+    if (status != OK) {
+        ALOGE("Error getting max threads: %s", statusToString(status).c_str());
+        return status;
+    }
+
+    int32_t threads;
+    status = reply.readInt32(&threads);
+    if (status != OK) return status;
+    if (threads <= 0) {
+        ALOGE("Error invalid max threads: %d", threads);
+        return BAD_VALUE;
+    }
+
+    *maxThreads = threads;
+    return OK;
+}
+
 status_t RpcState::transact(const base::unique_fd& fd, const RpcAddress& address, uint32_t code,
                             const Parcel& data, const sp<RpcConnection>& connection, Parcel* reply,
                             uint32_t flags) {
@@ -516,23 +541,25 @@ status_t RpcState::processTransactInternal(const base::unique_fd& fd,
             replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);
         } else {
             LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
-            // special case for 'zero' address (special server commands)
-            switch (transaction->code) {
-                case RPC_SPECIAL_TRANSACT_GET_ROOT: {
-                    sp<IBinder> root;
-                    sp<RpcServer> server = connection->server().promote();
-                    if (server) {
-                        root = server->getRootObject();
-                    } else {
-                        ALOGE("Root object requested, but no server attached.");
-                    }
-                    replyStatus = reply.writeStrongBinder(root);
-                    break;
-                }
-                default: {
-                    replyStatus = UNKNOWN_TRANSACTION;
+            sp<RpcServer> server = connection->server().promote();
+            if (server) {
+                // special case for 'zero' address (special server commands)
+                switch (transaction->code) {
+                    case RPC_SPECIAL_TRANSACT_GET_ROOT: {
+                        replyStatus = reply.writeStrongBinder(server->getRootObject());
+                        break;
+                    }
+                    case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
+                        replyStatus = reply.writeInt32(server->getMaxThreads());
+                        break;
+                    }
+                    default: {
+                        replyStatus = UNKNOWN_TRANSACTION;
+                    }
                 }
+            } else {
+                ALOGE("Special command sent, but no server object attached.");
             }
         }
     }
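The second hunk above is the server-side half of the new
RPC_SPECIAL_TRANSACT_GET_MAX_THREADS command, answered with
RpcServer::getMaxThreads(); the RpcState::getMaxThreads() helper added in the
first hunk is the client-side half. Below is a rough, hedged sketch of how a
connection-setup path could consume the value; sizeSocketPool() and
openAdditionalSocket() are hypothetical names (not the actual RpcConnection
code), and the header paths are assumptions.

    // Sketch only, not from this commit: sizing a client-side socket pool
    // from the server's advertised thread count. openAdditionalSocket() is a
    // hypothetical helper standing in for the real connection-setup logic.
    #include <android-base/unique_fd.h>
    #include <binder/RpcConnection.h>
    #include <utils/Errors.h>

    #include "RpcState.h"  // internal libbinder header (path is an assumption)

    namespace android {

    // Hypothetical helper, assumed to be implemented elsewhere.
    status_t openAdditionalSocket(const sp<RpcConnection>& connection);

    status_t sizeSocketPool(RpcState* state, const base::unique_fd& fd,
                            const sp<RpcConnection>& connection) {
        size_t maxThreads = 0;
        // Round trip to the server: RPC_SPECIAL_TRANSACT_GET_MAX_THREADS is
        // answered by the switch case added in this patch.
        if (status_t err = state->getMaxThreads(fd, connection, &maxThreads); err != OK) {
            return err;
        }

        // One socket is already connected; open the rest so each of the
        // server's threads can serve a simultaneous synchronous call.
        for (size_t i = 1; i < maxThreads; i++) {
            if (status_t err = openAdditionalSocket(connection); err != OK) {
                return err;
            }
        }
        return OK;
    }

    }  // namespace android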