/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RpcSession"

#include <binder/RpcSession.h>

#include <dlfcn.h>
#include <inttypes.h>
#include <netinet/tcp.h>
#include <poll.h>
#include <unistd.h>

#include <string_view>

#include <binder/BpBinder.h>
#include <binder/Functional.h>
#include <binder/Parcel.h>
#include <binder/RpcServer.h>
#include <binder/RpcTransportRaw.h>
#include <binder/Stability.h>
#include <utils/String8.h>

#include "BuildFlags.h"
#include "FdTrigger.h"
#include "OS.h"
#include "RpcSocketAddress.h"
#include "RpcState.h"
#include "RpcTransportUtils.h"
#include "RpcWireFormat.h"
#include "Utils.h"

#if defined(__ANDROID__) && !defined(__ANDROID_RECOVERY__)
#include <jni.h>
extern "C" JavaVM* AndroidRuntimeGetJavaVM();
#endif

namespace android {

using namespace android::binder::impl;
using android::binder::borrowed_fd;
using android::binder::unique_fd;
RpcSession::RpcSession(std::unique_ptr<RpcTransportCtx> ctx) : mCtx(std::move(ctx)) {
    LOG_RPC_DETAIL("RpcSession created %p", this);

    mRpcBinderState = std::make_unique<RpcState>();
}
RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mConnections.mIncoming.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}

sp<RpcSession> RpcSession::make() {
    // Default is without TLS.
    return make(binder::os::makeDefaultRpcTransportCtxFactory());
}

sp<RpcSession> RpcSession::make(std::unique_ptr<RpcTransportCtxFactory> rpcTransportCtxFactory) {
    auto ctx = rpcTransportCtxFactory->newClientCtx();
    if (ctx == nullptr) return nullptr;
    return sp<RpcSession>::make(std::move(ctx));
}
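// Illustrative usage only (not part of this translation unit): a client would
// typically create a session with one of the make() overloads above, connect
// it to a server, and then fetch the remote root object. The socket path here
// is a placeholder.
//
//     sp<RpcSession> session = RpcSession::make();
//     if (session->setupUnixDomainClient("/path/to/server.sock") != OK) {
//         // handle connection failure
//     }
//     sp<IBinder> root = session->getRootObject();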
void RpcSession::setMaxIncomingThreads(size_t threads) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set max incoming threads before setting up connections");
    mMaxIncomingThreads = threads;
}

size_t RpcSession::getMaxIncomingThreads() {
    RpcMutexLockGuard _l(mMutex);
    return mMaxIncomingThreads;
}
void RpcSession::setMaxOutgoingConnections(size_t connections) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set max outgoing connections before setting up connections");
    mMaxOutgoingConnections = connections;
}

size_t RpcSession::getMaxOutgoingThreads() {
    RpcMutexLockGuard _l(mMutex);
    return mMaxOutgoingConnections;
}

bool RpcSession::setProtocolVersionInternal(uint32_t version, bool checkStarted) {
    if (!RpcState::validateProtocolVersion(version)) {
        return false;
    }

    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(checkStarted && mStartedSetup,
                        "Must set protocol version before setting up connections");
    if (mProtocolVersion && version > *mProtocolVersion) {
        ALOGE("Cannot upgrade explicitly capped protocol version %u to newer version %u",
              *mProtocolVersion, version);
        return false;
    }

    mProtocolVersion = version;
    return true;
}

bool RpcSession::setProtocolVersion(uint32_t version) {
    return setProtocolVersionInternal(version, true);
}

std::optional<uint32_t> RpcSession::getProtocolVersion() {
    RpcMutexLockGuard _l(mMutex);
    return mProtocolVersion;
}

void RpcSession::setFileDescriptorTransportMode(FileDescriptorTransportMode mode) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set file descriptor transport mode before setting up connections");
    mFileDescriptorTransportMode = mode;
}

RpcSession::FileDescriptorTransportMode RpcSession::getFileDescriptorTransportMode() {
    return mFileDescriptorTransportMode;
}

status_t RpcSession::setupUnixDomainClient(const char* path) {
    return setupSocketClient(UnixSocketAddress(path));
}

status_t RpcSession::setupUnixDomainSocketBootstrapClient(unique_fd bootstrapFd) {
    mBootstrapTransport =
            mCtx->newTransport(RpcTransportFd(std::move(bootstrapFd)), mShutdownTrigger.get());
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
        int socks[2];
        if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0, socks) < 0) {
            int savedErrno = errno;
            ALOGE("Failed socketpair: %s", strerror(savedErrno));
            return -savedErrno;
        }
        unique_fd clientFd(socks[0]), serverFd(socks[1]);

        int zero = 0;
        iovec iov{&zero, sizeof(zero)};
        std::vector<std::variant<unique_fd, borrowed_fd>> fds;
        fds.push_back(std::move(serverFd));

        status_t status = mBootstrapTransport->interruptableWriteFully(mShutdownTrigger.get(), &iov,
                                                                       1, std::nullopt, &fds);
        if (status != OK) {
            ALOGE("Failed to send fd over bootstrap transport: %s", statusToString(status).c_str());
            return status;
        }

        return initAndAddConnection(RpcTransportFd(std::move(clientFd)), sessionId, incoming);
    });
}

status_t RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
    return setupSocketClient(VsockSocketAddress(cid, port));
}

status_t RpcSession::setupInetClient(const char* addr, unsigned int port) {
    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
    if (aiStart == nullptr) return UNKNOWN_ERROR;
    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
        if (status_t status = setupSocketClient(socketAddress); status == OK) return OK;
    }
    ALOGE("None of the socket addresses resolved for %s:%u could be added as an inet client.", addr, port);
    return NAME_NOT_FOUND;
}

status_t RpcSession::setupPreconnectedClient(unique_fd fd, std::function<unique_fd()>&& request) {
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) -> status_t {
        if (!fd.ok()) {
            fd = request();
            if (!fd.ok()) return BAD_VALUE;
        }
        if (status_t res = binder::os::setNonBlocking(fd); res != OK) return res;

        RpcTransportFd transportFd(std::move(fd));
        status_t status = initAndAddConnection(std::move(transportFd), sessionId, incoming);
        fd = unique_fd(); // Explicitly reset after move to avoid analyzer warning.
        return status;
    });
}
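// Illustrative sketch only (not part of this file): a caller that already
// holds a connected socket, for example one end of a socketpair() whose other
// end was handed to the server process, can pass it in directly. The fd source
// shown here is a placeholder; the request callback is invoked whenever an
// additional connection is needed.
//
//     unique_fd fd = /* obtain an already-connected socket, e.g. via socketpair() */;
//     status_t status = session->setupPreconnectedClient(std::move(fd), [] {
//         return unique_fd{}; // this sketch has no way to create more connections
//     });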

status_t RpcSession::addNullDebuggingClient() {
    // Note: only works on raw sockets.
    if (auto status = initShutdownTrigger(); status != OK) return status;

    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));

    if (!serverFd.ok()) {
        int savedErrno = errno;
        ALOGE("Could not connect to /dev/null: %s", strerror(savedErrno));
        return -savedErrno;
    }

    RpcTransportFd transportFd(std::move(serverFd));
    auto server = mCtx->newTransport(std::move(transportFd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("Unable to set up RpcTransport");
        return UNKNOWN_ERROR;
    }
    return addOutgoingConnection(std::move(server), false);
}

sp<IBinder> RpcSession::getRootObject() {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return nullptr;
    return state()->getRootObject(connection.get(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;
    return state()->getMaxThreads(connection.get(), sp<RpcSession>::fromExisting(this), maxThreads);
}

bool RpcSession::shutdownAndWait(bool wait) {
    RpcMutexUniqueLock _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr, "Shutdown trigger not installed");

    mShutdownTrigger->trigger();

    if (wait) {
        LOG_ALWAYS_FATAL_IF(mShutdownListener == nullptr, "Shutdown listener not installed");
        mShutdownListener->waitForShutdown(_l, sp<RpcSession>::fromExisting(this));

        LOG_ALWAYS_FATAL_IF(!mConnections.mThreads.empty(), "Shutdown failed");
    }

    _l.unlock();

    if (status_t res = state()->sendObituaries(sp<RpcSession>::fromExisting(this)); res != OK) {
        ALOGE("Failed to send obituaries as the RpcSession is shutting down: %s",
              statusToString(res).c_str());
    }

    mRpcBinderState->clear();

    return true;
}
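// Illustrative teardown only (not part of this file): a client that set up the
// session with one of the setup*Client() calls above can request shutdown and
// block until the incoming threads have exited:
//
//     session->shutdownAndWait(true /*wait*/);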

status_t RpcSession::transact(const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    ExclusiveConnection connection;
    status_t status =
            ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                      (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                     : ConnectionUse::CLIENT,
                                      &connection);
    if (status != OK) return status;
    return state()->transact(connection.get(), binder, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}

status_t RpcSession::sendDecStrong(const BpBinder* binder) {
    // target is 0 because this is used to free BpBinder objects
    return sendDecStrongToTarget(binder->getPrivateAccessor().rpcAddress(), 0 /*target*/);
}

status_t RpcSession::sendDecStrongToTarget(uint64_t address, size_t target) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT_REFCOUNT, &connection);
    if (status != OK) return status;
    return state()->sendDecStrongToTarget(connection.get(), sp<RpcSession>::fromExisting(this),
                                          address, target);
}

status_t RpcSession::readId() {
    {
        RpcMutexLockGuard _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
    }

    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;

    status = state()->getSessionId(connection.get(), sp<RpcSession>::fromExisting(this), &mId);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %s", this, HexString(mId.data(), mId.size()).c_str());
    return OK;
}

void RpcSession::WaitForShutdownListener::onSessionAllIncomingThreadsEnded(
        const sp<RpcSession>& session) {
    (void)session;
}

void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() {
    mShutdownCount += 1;
    mCv.notify_all();
}
void RpcSession::WaitForShutdownListener::waitForShutdown(RpcMutexUniqueLock& lock,
                                                          const sp<RpcSession>& session) {
    while (mShutdownCount < session->mConnections.mMaxIncoming) {
        if (std::cv_status::timeout == mCv.wait_for(lock, std::chrono::seconds(1))) {
            ALOGE("Waiting for RpcSession to shut down (1s w/o progress): %zu incoming connections "
                  "remain; %zu/%zu have fully shut down.",
                  session->mConnections.mIncoming.size(), mShutdownCount.load(),
                  session->mConnections.mMaxIncoming);
        }
    }
}

void RpcSession::preJoinThreadOwnership(RpcMaybeThread thread) {
    LOG_ALWAYS_FATAL_IF(thread.get_id() != rpc_this_thread::get_id(), "Must own this thread");

    {
        RpcMutexLockGuard _l(mMutex);
        mConnections.mThreads[thread.get_id()] = std::move(thread);
    }
}

RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(
        std::unique_ptr<RpcTransport> rpcTransport) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignIncomingConnectionToThisThread(std::move(rpcTransport));

    status_t status;

    if (connection == nullptr) {
        status = DEAD_OBJECT;
    } else {
        status =
                mRpcBinderState->readConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    return PreJoinSetupResult{
            .connection = std::move(connection),
            .status = status,
    };
}

namespace {
#if !defined(__ANDROID__) || defined(__ANDROID_RECOVERY__)
class JavaThreadAttacher {};
#else
// RAII object for attaching / detaching current thread to JVM if Android Runtime exists. If
// Android Runtime doesn't exist, no-op.
class JavaThreadAttacher {
public:
    JavaThreadAttacher() {
        // Use dlsym to find AndroidRuntimeGetJavaVM because libandroid_runtime is loaded after
        // libbinder.
        auto vm = getJavaVM();
        if (vm == nullptr) return;

        char threadName[16];
        if (0 != pthread_getname_np(pthread_self(), threadName, sizeof(threadName))) {
            constexpr const char* defaultThreadName = "UnknownRpcSessionThread";
            memcpy(threadName, defaultThreadName,
                   std::min<size_t>(sizeof(threadName), strlen(defaultThreadName) + 1));
        }
        LOG_RPC_DETAIL("Attaching current thread %s to JVM", threadName);
        JavaVMAttachArgs args;
        args.version = JNI_VERSION_1_2;
        args.name = threadName;
        args.group = nullptr;
        JNIEnv* env;

        LOG_ALWAYS_FATAL_IF(vm->AttachCurrentThread(&env, &args) != JNI_OK,
                            "Cannot attach thread %s to JVM", threadName);
        mAttached = true;
    }
    ~JavaThreadAttacher() {
        if (!mAttached) return;
        auto vm = getJavaVM();
        LOG_ALWAYS_FATAL_IF(vm == nullptr,
                            "Unable to detach thread. No JavaVM, but it was present before!");

        LOG_RPC_DETAIL("Detaching current thread from JVM");
        int ret = vm->DetachCurrentThread();
        if (ret == JNI_OK) {
            mAttached = false;
        } else {
            ALOGW("Unable to detach current thread from JVM (%d)", ret);
        }
    }

private:
    JavaThreadAttacher(const JavaThreadAttacher&) = delete;
    void operator=(const JavaThreadAttacher&) = delete;

    bool mAttached = false;

    static JavaVM* getJavaVM() {
        static auto fn = reinterpret_cast<decltype(&AndroidRuntimeGetJavaVM)>(
                dlsym(RTLD_DEFAULT, "AndroidRuntimeGetJavaVM"));
        if (fn == nullptr) return nullptr;
        return fn();
    }
};
#endif
} // namespace

void RpcSession::join(sp<RpcSession>&& session, PreJoinSetupResult&& setupResult) {
    sp<RpcConnection>& connection = setupResult.connection;

    if (setupResult.status == OK) {
        LOG_ALWAYS_FATAL_IF(!connection, "must have connection if setup succeeded");
        [[maybe_unused]] JavaThreadAttacher javaThreadAttacher;
        while (true) {
            status_t status = session->state()->getAndExecuteCommand(connection, session,
                                                                     RpcState::CommandType::ANY);
            if (status != OK) {
                LOG_RPC_DETAIL("Binder connection thread closing w/ status %s",
                               statusToString(status).c_str());
                break;
            }
        }
    } else {
        ALOGE("Connection failed to init, closing with status %s",
              statusToString(setupResult.status).c_str());
    }

    sp<RpcSession::EventListener> listener;
    {
        RpcMutexLockGuard _l(session->mMutex);
        auto it = session->mConnections.mThreads.find(rpc_this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(it == session->mConnections.mThreads.end());
        it->second.detach();
        session->mConnections.mThreads.erase(it);

        listener = session->mEventListener.promote();
    }

    // done after all cleanup, since session shutdown progresses via callbacks here
    if (connection != nullptr) {
        LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection),
                            "bad state: connection object guaranteed to be in list");
    }

    session = nullptr;

    if (listener != nullptr) {
        listener->onSessionIncomingThreadEnded();
    }
}

sp<RpcServer> RpcSession::server() {
    RpcServer* unsafeServer = mForServer.unsafe_get();
    sp<RpcServer> server = mForServer.promote();

    LOG_ALWAYS_FATAL_IF((unsafeServer == nullptr) != (server == nullptr),
                        "wp<> is to avoid strong cycle only");
    return server;
}
status_t RpcSession::setupClient(const std::function<status_t(const std::vector<uint8_t>& sessionId,
                                                              bool incoming)>& connectAndInit) {
    {
        RpcMutexLockGuard _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mStartedSetup, "Must only setup session once");
        mStartedSetup = true;

        if constexpr (!kEnableRpcThreads) {
            LOG_ALWAYS_FATAL_IF(mMaxIncomingThreads > 0,
                                "Incoming threads are not supported on single-threaded libbinder");
            // mMaxIncomingThreads should not change from here to its use below,
            // since we set mStartedSetup==true and setMaxIncomingThreads checks
            // for that
        }
    }

    if (auto status = initShutdownTrigger(); status != OK) return status;

    auto oldProtocolVersion = mProtocolVersion;
    auto cleanup = make_scope_guard([&] {
        // if any threads are started, shut them down
        (void)shutdownAndWait(true);

        mShutdownListener = nullptr;
        mEventListener.clear();

        mId.clear();

        mShutdownTrigger = nullptr;
        mRpcBinderState = std::make_unique<RpcState>();

        // protocol version may have been downgraded - if we reuse this object
        // to connect to another server, force that server to request a
        // downgrade again
        mProtocolVersion = oldProtocolVersion;

        mConnections = {};

        // clear mStartedSetup so that we can reuse this RpcSession
        mStartedSetup = false;
    });

    if (status_t status = connectAndInit({}, false /*incoming*/); status != OK) return status;

    {
        ExclusiveConnection connection;
        if (status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                        ConnectionUse::CLIENT, &connection);
            status != OK)
            return status;

        uint32_t version;
        if (status_t status =
                    state()->readNewSessionResponse(connection.get(),
                                                    sp<RpcSession>::fromExisting(this), &version);
            status != OK)
            return status;
        if (!setProtocolVersionInternal(version, false)) return BAD_VALUE;
    }

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once.
    size_t numThreadsAvailable;
    if (status_t status = getRemoteMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    if (status_t status = readId(); status != OK) {
        ALOGE("Could not get session id after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    size_t outgoingConnections = std::min(numThreadsAvailable, mMaxOutgoingConnections);
    ALOGI_IF(outgoingConnections != numThreadsAvailable,
             "Server hints client to start %zu outgoing threads, but client will only start %zu "
             "because it is preconfigured to start at most %zu outgoing threads.",
             numThreadsAvailable, outgoingConnections, mMaxOutgoingConnections);

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once - the other side should be responsible for setting
    // up additional connections. We need to create at least one (unless 0 are
    // requested to be set) in order to allow the other side to reliably make
    // any requests at all.
    // we've already set up one client
    LOG_RPC_DETAIL("RpcSession::setupClient() instantiating %zu outgoing connections (server max: "
                   "%zu) and %zu incoming threads",
                   outgoingConnections, numThreadsAvailable, mMaxIncomingThreads);
    for (size_t i = 0; i + 1 < outgoingConnections; i++) {
        if (status_t status = connectAndInit(mId, false /*incoming*/); status != OK) return status;
    }

    for (size_t i = 0; i < mMaxIncomingThreads; i++) {
        if (status_t status = connectAndInit(mId, true /*incoming*/); status != OK) return status;
    }

    cleanup.release();

    return OK;
}

status_t RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
        return setupOneSocketConnection(addr, sessionId, incoming);
    });
}

status_t RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr,
                                              const std::vector<uint8_t>& sessionId,
                                              bool incoming) {
    RpcTransportFd transportFd;
    status_t status = singleSocketConnection(addr, mShutdownTrigger, &transportFd);
    if (status != OK) return status;

    return initAndAddConnection(std::move(transportFd), sessionId, incoming);
}
status_t singleSocketConnection(const RpcSocketAddress& addr,
                                const std::unique_ptr<FdTrigger>& shutdownTrigger,
                                RpcTransportFd* outFd) {
    LOG_ALWAYS_FATAL_IF(outFd == nullptr,
                        "There is no reason to call this function without an outFd");
    LOG_ALWAYS_FATAL_IF(shutdownTrigger == nullptr,
                        "FdTrigger argument is required so we don't get stuck in the connect call "
                        "if the server process shuts down.");
    for (size_t tries = 0; tries < 5; tries++) {
        if (tries > 0) usleep(10000);

        unique_fd serverFd(TEMP_FAILURE_RETRY(
                socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)));
        if (!serverFd.ok()) {
            int savedErrno = errno;
            ALOGE("Could not create socket at %s: %s", addr.toString().c_str(),
                  strerror(savedErrno));
            return -savedErrno;
        }

        if (addr.addr()->sa_family == AF_INET || addr.addr()->sa_family == AF_INET6) {
            int noDelay = 1;
            int result =
                    setsockopt(serverFd.get(), IPPROTO_TCP, TCP_NODELAY, &noDelay, sizeof(noDelay));
            if (result < 0) {
                int savedErrno = errno;
                ALOGE("Could not set TCP_NODELAY on %s: %s", addr.toString().c_str(),
                      strerror(savedErrno));
                return -savedErrno;
            }
        }

        RpcTransportFd transportFd(std::move(serverFd));

        if (0 != TEMP_FAILURE_RETRY(connect(transportFd.fd.get(), addr.addr(), addr.addrSize()))) {
            int connErrno = errno;
            if (connErrno == EAGAIN || connErrno == EINPROGRESS) {
                // For non-blocking sockets, connect() may return EAGAIN (for unix domain socket) or
                // EINPROGRESS (for others). Call poll() and getsockopt() to get the error.
                status_t pollStatus = shutdownTrigger->triggerablePoll(transportFd, POLLOUT);
                if (pollStatus != OK) {
                    ALOGE("Could not POLLOUT after connect() on non-blocking socket: %s",
                          statusToString(pollStatus).c_str());
                    return pollStatus;
                }
                // Set connErrno to the errno that connect() would have set if the fd were blocking.
                socklen_t connErrnoLen = sizeof(connErrno);
                int ret = getsockopt(transportFd.fd.get(), SOL_SOCKET, SO_ERROR, &connErrno,
                                     &connErrnoLen);
                if (ret == -1) {
                    int savedErrno = errno;
                    ALOGE("Could not getsockopt() after connect() on non-blocking socket: %s. "
                          "(Original error from connect() is: %s)",
                          strerror(savedErrno), strerror(connErrno));
                    return -savedErrno;
                }
                // Retrieved the real connErrno as if connect() was called with a blocking socket
                // fd. Continue checking connErrno.
            }
            if (connErrno == ECONNRESET) {
                ALOGW("Connection reset on %s", addr.toString().c_str());
                continue;
            }
            // The getsockopt() above may report no error (connErrno == 0), so check for zero again.
            if (connErrno != 0) {
                ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(),
                      strerror(connErrno));
                return -connErrno;
            }
        }
        LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(),
                       transportFd.fd.get());

        *outFd = std::move(transportFd);
        return OK;
    }

    ALOGE("Ran out of retries to connect to %s", addr.toString().c_str());
    return UNKNOWN_ERROR;
}

status_t RpcSession::initAndAddConnection(RpcTransportFd fd, const std::vector<uint8_t>& sessionId,
                                          bool incoming) {
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr);
    auto server = mCtx->newTransport(std::move(fd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("%s: Unable to set up RpcTransport", __PRETTY_FUNCTION__);
        return UNKNOWN_ERROR;
    }

    LOG_RPC_DETAIL("Socket at client with RpcTransport %p", server.get());

    if (sessionId.size() > std::numeric_limits<uint16_t>::max()) {
        ALOGE("Session ID too big %zu", sessionId.size());
        return BAD_VALUE;
    }

    RpcConnectionHeader header{
            .version = mProtocolVersion.value_or(RPC_WIRE_PROTOCOL_VERSION),
            .options = 0,
            .fileDescriptorTransportMode = static_cast<uint8_t>(mFileDescriptorTransportMode),
            .sessionIdSize = static_cast<uint16_t>(sessionId.size()),
    };

    if (incoming) {
        header.options |= RPC_CONNECTION_OPTION_INCOMING;
    }

    iovec headerIov{&header, sizeof(header)};
    auto sendHeaderStatus = server->interruptableWriteFully(mShutdownTrigger.get(), &headerIov, 1,
                                                            std::nullopt, nullptr);
    if (sendHeaderStatus != OK) {
        ALOGE("Could not write connection header to socket: %s",
              statusToString(sendHeaderStatus).c_str());
        return sendHeaderStatus;
    }

    if (sessionId.size() > 0) {
        iovec sessionIov{const_cast<void*>(static_cast<const void*>(sessionId.data())),
                         sessionId.size()};
        auto sendSessionIdStatus =
                server->interruptableWriteFully(mShutdownTrigger.get(), &sessionIov, 1,
                                                std::nullopt, nullptr);
        if (sendSessionIdStatus != OK) {
            ALOGE("Could not write session ID ('%s') to socket: %s",
                  HexString(sessionId.data(), sessionId.size()).c_str(),
                  statusToString(sendSessionIdStatus).c_str());
            return sendSessionIdStatus;
        }
    }

    LOG_RPC_DETAIL("Socket at client: header sent");

    if (incoming) {
        return addIncomingConnection(std::move(server));
    } else {
        return addOutgoingConnection(std::move(server), true /*init*/);
    }
}

status_t RpcSession::addIncomingConnection(std::unique_ptr<RpcTransport> rpcTransport) {
    RpcMutex mutex;
    RpcConditionVariable joinCv;
    RpcMutexUniqueLock lock(mutex);
    RpcMaybeThread thread;
    sp<RpcSession> thiz = sp<RpcSession>::fromExisting(this);
    bool ownershipTransferred = false;
    thread = RpcMaybeThread([&]() {
        RpcMutexUniqueLock threadLock(mutex);
        std::unique_ptr<RpcTransport> movedRpcTransport = std::move(rpcTransport);
        // NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
        sp<RpcSession> session = thiz;
        session->preJoinThreadOwnership(std::move(thread));

        // only continue once we have a response or the connection fails
        auto setupResult = session->preJoinSetup(std::move(movedRpcTransport));

        ownershipTransferred = true;
        threadLock.unlock();
        joinCv.notify_one();
        // do not use & vars below

        RpcSession::join(std::move(session), std::move(setupResult));
    });
    rpcJoinIfSingleThreaded(thread);
    joinCv.wait(lock, [&] { return ownershipTransferred; });
    LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
    return OK;
}
status_t RpcSession::initShutdownTrigger() {
    // first client connection added, but setForServer not called, so
    // initializing for a client.
    if (mShutdownTrigger == nullptr) {
        mShutdownTrigger = FdTrigger::make();
        mEventListener = mShutdownListener = sp<WaitForShutdownListener>::make();
        if (mShutdownTrigger == nullptr) return INVALID_OPERATION;
    }
    return OK;
}

status_t RpcSession::addOutgoingConnection(std::unique_ptr<RpcTransport> rpcTransport, bool init) {
    sp<RpcConnection> connection = sp<RpcConnection>::make();
    {
        RpcMutexLockGuard _l(mMutex);
        connection->rpcTransport = std::move(rpcTransport);
        connection->exclusiveTid = binder::os::GetThreadId();
        mConnections.mOutgoing.push_back(connection);
    }

    status_t status = OK;
    if (init) {
        status =
                mRpcBinderState->sendConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    clearConnectionTid(connection);

    return status;
}

bool RpcSession::setForServer(const wp<RpcServer>& server, const wp<EventListener>& eventListener,
                              const std::vector<uint8_t>& sessionId,
                              const sp<IBinder>& sessionSpecificRoot) {
    LOG_ALWAYS_FATAL_IF(mForServer != nullptr);
    LOG_ALWAYS_FATAL_IF(server == nullptr);
    LOG_ALWAYS_FATAL_IF(mEventListener != nullptr);
    LOG_ALWAYS_FATAL_IF(eventListener == nullptr);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger != nullptr);
    LOG_ALWAYS_FATAL_IF(mCtx != nullptr);

    mShutdownTrigger = FdTrigger::make();
    if (mShutdownTrigger == nullptr) return false;

    mId = sessionId;
    mForServer = server;
    mEventListener = eventListener;
    mSessionSpecificRootObject = sessionSpecificRoot;
    return true;
}

void RpcSession::setSessionSpecificRoot(const sp<IBinder>& sessionSpecificRoot) {
    LOG_ALWAYS_FATAL_IF(mSessionSpecificRootObject != nullptr,
                        "Session specific root object already set");
    LOG_ALWAYS_FATAL_IF(mForServer != nullptr,
                        "Session specific root object cannot be set for a server");
    mSessionSpecificRootObject = sessionSpecificRoot;
}

sp<RpcSession::RpcConnection> RpcSession::assignIncomingConnectionToThisThread(
        std::unique_ptr<RpcTransport> rpcTransport) {
    RpcMutexLockGuard _l(mMutex);

    if (mConnections.mIncoming.size() >= mMaxIncomingThreads) {
        ALOGE("Cannot add thread to session with %zu threads (max is set to %zu)",
              mConnections.mIncoming.size(), mMaxIncomingThreads);
        return nullptr;
    }
    // Don't accept any more connections; some have already shut down. Usually this
    // happens when new connections are still being established as part of a
    // very short-lived session which shuts down after it already started
    // accepting new connections.
    if (mConnections.mIncoming.size() < mConnections.mMaxIncoming) {
        return nullptr;
    }

    sp<RpcConnection> session = sp<RpcConnection>::make();
    session->rpcTransport = std::move(rpcTransport);
    session->exclusiveTid = binder::os::GetThreadId();

    mConnections.mIncoming.push_back(session);
    mConnections.mMaxIncoming = mConnections.mIncoming.size();

    return session;
}

bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
    RpcMutexUniqueLock _l(mMutex);
    if (auto it =
                std::find(mConnections.mIncoming.begin(), mConnections.mIncoming.end(), connection);
        it != mConnections.mIncoming.end()) {
        mConnections.mIncoming.erase(it);
        if (mConnections.mIncoming.size() == 0) {
            sp<EventListener> listener = mEventListener.promote();
            if (listener) {
                _l.unlock();
                listener->onSessionAllIncomingThreadsEnded(sp<RpcSession>::fromExisting(this));
            }
        }
        return true;
    }
    return false;
}

void RpcSession::clearConnectionTid(const sp<RpcConnection>& connection) {
    RpcMutexUniqueLock _l(mMutex);
    connection->exclusiveTid = std::nullopt;
    if (mConnections.mWaitingThreads > 0) {
        _l.unlock();
        mAvailableConnectionCv.notify_one();
    }
}

std::vector<uint8_t> RpcSession::getCertificate(RpcCertificateFormat format) {
    return mCtx->getCertificate(format);
}

status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, ConnectionUse use,
                                               ExclusiveConnection* connection) {
    connection->mSession = session;
    connection->mConnection = nullptr;
    connection->mReentrant = false;

    uint64_t tid = binder::os::GetThreadId();
    RpcMutexUniqueLock _l(session->mMutex);

    session->mConnections.mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, session->mConnections.mOutgoing,
                       session->mConnections.mOutgoingOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mIncoming is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            session->mConnections.mOutgoingOffset = (session->mConnections.mOutgoingOffset + 1) %
                    session->mConnections.mOutgoing.size();
        }

        // USE SERVING SOCKET (e.g. nested transaction)
        if (use != ConnectionUse::CLIENT_ASYNC) {
            sp<RpcConnection> exclusiveIncoming;
            // server connections are always assigned to a thread
            findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
                           session->mConnections.mIncoming, 0 /* index hint */);
            // asynchronous calls cannot be nested, but we currently allow ref count
            // calls to be nested (so that you can use this without having extra
            // threads). Note 'drainCommands' is used so that these ref counts can't
            // build up.
            if (exclusiveIncoming != nullptr) {
                if (exclusiveIncoming->allowNested) {
                    // guaranteed to be processed as nested command
                    exclusive = exclusiveIncoming;
                } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) {
                    // prefer available socket, but if we don't have one, don't
                    // wait for one
                    exclusive = exclusiveIncoming;
                }
            }
        }

        // if our thread is already using a connection, prioritize using that
        if (exclusive != nullptr) {
            connection->mConnection = exclusive;
            connection->mReentrant = true;
            break;
        } else if (available != nullptr) {
            connection->mConnection = available;
            connection->mConnection->exclusiveTid = tid;
            break;
        }

        if (session->mConnections.mOutgoing.size() == 0) {
            ALOGE("Session has no outgoing connections. This is required for an RPC server to make "
                  "any non-nested (e.g. oneway or on another thread) calls. Use code request "
                  "reason: %d. Incoming connections: %zu. %s.",
                  static_cast<int>(use), session->mConnections.mIncoming.size(),
                  (session->server()
                           ? "This is a server session, so see RpcSession::setMaxIncomingThreads "
                             "for the corresponding client"
                           : "This is a client session, so see "
                             "RpcSession::setMaxOutgoingConnections "
                             "for this client or RpcServer::setMaxThreads for the corresponding "
                             "server"));
            return WOULD_BLOCK;
        }

        LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
                       session->mConnections.mOutgoing.size(),
                       session->mConnections.mIncoming.size());
        session->mAvailableConnectionCv.wait(_l);
    }
    session->mConnections.mWaitingThreads--;

    return OK;
}

void RpcSession::ExclusiveConnection::findConnection(uint64_t tid, sp<RpcConnection>* exclusive,
                                                     sp<RpcConnection>* available,
                                                     std::vector<sp<RpcConnection>>& sockets,
                                                     size_t socketsIndexHint) {
    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());

    if (*exclusive != nullptr) return; // consistent with break below

    for (size_t i = 0; i < sockets.size(); i++) {
        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];

        // take first available connection (intuition = caching)
        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
            *available = socket;
            continue;
        }
        // though, prefer to take a connection which is already in use by this thread
        // (nested transactions)
        if (exclusive && socket->exclusiveTid == tid) {
            *exclusive = socket;
            break; // consistent with return above
        }
    }
}

RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a connection means something less deep in the call stack
    // is using this fd, and it retains the right to it. So, we don't give up
    // exclusive ownership, and no thread is freed.
    if (!mReentrant && mConnection != nullptr) {
        mSession->clearConnectionTid(mConnection);
    }
}

bool RpcSession::hasActiveConnection(const std::vector<sp<RpcConnection>>& connections) {
    for (const auto& connection : connections) {
        if (connection->exclusiveTid != std::nullopt && !connection->rpcTransport->isWaiting()) {
            return true;
        }
    }
    return false;
}

bool RpcSession::hasActiveRequests() {
    RpcMutexUniqueLock _l(mMutex);
    if (hasActiveConnection(mConnections.mIncoming)) {
        return true;
    }
    if (hasActiveConnection(mConnections.mOutgoing)) {
        return true;
    }
    return mConnections.mWaitingThreads != 0;
}

} // namespace android