1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <endian.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <inttypes.h>
24 #include <pthread.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <sys/mman.h>
29 #include <sys/resource.h>
30 #include <sys/stat.h>
31 #include <sys/types.h>
32 #include <unistd.h>
33 #include <algorithm>
34
35 #include <binder/Binder.h>
36 #include <binder/BpBinder.h>
37 #include <binder/Functional.h>
38 #include <binder/IPCThreadState.h>
39 #include <binder/Parcel.h>
40 #include <binder/ProcessState.h>
41 #include <binder/Stability.h>
42 #include <binder/Status.h>
43 #include <binder/TextOutput.h>
44
45 #ifndef BINDER_DISABLE_BLOB
46 #include <cutils/ashmem.h>
47 #endif
48 #include <utils/String16.h>
49 #include <utils/String8.h>
50
51 #include "OS.h"
52 #include "RpcState.h"
53 #include "Static.h"
54 #include "Utils.h"
55
56 // A lot of code in this file uses definitions from the
57 // Linux kernel header for Binder <linux/android/binder.h>
58 // which is included indirectly via "binder_module.h".
59 // Non-Linux OSes do not have that header, so libbinder should be
60 // built for those targets without kernel binder support, i.e.,
61 // without BINDER_WITH_KERNEL_IPC. For this reason, all code in this
62 // file that depends on kernel binder, including the header itself,
63 // is conditional on BINDER_WITH_KERNEL_IPC.
64 #ifdef BINDER_WITH_KERNEL_IPC
65 #include <linux/sched.h>
66 #include "binder_module.h"
67 #else // BINDER_WITH_KERNEL_IPC
68 // Needed by {read,write}Pointer
69 typedef uintptr_t binder_uintptr_t;
70 #endif // BINDER_WITH_KERNEL_IPC
71
72 #ifdef __BIONIC__
73 #include <android/fdsan.h>
74 #endif
75
76 #define LOG_REFS(...)
77 // #define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
78 #define LOG_ALLOC(...)
79 // #define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
80
81 // ---------------------------------------------------------------------------
82
83 // This macro should never be used at runtime, as a too large value
84 // of s could cause an integer overflow. Instead, you should always
85 // use the wrapper function pad_size()
86 #define PAD_SIZE_UNSAFE(s) (((s) + 3) & ~3UL)
87
pad_size(size_t s)88 static size_t pad_size(size_t s) {
89 if (s > (std::numeric_limits<size_t>::max() - 3)) {
90 LOG_ALWAYS_FATAL("pad size too big %zu", s);
91 }
92 return PAD_SIZE_UNSAFE(s);
93 }
94
95 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
96 #define STRICT_MODE_PENALTY_GATHER (1 << 31)
97
98 namespace android {
99
100 using namespace android::binder::impl;
101 using binder::borrowed_fd;
102 using binder::unique_fd;
103
// many things compile this into prebuilts on the stack
// (so the size of Parcel is effectively ABI: lock it down per word size)
#ifdef __LP64__
static_assert(sizeof(Parcel) == 120);
#else
static_assert(sizeof(Parcel) == 60);
#endif

// Process-wide accounting of live Parcel objects and their data bytes,
// exposed via getGlobalAllocCount()/getGlobalAllocSize().
static std::atomic<size_t> gParcelGlobalAllocCount;
static std::atomic<size_t> gParcelGlobalAllocSize;

// Maximum number of file descriptors per Parcel.
constexpr size_t kMaxFds = 1024;

// Maximum size of a blob to transfer in-place.
[[maybe_unused]] static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
119
120 #if defined(__BIONIC__)
// Transfers fdsan ownership of `fd` from `old_addr` to `new_addr` (either may
// be null, meaning "untagged"). Guarded on the weak symbol so it is a no-op on
// older bionic versions that lack fdsan.
static void FdTag(int fd, const void* old_addr, const void* new_addr) {
    if (android_fdsan_exchange_owner_tag) {
        uint64_t old_tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
                                                          reinterpret_cast<uint64_t>(old_addr));
        uint64_t new_tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
                                                          reinterpret_cast<uint64_t>(new_addr));
        android_fdsan_exchange_owner_tag(fd, old_tag, new_tag);
    }
}
// Closes `fd`, verifying (when fdsan is available) that it is still owned by
// the Parcel at `addr`; falls back to a plain close() on older bionic.
static void FdTagClose(int fd, const void* addr) {
    if (android_fdsan_close_with_tag) {
        uint64_t tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
                                                      reinterpret_cast<uint64_t>(addr));
        android_fdsan_close_with_tag(fd, tag);
    } else {
        close(fd);
    }
}
139 #else
// Non-bionic stub: fdsan ownership tagging does not exist, so do nothing.
static void FdTag([[maybe_unused]] int fd, [[maybe_unused]] const void* old_addr,
                  [[maybe_unused]] const void* new_addr) {}
// Non-bionic stub: no ownership tag to verify, just close the descriptor.
static void FdTagClose(int fd, [[maybe_unused]] const void* addr) {
    close(fd);
}
149 #endif
150
// Wire tags describing how a blob's payload travels; presumably chosen
// against BLOB_INPLACE_LIMIT by the blob read/write paths (not in this
// chunk) -- confirm against writeBlob/readBlob.
enum {
    BLOB_INPLACE = 0,          // bytes copied directly into the parcel
    BLOB_ASHMEM_IMMUTABLE = 1, // shared ashmem region, read-only to receiver
    BLOB_ASHMEM_MUTABLE = 2,   // shared ashmem region, writable
};
156
157 #ifdef BINDER_WITH_KERNEL_IPC
// Takes a reference on the object flattened in `obj` on behalf of `who`:
// local binders get a strong ref via the cookie pointer, remote handles get a
// strong ref through their proxy, and owned fds are fdsan-tagged to `who`.
static void acquire_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %llu", who, obj.cookie);
                // cookie holds the BBinder* for local objects
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            // cookie != 0 marks the fd as owned by this parcel
            if (obj.cookie != 0) { // owned
                FdTag(obj.handle, nullptr, who);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x to acquire", obj.hdr.type);
}
185
// Mirror of acquire_object(): drops the reference previously taken for `who`,
// and closes owned fds (with fdsan tag verification).
static void release_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            // note: this path is not used when mOwner, so the tag is also released
            // in 'closeFileDescriptors'
            if (obj.cookie != 0) { // owned
                FdTagClose(obj.handle, who);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x to release", obj.hdr.type);
}
215 #endif // BINDER_WITH_KERNEL_IPC
216
toRawFd(const std::variant<unique_fd,borrowed_fd> & v)217 static int toRawFd(const std::variant<unique_fd, borrowed_fd>& v) {
218 return std::visit([](const auto& fd) { return fd.get(); }, v);
219 }
220
// RPC-mode state is only meaningful when bound to a live session; a null
// session is a caller bug, so abort rather than limp along.
Parcel::RpcFields::RpcFields(const sp<RpcSession>& session) : mSession(session) {
    LOG_ALWAYS_FATAL_IF(mSession == nullptr);
}
224
// Trailer written after every flattened binder: the object's stability level,
// so the receiving side can enforce stability requirements.
status_t Parcel::finishFlattenBinder(const sp<IBinder>& binder)
{
    internal::Stability::tryMarkCompilationUnit(binder.get());
    int16_t rep = internal::Stability::getRepr(binder.get());
    return writeInt32(rep);
}
231
// Counterpart of finishFlattenBinder(): reads the stability trailer, applies
// it to the binder, and only then publishes the binder to `out`.
status_t Parcel::finishUnflattenBinder(
    const sp<IBinder>& binder, sp<IBinder>* out) const
{
    int32_t stability;
    status_t status = readInt32(&stability);
    if (status != OK) return status;

    status = internal::Stability::setRepr(binder.get(), static_cast<int16_t>(stability),
                                          true /*log*/);
    if (status != OK) return status;

    *out = binder;
    return OK;
}
246
247 #ifdef BINDER_WITH_KERNEL_IPC
schedPolicyMask(int policy,int priority)248 static constexpr inline int schedPolicyMask(int policy, int priority) {
249 return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
250 }
251 #endif // BINDER_WITH_KERNEL_IPC
252
// Serializes `binder` (which may be null) into this parcel. RPC parcels get a
// type tag plus a session-scoped address; kernel parcels get a
// flat_binder_object carrying either a local pointer or a remote handle.
// Both formats end with the stability trailer from finishFlattenBinder().
status_t Parcel::flattenBinder(const sp<IBinder>& binder) {
    BBinder* local = nullptr;
    if (binder) local = binder->localBinder();
    // record that this local object has been sent through a parcel
    if (local) local->setParceled();

    if (const auto* rpcFields = maybeRpcFields()) {
        if (binder) {
            status_t status = writeInt32(RpcFields::TYPE_BINDER); // non-null
            if (status != OK) return status;
            uint64_t address;
            // TODO(b/167966510): need to undo this if the Parcel is not sent
            status = rpcFields->mSession->state()->onBinderLeaving(rpcFields->mSession, binder,
                                                                   &address);
            if (status != OK) return status;
            status = writeUint64(address);
            if (status != OK) return status;
        } else {
            status_t status = writeInt32(RpcFields::TYPE_BINDER_NULL); // null
            if (status != OK) return status;
        }
        return finishFlattenBinder(binder);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;

    // default scheduling hint; may be overridden below by the local binder's
    // explicit minimum scheduler policy/priority
    int schedBits = 0;
    if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
        schedBits = schedPolicyMask(SCHED_NORMAL, 19);
    }

    if (binder != nullptr) {
        if (!local) {
            // remote binder: flatten as a handle
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            } else {
                if (proxy->isRpcBinder()) {
                    ALOGE("Sending a socket binder over kernel binder is prohibited");
                    return INVALID_OPERATION;
                }
            }
            const int32_t handle = proxy ? proxy->getPrivateAccessor().binderHandle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.flags = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // local binder: flatten as weak-refs pointer + cookie
            int policy = local->getMinSchedulerPolicy();
            int priority = local->getMinSchedulerPriority();

            if (policy != 0 || priority != 0) {
                // override value, since it is set explicitly
                schedBits = schedPolicyMask(policy, priority);
            }
            obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            if (local->isInheritRt()) {
                obj.flags |= FLAT_BINDER_FLAG_INHERIT_RT;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // null binder: zeroed local-object record
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.flags = 0;
        obj.binder = 0;
        obj.cookie = 0;
    }

    obj.flags |= schedBits;

    status_t status = writeObject(obj, false);
    if (status != OK) return status;

    return finishFlattenBinder(binder);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
338
// Reads a binder written by flattenBinder() out of this parcel into `out`.
// RPC parcels: type tag then (if present) a session address that is resolved
// back into a binder. Kernel parcels: a flat_binder_object that is either a
// local pointer or a handle to be proxied.
status_t Parcel::unflattenBinder(sp<IBinder>* out) const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        int32_t isPresent;
        status_t status = readInt32(&isPresent);
        if (status != OK) return status;

        sp<IBinder> binder;

        if (isPresent & 1) {
            uint64_t addr;
            if (status_t status = readUint64(&addr); status != OK) return status;
            if (status_t status =
                        rpcFields->mSession->state()->onBinderEntering(rpcFields->mSession, addr,
                                                                       &binder);
                status != OK)
                return status;
            // drop any extra remote refcounts accumulated for this address
            if (status_t status =
                        rpcFields->mSession->state()->flushExcessBinderRefs(rpcFields->mSession,
                                                                            addr, binder);
                status != OK)
                return status;
        }

        return finishUnflattenBinder(binder, out);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    const flat_binder_object* flat = readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER: {
                // object lives in this process; cookie is the IBinder*
                sp<IBinder> binder =
                        sp<IBinder>::fromExisting(reinterpret_cast<IBinder*>(flat->cookie));
                return finishUnflattenBinder(binder, out);
            }
            case BINDER_TYPE_HANDLE: {
                sp<IBinder> binder =
                        ProcessState::self()->getStrongProxyForHandle(flat->handle);
                return finishUnflattenBinder(binder, out);
            }
        }
    }
    return BAD_TYPE;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
389
390 // ---------------------------------------------------------------------------
391
// Constructs an empty parcel; all real initialization (and global alloc
// accounting) happens in initState().
Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}
397
// Releases held objects/fds and the data buffer via freeDataNoInit()
// (the "NoInit" variant skips re-initializing state we're about to destroy).
Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}
403
getGlobalAllocSize()404 size_t Parcel::getGlobalAllocSize() {
405 return gParcelGlobalAllocSize.load();
406 }
407
getGlobalAllocCount()408 size_t Parcel::getGlobalAllocCount() {
409 return gParcelGlobalAllocCount.load();
410 }
411
// Raw pointer to the parcel's backing buffer (may be null when empty).
const uint8_t* Parcel::data() const
{
    return mData;
}
416
dataSize() const417 size_t Parcel::dataSize() const
418 {
419 return (mDataSize > mDataPos ? mDataSize : mDataPos);
420 }
421
// Recorded data size only, ignoring the current read/write position.
size_t Parcel::dataBufferSize() const {
    return mDataSize;
}
425
dataAvail() const426 size_t Parcel::dataAvail() const
427 {
428 size_t result = dataSize() - dataPosition();
429 if (result > INT32_MAX) {
430 LOG_ALWAYS_FATAL("result too big: %zu", result);
431 }
432 return result;
433 }
434
// Current read/write cursor within the data buffer.
size_t Parcel::dataPosition() const
{
    return mDataPos;
}
439
// Allocated capacity of the backing buffer (>= dataBufferSize()).
size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}
444
setDataSize(size_t size)445 status_t Parcel::setDataSize(size_t size)
446 {
447 if (size > INT32_MAX) {
448 // don't accept size_t values which may have come from an
449 // inadvertent conversion from a negative int.
450 return BAD_VALUE;
451 }
452
453 status_t err;
454 err = continueWrite(size);
455 if (err == NO_ERROR) {
456 mDataSize = size;
457 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
458 }
459 return err;
460 }
461
// Moves the read/write cursor. const because the cursor is logically mutable
// read state; invalidates the kernel object-lookup cache since object
// traversal restarts from the new position.
void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        LOG_ALWAYS_FATAL("pos too big: %zu", pos);
    }

    mDataPos = pos;
    if (const auto* kernelFields = maybeKernelFields()) {
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
    }
}
476
setDataCapacity(size_t size)477 status_t Parcel::setDataCapacity(size_t size)
478 {
479 if (size > INT32_MAX) {
480 // don't accept size_t values which may have come from an
481 // inadvertent conversion from a negative int.
482 return BAD_VALUE;
483 }
484
485 if (size > mDataCapacity) return continueWrite(size);
486 return NO_ERROR;
487 }
488
setData(const uint8_t * buffer,size_t len)489 status_t Parcel::setData(const uint8_t* buffer, size_t len)
490 {
491 if (len > INT32_MAX) {
492 // don't accept size_t values which may have come from an
493 // inadvertent conversion from a negative int.
494 return BAD_VALUE;
495 }
496
497 status_t err = restartWrite(len);
498 if (err == NO_ERROR) {
499 memcpy(const_cast<uint8_t*>(data()), buffer, len);
500 mDataSize = len;
501 if (auto* kernelFields = maybeKernelFields()) {
502 kernelFields->mFdsKnown = false;
503 }
504 }
505 return err;
506 }
507
// Appends bytes [offset, offset+len) of `parcel` at this parcel's current
// position, duplicating any embedded objects (binders/fds) that fall wholly
// inside the range and fixing up their recorded offsets. Both parcels must be
// the same flavor (kernel vs RPC) and, for RPC, the same session.
status_t Parcel::appendFrom(const Parcel* parcel, size_t offset, size_t len) {
    if (isForRpc() != parcel->isForRpc()) {
        ALOGE("Cannot append Parcel from one context to another. They may be different formats, "
              "and objects are specific to a context.");
        return BAD_TYPE;
    }
    if (isForRpc() && maybeRpcFields()->mSession != parcel->maybeRpcFields()->mSession) {
        ALOGE("Cannot append Parcels from different sessions");
        return BAD_TYPE;
    }

    status_t err;
    const uint8_t* data = parcel->mData;
    // destination offset where the copied range will land; object offsets in
    // the source are rebased onto this
    int startPos = mDataPos;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        auto* otherKernelFields = parcel->maybeKernelFields();
        LOG_ALWAYS_FATAL_IF(otherKernelFields == nullptr);

        const binder_size_t* objects = otherKernelFields->mObjects;
        size_t size = otherKernelFields->mObjectsSize;
        // Count objects in range
        // (relies on mObjects being offset-ordered: a contiguous index range
        // [firstIndex, lastIndex] covers every object fully inside the span)
        int firstIndex = -1, lastIndex = -2;
        for (int i = 0; i < (int)size; i++) {
            size_t off = objects[i];
            if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
                if (firstIndex == -1) {
                    firstIndex = i;
                }
                lastIndex = i;
            }
        }
        int numObjects = lastIndex - firstIndex + 1;
        if (numObjects > 0) {
            const sp<ProcessState> proc(ProcessState::self());
            // grow objects
            if (kernelFields->mObjectsCapacity < kernelFields->mObjectsSize + numObjects) {
                if ((size_t)numObjects > SIZE_MAX - kernelFields->mObjectsSize)
                    return NO_MEMORY; // overflow
                if (kernelFields->mObjectsSize + numObjects > SIZE_MAX / 3)
                    return NO_MEMORY; // overflow
                size_t newSize = ((kernelFields->mObjectsSize + numObjects) * 3) / 2;
                if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
                binder_size_t* objects = (binder_size_t*)realloc(kernelFields->mObjects,
                                                                 newSize * sizeof(binder_size_t));
                if (objects == (binder_size_t*)nullptr) {
                    return NO_MEMORY;
                }
                kernelFields->mObjects = objects;
                kernelFields->mObjectsCapacity = newSize;
            }

            // append and acquire objects
            int idx = kernelFields->mObjectsSize;
            for (int i = firstIndex; i <= lastIndex; i++) {
                // rebase the object's offset from the source range onto the
                // destination position
                size_t off = objects[i] - offset + startPos;
                kernelFields->mObjects[idx++] = off;
                kernelFields->mObjectsSize++;

                flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(mData + off);

                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // If this is a file descriptor, we need to dup it so the
                    // new Parcel now owns its own fd, and can declare that we
                    // officially know we have fds.
                    // NOTE(review): fcntl can fail and return -1 here, which
                    // is stored unchecked -- confirm downstream handling.
                    flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
                    flat->cookie = 1;
                    kernelFields->mHasFds = kernelFields->mFdsKnown = true;
                    if (!mAllowFds) {
                        // remember the failure but keep acquiring the
                        // remaining objects so cleanup stays balanced
                        err = FDS_NOT_ALLOWED;
                    }
                }

                acquire_object(proc, *flat, this);
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else {
        auto* rpcFields = maybeRpcFields();
        LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);
        auto* otherRpcFields = parcel->maybeRpcFields();
        if (otherRpcFields == nullptr) {
            return BAD_TYPE;
        }
        if (rpcFields->mSession != otherRpcFields->mSession) {
            return BAD_TYPE;
        }

        // the fd-index fixups below move the cursor; restore it on every exit
        const size_t savedDataPos = mDataPos;
        auto scopeGuard = make_scope_guard([&]() { mDataPos = savedDataPos; });

        rpcFields->mObjectPositions.reserve(otherRpcFields->mObjectPositions.size());
        if (otherRpcFields->mFds != nullptr) {
            if (rpcFields->mFds == nullptr) {
                rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
            }
            rpcFields->mFds->reserve(otherRpcFields->mFds->size());
        }
        for (size_t i = 0; i < otherRpcFields->mObjectPositions.size(); i++) {
            const binder_size_t objPos = otherRpcFields->mObjectPositions[i];
            if (offset <= objPos && objPos < offset + len) {
                size_t newDataPos = objPos - offset + startPos;
                rpcFields->mObjectPositions.push_back(newDataPos);

                mDataPos = newDataPos;
                int32_t objectType;
                if (status_t status = readInt32(&objectType); status != OK) {
                    return status;
                }
                if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    continue;
                }

                if (!mAllowFds) {
                    return FDS_NOT_ALLOWED;
                }

                // Read FD, duplicate, and add to list.
                int32_t fdIndex;
                if (status_t status = readInt32(&fdIndex); status != OK) {
                    return status;
                }
                int oldFd = toRawFd(otherRpcFields->mFds->at(fdIndex));
                // To match kernel binder behavior, we always dup, even if the
                // FD was unowned in the source parcel.
                int newFd = -1;
                if (status_t status = binder::os::dupFileDescriptor(oldFd, &newFd); status != OK) {
                    ALOGW("Failed to duplicate file descriptor %d: %s", oldFd,
                          statusToString(status).c_str());
                }
                rpcFields->mFds->emplace_back(unique_fd(newFd));
                // Fixup the index in the data.
                mDataPos = newDataPos + 4;
                if (status_t status = writeInt32(rpcFields->mFds->size() - 1); status != OK) {
                    return status;
                }
            }
        }
    }

    return err;
}
686
compareData(const Parcel & other) const687 int Parcel::compareData(const Parcel& other) const {
688 size_t size = dataSize();
689 if (size != other.dataSize()) {
690 return size < other.dataSize() ? -1 : 1;
691 }
692 return memcmp(data(), other.data(), size);
693 }
694
compareDataInRange(size_t thisOffset,const Parcel & other,size_t otherOffset,size_t len,int * result) const695 status_t Parcel::compareDataInRange(size_t thisOffset, const Parcel& other, size_t otherOffset,
696 size_t len, int* result) const {
697 if (len > INT32_MAX || thisOffset > INT32_MAX || otherOffset > INT32_MAX) {
698 // Don't accept size_t values which may have come from an inadvertent conversion from a
699 // negative int.
700 return BAD_VALUE;
701 }
702 size_t thisLimit;
703 if (__builtin_add_overflow(thisOffset, len, &thisLimit) || thisLimit > mDataSize) {
704 return BAD_VALUE;
705 }
706 size_t otherLimit;
707 if (__builtin_add_overflow(otherOffset, len, &otherLimit) || otherLimit > other.mDataSize) {
708 return BAD_VALUE;
709 }
710 *result = memcmp(data() + thisOffset, other.data() + otherOffset, len);
711 return NO_ERROR;
712 }
713
// Whether file descriptors may currently be written into this parcel.
bool Parcel::allowFds() const
{
    return mAllowFds;
}
718
pushAllowFds(bool allowFds)719 bool Parcel::pushAllowFds(bool allowFds)
720 {
721 const bool origValue = mAllowFds;
722 if (!allowFds) {
723 mAllowFds = false;
724 }
725 return origValue;
726 }
727
// Restores the fd-allowed flag saved by a prior pushAllowFds() call.
void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}
732
// True if the parcel currently contains any file descriptors. For kernel
// parcels the answer may be stale after raw setData(); in that case the
// buffer is rescanned (scanForFds) before answering.
bool Parcel::hasFileDescriptors() const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        return rpcFields->mFds != nullptr && !rpcFields->mFds->empty();
    }
    // not RPC, so kernel fields must be the active variant alternative
    auto* kernelFields = maybeKernelFields();
    if (!kernelFields->mFdsKnown) {
        scanForFds();
    }
    return kernelFields->mHasFds;
}
744
// Convenience wrapper: checks the whole payload for binder objects.
status_t Parcel::hasBinders(bool* result) const {
    status_t status = hasBindersInRange(0, dataSize(), result);
    ALOGE_IF(status != NO_ERROR, "Error %d calling hasBindersInRange()", status);
    return status;
}
750
// Debug helper: collects every local (BINDER_TYPE_BINDER) object in the
// parcel by re-reading it at each recorded object offset. Saves and restores
// the cursor; returns empty for RPC parcels or non-kernel builds.
std::vector<sp<IBinder>> Parcel::debugReadAllStrongBinders() const {
    std::vector<sp<IBinder>> ret;

#ifdef BINDER_WITH_KERNEL_IPC
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return ret;
    }

    size_t initPosition = dataPosition();
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        const flat_binder_object* flat =
                reinterpret_cast<const flat_binder_object*>(mData + offset);
        if (flat->hdr.type != BINDER_TYPE_BINDER) continue;

        setDataPosition(offset);

        sp<IBinder> binder = readStrongBinder();
        if (binder != nullptr) ret.push_back(binder);
    }

    setDataPosition(initPosition);
#endif // BINDER_WITH_KERNEL_IPC

    return ret;
}
778
// Debug helper: collects the raw fd numbers of every file descriptor in the
// parcel (kernel: by re-reading at each fd object's offset; RPC: straight
// from the fd table). The returned fds are NOT duplicated -- callers must not
// close them.
std::vector<int> Parcel::debugReadAllFileDescriptors() const {
    std::vector<int> ret;

    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t initPosition = dataPosition();
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            binder_size_t offset = kernelFields->mObjects[i];
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + offset);
            if (flat->hdr.type != BINDER_TYPE_FD) continue;

            setDataPosition(offset);

            int fd = readFileDescriptor();
            LOG_ALWAYS_FATAL_IF(fd == -1);
            ret.push_back(fd);
        }
        setDataPosition(initPosition);
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
#endif
    } else if (const auto* rpcFields = maybeRpcFields(); rpcFields && rpcFields->mFds) {
        for (const auto& fd : *rpcFields->mFds) {
            ret.push_back(toRawFd(fd));
        }
    }

    return ret;
}
809
// Sets *result to whether any binder object (local or handle) lies entirely
// within [offset, offset+len). Only supported for kernel parcels; RPC parcels
// return INVALID_OPERATION.
status_t Parcel::hasBindersInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent
        // conversion from a negative int.
        return BAD_VALUE;
    }
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // past the range: done if offsets are sorted, otherwise a
                // later entry might still fall inside, so keep scanning
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_BINDER || flat->hdr.type == BINDER_TYPE_HANDLE) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (const auto* rpcFields = maybeRpcFields()) {
        return INVALID_OPERATION;
    }
    return NO_ERROR;
}
849
// Sets *result to whether any file descriptor lies within [offset,
// offset+len): kernel parcels scan flat_binder_object entries, RPC parcels
// check the recorded object-type tags.
status_t Parcel::hasFileDescriptorsInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent
        // conversion from a negative int.
        return BAD_VALUE;
    }
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // past the range: done if offsets are sorted, otherwise a
                // later entry might still fall inside, so keep scanning
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (const auto* rpcFields = maybeRpcFields()) {
        for (uint32_t pos : rpcFields->mObjectPositions) {
            if (offset <= pos && pos < limit) {
                const auto* type = reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
                if (*type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    *result = true;
                    break;
                }
            }
        }
    }
    return NO_ERROR;
}
897
// Requests that the data buffer be zeroed before it is deallocated,
// so sensitive contents do not linger in freed memory.
void Parcel::markSensitive() const
{
    mDeallocZero = true;
}
902
// Chooses this parcel's wire format based on the destination binder: a
// remote RPC binder switches the parcel into RPC mode; anything else leaves
// the default (kernel) format. Must be called before any data is written.
void Parcel::markForBinder(const sp<IBinder>& binder) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr, "format must be set before data is written");

    if (binder && binder->remoteBinder() && binder->remoteBinder()->isRpcBinder()) {
        markForRpc(binder->remoteBinder()->getPrivateAccessor().rpcSession());
    }
}
910
// Switches this parcel into RPC mode bound to `session`, replacing the
// kernel-mode state. Only legal before any data is written (or on parcels
// whose data is owned externally, i.e. mOwner set).
void Parcel::markForRpc(const sp<RpcSession>& session) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr && mOwner == nullptr,
                        "format must be set before data is written OR on IPC data");

    mVariantFields.emplace<RpcFields>(session);
}
917
// True when this parcel uses the RPC (socket) wire format rather than the
// kernel binder format.
bool Parcel::isForRpc() const {
    return std::holds_alternative<RpcFields>(mVariantFields);
}
921
// Records where the work-source request header lives so
// replaceCallingWorkSourceUid()/readCallingWorkSourceUid() can seek to it.
// No-op for RPC parcels, which carry no such header.
void Parcel::updateWorkSourceRequestHeaderPosition() const {
    auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return;
    }

    // Only update the request headers once. We only want to point
    // to the first headers read/written.
    if (!kernelFields->mRequestHeaderPresent) {
        kernelFields->mWorkSourceRequestHeaderPosition = dataPosition();
        kernelFields->mRequestHeaderPresent = true;
    }
}
935
#ifdef BINDER_WITH_KERNEL_IPC

#if defined(__ANDROID__)

// Marker written into every interface token identifying which libbinder
// variant produced the transaction (vendor, recovery, or system).
#if defined(__ANDROID_VNDK__)
constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
#elif defined(__ANDROID_RECOVERY__)
constexpr int32_t kHeader = B_PACK_CHARS('R', 'E', 'C', 'O');
#else
constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
#endif

#else // ANDROID not defined

// If kernel binder is used in new environments, we need to make sure it's separated
// out and has a separate header.
constexpr int32_t kHeader = B_PACK_CHARS('U', 'N', 'K', 'N');
#endif

#endif // BINDER_WITH_KERNEL_IPC
956
957 // Write RPC headers. (previously just the interface token)
// Write RPC headers. (previously just the interface token)
// Convenience overload delegating to the (char16_t*, len) form.
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    return writeInterfaceToken(interface.c_str(), interface.size());
}
962
// Writes the transaction headers (kernel parcels only: strict-mode policy,
// work-source uid, libbinder-variant marker) followed by the interface name.
// NOTE(review): the three header writeInt32() results are ignored here; a
// failure surfaces later via the parcel's error state -- confirm intended.
status_t Parcel::writeInterfaceToken(const char16_t* str, size_t len) {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        const IPCThreadState* threadState = IPCThreadState::self();
        writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
        updateWorkSourceRequestHeaderPosition();
        writeInt32(threadState->shouldPropagateWorkSource() ? threadState->getCallingWorkSourceUid()
                                                            : IPCThreadState::kUnsetWorkSource);
        writeInt32(kHeader);
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // currently the interface identification token is just its name as a string
    return writeString16(str, len);
}
981
replaceCallingWorkSourceUid(uid_t uid)982 bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
983 {
984 auto* kernelFields = maybeKernelFields();
985 if (kernelFields == nullptr) {
986 return false;
987 }
988 if (!kernelFields->mRequestHeaderPresent) {
989 return false;
990 }
991
992 const size_t initialPosition = dataPosition();
993 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
994 status_t err = writeInt32(uid);
995 setDataPosition(initialPosition);
996 return err == NO_ERROR;
997 }
998
readCallingWorkSourceUid() const999 uid_t Parcel::readCallingWorkSourceUid() const
1000 {
1001 auto* kernelFields = maybeKernelFields();
1002 if (kernelFields == nullptr) {
1003 return false;
1004 }
1005 if (!kernelFields->mRequestHeaderPresent) {
1006 return IPCThreadState::kUnsetWorkSource;
1007 }
1008
1009 const size_t initialPosition = dataPosition();
1010 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
1011 uid_t uid = readInt32();
1012 setDataPosition(initialPosition);
1013 return uid;
1014 }
1015
checkInterface(IBinder * binder) const1016 bool Parcel::checkInterface(IBinder* binder) const
1017 {
1018 return enforceInterface(binder->getInterfaceDescriptor());
1019 }
1020
enforceInterface(const String16 & interface,IPCThreadState * threadState) const1021 bool Parcel::enforceInterface(const String16& interface,
1022 IPCThreadState* threadState) const
1023 {
1024 return enforceInterface(interface.c_str(), interface.size(), threadState);
1025 }
1026
// Verifies this parcel is addressed to `interface`. For kernel-binder parcels
// it first consumes (in order) the header written by writeInterfaceToken():
// strict-mode policy, work-source uid, and variant marker — the read order
// here must mirror the write order there.
bool Parcel::enforceInterface(const char16_t* interface,
                              size_t len,
                              IPCThreadState* threadState) const
{
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        // StrictModePolicy.
        int32_t strictPolicy = readInt32();
        if (threadState == nullptr) {
            threadState = IPCThreadState::self();
        }
        if ((threadState->getLastTransactionBinderFlags() & IBinder::FLAG_ONEWAY) != 0) {
            // For one-way calls, the callee is running entirely
            // disconnected from the caller, so disable StrictMode entirely.
            // Not only does disk/network usage not impact the caller, but
            // there's no way to communicate back violations anyway.
            threadState->setStrictModePolicy(0);
        } else {
            threadState->setStrictModePolicy(strictPolicy);
        }
        // WorkSource.
        updateWorkSourceRequestHeaderPosition();
        int32_t workSource = readInt32();
        threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
        // vendor header
        int32_t header = readInt32();

        // fuzzers skip this check, because it is for protecting the underlying ABI, but
        // we don't want it to reduce our coverage
        if (header != kHeader && !mServiceFuzzing) {
            ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader,
                  header);
            return false;
        }
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)threadState;
        return false;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // Interface descriptor: compare the in-place UTF-16 string against ours.
    size_t parcel_interface_len;
    const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
    if (len == parcel_interface_len &&
        (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
        return true;
    } else {
        if (mServiceFuzzing) {
            // ignore. Theoretically, this could cause a few false positives, because
            // people could assume things about getInterfaceDescriptor if they pass
            // this point, but it would be extremely fragile. It's more important that
            // we fuzz with the above things read from the Parcel.
            return true;
        } else {
            ALOGW("**** enforceInterface() expected '%s' but read '%s'",
                  String8(interface, len).c_str(),
                  String8(parcel_interface, parcel_interface_len).c_str());
            return false;
        }
    }
}
1089
setEnforceNoDataAvail(bool enforceNoDataAvail)1090 void Parcel::setEnforceNoDataAvail(bool enforceNoDataAvail) {
1091 mEnforceNoDataAvail = enforceNoDataAvail;
1092 }
1093
setServiceFuzzing()1094 void Parcel::setServiceFuzzing() {
1095 mServiceFuzzing = true;
1096 }
1097
isServiceFuzzing() const1098 bool Parcel::isServiceFuzzing() const {
1099 return mServiceFuzzing;
1100 }
1101
enforceNoDataAvail() const1102 binder::Status Parcel::enforceNoDataAvail() const {
1103 if (!mEnforceNoDataAvail) {
1104 return binder::Status::ok();
1105 }
1106
1107 const auto n = dataAvail();
1108 if (n == 0) {
1109 return binder::Status::ok();
1110 }
1111 return binder::Status::
1112 fromExceptionCode(binder::Status::Exception::EX_BAD_PARCELABLE,
1113 String8::format("Parcel data not fully consumed, unread size: %zu",
1114 n));
1115 }
1116
objectsCount() const1117 size_t Parcel::objectsCount() const
1118 {
1119 if (const auto* kernelFields = maybeKernelFields()) {
1120 return kernelFields->mObjectsSize;
1121 }
1122 return 0;
1123 }
1124
errorCheck() const1125 status_t Parcel::errorCheck() const
1126 {
1127 return mError;
1128 }
1129
setError(status_t err)1130 void Parcel::setError(status_t err)
1131 {
1132 mError = err;
1133 }
1134
finishWrite(size_t len)1135 status_t Parcel::finishWrite(size_t len)
1136 {
1137 if (len > INT32_MAX) {
1138 // don't accept size_t values which may have come from an
1139 // inadvertent conversion from a negative int.
1140 return BAD_VALUE;
1141 }
1142
1143 //printf("Finish write of %d\n", len);
1144 mDataPos += len;
1145 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
1146 if (mDataPos > mDataSize) {
1147 mDataSize = mDataPos;
1148 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
1149 }
1150 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
1151 return NO_ERROR;
1152 }
1153
write(const void * data,size_t len)1154 status_t Parcel::write(const void* data, size_t len)
1155 {
1156 if (len > INT32_MAX) {
1157 // don't accept size_t values which may have come from an
1158 // inadvertent conversion from a negative int.
1159 return BAD_VALUE;
1160 }
1161
1162 void* const d = writeInplace(len);
1163 if (d) {
1164 memcpy(d, data, len);
1165 return NO_ERROR;
1166 }
1167 return mError;
1168 }
1169
// Reserves `len` bytes at the current write position and returns a pointer to
// them, or nullptr on overflow/validation/allocation failure. The reservation
// is padded up to a 4-byte multiple and the pad bytes are zeroed.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        uint8_t* const data = mData+mDataPos;

        if (status_t status = validateReadData(mDataPos + padded); status != OK) {
            return nullptr; // drops status
        }

        // Need to pad at end?
        if (padded != len) {
            // Zero the tail pad bytes by masking the final 32-bit word; the
            // mask table is chosen per host endianness so the bytes that are
            // kept are the ones actually written.
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    // Out of capacity: grow the buffer, then retry the in-capacity path.
    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}
1219
writeUtf8AsUtf16(const std::string & str)1220 status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
1221 const uint8_t* strData = (uint8_t*)str.data();
1222 const size_t strLen= str.length();
1223 const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
1224 if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
1225 return BAD_VALUE;
1226 }
1227
1228 status_t err = writeInt32(utf16Len);
1229 if (err) {
1230 return err;
1231 }
1232
1233 // Allocate enough bytes to hold our converted string and its terminating NULL.
1234 void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
1235 if (!dst) {
1236 return NO_MEMORY;
1237 }
1238
1239 utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);
1240
1241 return NO_ERROR;
1242 }
1243
1244
// --- Typed write overloads -------------------------------------------------
// Thin forwards to the templated writeData(), which handles the nullable
// (std::optional / std::unique_ptr) and vector encodings uniformly.

status_t Parcel::writeUtf8AsUtf16(const std::optional<std::string>& str) { return writeData(str); }
status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) { return writeData(str); }

status_t Parcel::writeString16(const std::optional<String16>& str) { return writeData(str); }
status_t Parcel::writeString16(const std::unique_ptr<String16>& str) { return writeData(str); }

status_t Parcel::writeByteVector(const std::vector<int8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<uint8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val){ return writeData(val); }
status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::optional<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::optional<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::optional<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::vector<float>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::optional<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::vector<double>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::optional<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::vector<bool>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::optional<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::vector<char16_t>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::optional<std::vector<char16_t>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val) { return writeData(val); }

status_t Parcel::writeString16Vector(const std::vector<String16>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::optional<std::vector<std::optional<String16>>>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::optional<std::vector<std::optional<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) { return writeData(val); }

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<unique_fd>& val) {
    return writeData(val);
}
status_t Parcel::writeUniqueFileDescriptorVector(const std::optional<std::vector<unique_fd>>& val) {
    return writeData(val);
}
status_t Parcel::writeUniqueFileDescriptorVector(
        const std::unique_ptr<std::vector<unique_fd>>& val) {
    return writeData(val);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::optional<std::vector<sp<IBinder>>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val) { return writeData(val); }

status_t Parcel::writeParcelable(const Parcelable& parcelable) { return writeData(parcelable); }
1306
// --- Typed read overloads --------------------------------------------------
// Thin forwards to the templated readData(), the mirror of writeData() above.

status_t Parcel::readUtf8FromUtf16(std::optional<std::string>* str) const { return readData(str); }
status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const { return readData(str); }

status_t Parcel::readString16(std::optional<String16>* pArg) const { return readData(pArg); }
status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const { return readData(pArg); }

status_t Parcel::readByteVector(std::vector<int8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::vector<uint8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::optional<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::optional<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::optional<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::optional<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::vector<float>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::optional<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::vector<double>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::optional<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::vector<bool>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::optional<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::vector<char16_t>* val) const { return readData(val); }

status_t Parcel::readString16Vector(
        std::optional<std::vector<std::optional<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(std::vector<String16>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::optional<std::vector<std::optional<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const { return readData(val); }

status_t Parcel::readUniqueFileDescriptorVector(std::optional<std::vector<unique_fd>>* val) const {
    return readData(val);
}
status_t Parcel::readUniqueFileDescriptorVector(
        std::unique_ptr<std::vector<unique_fd>>* val) const {
    return readData(val);
}
status_t Parcel::readUniqueFileDescriptorVector(std::vector<unique_fd>* val) const {
    return readData(val);
}

status_t Parcel::readStrongBinderVector(std::optional<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const { return readData(val); }

status_t Parcel::readParcelable(Parcelable* parcelable) const { return readData(parcelable); }
1368
writeInt32(int32_t val)1369 status_t Parcel::writeInt32(int32_t val)
1370 {
1371 return writeAligned(val);
1372 }
1373
writeUint32(uint32_t val)1374 status_t Parcel::writeUint32(uint32_t val)
1375 {
1376 return writeAligned(val);
1377 }
1378
writeInt32Array(size_t len,const int32_t * val)1379 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
1380 if (len > INT32_MAX) {
1381 // don't accept size_t values which may have come from an
1382 // inadvertent conversion from a negative int.
1383 return BAD_VALUE;
1384 }
1385
1386 if (!val) {
1387 return writeInt32(-1);
1388 }
1389 status_t ret = writeInt32(static_cast<uint32_t>(len));
1390 if (ret == NO_ERROR) {
1391 ret = write(val, len * sizeof(*val));
1392 }
1393 return ret;
1394 }
writeByteArray(size_t len,const uint8_t * val)1395 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
1396 if (len > INT32_MAX) {
1397 // don't accept size_t values which may have come from an
1398 // inadvertent conversion from a negative int.
1399 return BAD_VALUE;
1400 }
1401
1402 if (!val) {
1403 return writeInt32(-1);
1404 }
1405 status_t ret = writeInt32(static_cast<uint32_t>(len));
1406 if (ret == NO_ERROR) {
1407 ret = write(val, len * sizeof(*val));
1408 }
1409 return ret;
1410 }
1411
// --- Scalar writers --------------------------------------------------------
// bool, char16_t and int8_t are widened to int32 on the wire; wider scalars
// go through writeAligned() directly.

status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

// Pointers are written as binder_uintptr_t so 32- and 64-bit processes agree
// on the field width.
status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}
1446
#if defined(__mips__) && defined(__mips_hard_float)

// MIPS hard-float variant: the double is punned through an integer union so
// writeAligned() receives an integer argument — presumably to sidestep the
// FP-register calling convention on this target (TODO: confirm rationale).
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif
1467
writeCString(const char * str)1468 status_t Parcel::writeCString(const char* str)
1469 {
1470 return write(str, strlen(str)+1);
1471 }
1472
writeString8(const String8 & str)1473 status_t Parcel::writeString8(const String8& str)
1474 {
1475 return writeString8(str.c_str(), str.size());
1476 }
1477
writeString8(const char * str,size_t len)1478 status_t Parcel::writeString8(const char* str, size_t len)
1479 {
1480 if (str == nullptr) return writeInt32(-1);
1481
1482 // NOTE: Keep this logic in sync with android_os_Parcel.cpp
1483 status_t err = writeInt32(len);
1484 if (err == NO_ERROR) {
1485 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char));
1486 if (data) {
1487 memcpy(data, str, len);
1488 *reinterpret_cast<char*>(data+len) = 0;
1489 return NO_ERROR;
1490 }
1491 err = mError;
1492 }
1493 return err;
1494 }
1495
writeString16(const String16 & str)1496 status_t Parcel::writeString16(const String16& str)
1497 {
1498 return writeString16(str.c_str(), str.size());
1499 }
1500
writeString16(const char16_t * str,size_t len)1501 status_t Parcel::writeString16(const char16_t* str, size_t len)
1502 {
1503 if (str == nullptr) return writeInt32(-1);
1504
1505 // NOTE: Keep this logic in sync with android_os_Parcel.cpp
1506 status_t err = writeInt32(len);
1507 if (err == NO_ERROR) {
1508 len *= sizeof(char16_t);
1509 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1510 if (data) {
1511 memcpy(data, str, len);
1512 *reinterpret_cast<char16_t*>(data+len) = 0;
1513 return NO_ERROR;
1514 }
1515 err = mError;
1516 }
1517 return err;
1518 }
1519
writeStrongBinder(const sp<IBinder> & val)1520 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1521 {
1522 return flattenBinder(val);
1523 }
1524
1525
writeRawNullableParcelable(const Parcelable * parcelable)1526 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1527 if (!parcelable) {
1528 return writeInt32(0);
1529 }
1530
1531 return writeParcelable(*parcelable);
1532 }
1533
#ifndef BINDER_DISABLE_NATIVE_HANDLE
// Flattens a native_handle: numFds, numInts, each fd (duplicated into the
// parcel), then the raw int payload that follows the fds in handle->data.
status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    // The version field doubles as a struct-size sanity check.
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    // Dup each fd so the parcel owns its own copies.
    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    // Ints are stored immediately after the fds in the data array.
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}
#endif
1558
// Appends a file descriptor. When `takeOwnership` is true the parcel becomes
// responsible for closing `fd`; otherwise the caller must keep it alive.
// RPC parcels stash the fd in mFds and write a (type, index) pair inline;
// kernel parcels flatten it as a BINDER_TYPE_FD object.
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) {
    if (auto* rpcFields = maybeRpcFields()) {
        // Ownership choice is captured in the variant type itself.
        std::variant<unique_fd, borrowed_fd> fdVariant;
        if (takeOwnership) {
            fdVariant = unique_fd(fd);
        } else {
            fdVariant = borrowed_fd(fd);
        }
        if (!mAllowFds) {
            ALOGE("FDs are not allowed in this parcel. Both the service and the client must set "
                  "the FileDescriptorTransportMode and agree on the support.");
            return FDS_NOT_ALLOWED;
        }
        switch (rpcFields->mSession->getFileDescriptorTransportMode()) {
            case RpcSession::FileDescriptorTransportMode::NONE: {
                ALOGE("FDs are not allowed in this RpcSession. Both the service and the client "
                      "must set "
                      "the FileDescriptorTransportMode and agree on the support.");
                return FDS_NOT_ALLOWED;
            }
            case RpcSession::FileDescriptorTransportMode::UNIX:
            case RpcSession::FileDescriptorTransportMode::TRUSTY: {
                if (rpcFields->mFds == nullptr) {
                    rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
                }
                // Object positions are stored as 32-bit offsets on the wire.
                size_t dataPos = mDataPos;
                if (dataPos > UINT32_MAX) {
                    return NO_MEMORY;
                }
                // Inline encoding: type tag, then this fd's index into mFds.
                if (status_t err = writeInt32(RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR); err != OK) {
                    return err;
                }
                if (status_t err = writeInt32(rpcFields->mFds->size()); err != OK) {
                    return err;
                }
                rpcFields->mObjectPositions.push_back(dataPos);
                rpcFields->mFds->push_back(std::move(fdVariant));
                return OK;
            }
        }
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;
    obj.hdr.type = BINDER_TYPE_FD;
    obj.flags = 0;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)fd;
    (void)takeOwnership;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1616
writeDupFileDescriptor(int fd)1617 status_t Parcel::writeDupFileDescriptor(int fd)
1618 {
1619 int dupFd;
1620 if (status_t err = binder::os::dupFileDescriptor(fd, &dupFd); err != OK) {
1621 return err;
1622 }
1623 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1624 if (err != OK) {
1625 close(dupFd);
1626 }
1627 return err;
1628 }
1629
writeParcelFileDescriptor(int fd,bool takeOwnership)1630 status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1631 {
1632 writeInt32(0);
1633 return writeFileDescriptor(fd, takeOwnership);
1634 }
1635
writeDupParcelFileDescriptor(int fd)1636 status_t Parcel::writeDupParcelFileDescriptor(int fd)
1637 {
1638 int dupFd;
1639 if (status_t err = binder::os::dupFileDescriptor(fd, &dupFd); err != OK) {
1640 return err;
1641 }
1642 status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
1643 if (err != OK) {
1644 close(dupFd);
1645 }
1646 return err;
1647 }
1648
writeUniqueFileDescriptor(const unique_fd & fd)1649 status_t Parcel::writeUniqueFileDescriptor(const unique_fd& fd) {
1650 return writeDupFileDescriptor(fd.get());
1651 }
1652
// Writes a blob of |len| bytes. Small blobs (or when fds are disallowed)
// are stored inline in the parcel; larger ones go through an ashmem region
// whose fd is written into the parcel. On success |outBlob| points at the
// writable memory the caller should fill.
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
#ifdef BINDER_DISABLE_BLOB
    (void)len;
    (void)mutableCopy;
    (void)outBlob;
    return INVALID_OPERATION;
#else
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        // Inline path: a type tag followed by the raw bytes.
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    // Ashmem path: create/map a shared region, write its fd, and hand the
    // mapping to the caller. Any failure below unmaps/closes before return.
    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                // Drop write permission on the region for the receiver;
                // our existing writable mapping is unaffected.
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    // On success the parcel takes ownership of |fd|.
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        // Error cleanup: unmap, then fall through to close the fd.
        if (::munmap(ptr, len) == -1) {
            ALOGW("munmap() failed: %s", strerror(errno));
        }
    }
    ::close(fd);
    return status;
#endif
}
1716
writeDupImmutableBlobFileDescriptor(int fd)1717 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1718 {
1719 // Must match up with what's done in writeBlob.
1720 if (!mAllowFds) return FDS_NOT_ALLOWED;
1721 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1722 if (status) return status;
1723 return writeDupFileDescriptor(fd);
1724 }
1725
// Serializes a Flattenable: int32 byte-length, int32 fd-count, the
// flattened payload, then each fd written as a dup owned by the parcel.
status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    err = this->writeInt32(len);
    if (err) return err;

    err = this->writeInt32(fd_count);
    if (err) return err;

    // payload
    void* const buf = this->writeInplace(len);
    if (buf == nullptr)
        return BAD_VALUE;

    int* fds = nullptr;
    if (fd_count) {
        fds = new (std::nothrow) int[fd_count];
        if (fds == nullptr) {
            ALOGE("write: failed to allocate requested %zu fds", fd_count);
            return BAD_VALUE;
        }
    }

    // flatten() fills |buf| and |fds|; the fds presumably stay owned by
    // |val| (TODO confirm), so each is written as a fresh dup owned by
    // the parcel.
    err = val.flatten(buf, len, fds, fd_count);
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        err = this->writeDupFileDescriptor( fds[i] );
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}
1771
// Writes a flat_binder_object into the parcel, recording its offset in the
// object index when it carries meta-data the kernel must translate.
// Only valid for kernel-binder parcels; aborts on RPC parcels.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr, "Can't write flat_binder_object to RPC Parcel");

#ifdef BINDER_WITH_KERNEL_IPC
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = kernelFields->mObjectsSize < kernelFields->mObjectsCapacity;
    if (enoughData && enoughObjects) {
    restart_write:
        // Refuse to overwrite bytes belonging to an already-recorded object.
        if (status_t status = validateReadData(mDataPos + sizeof(val)); status != OK) {
            return status;
        }

        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            kernelFields->mHasFds = kernelFields->mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            kernelFields->mObjects[kernelFields->mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this);
            kernelFields->mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    // Slow path: grow whichever buffer was too small, then retry above.
    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the object index by ~1.5x, with explicit overflow checks.
        if (kernelFields->mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
        if ((kernelFields->mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
        size_t newSize = ((kernelFields->mObjectsSize + 2) * 3) / 2;
        if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
        binder_size_t* objects =
                (binder_size_t*)realloc(kernelFields->mObjects, newSize * sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        kernelFields->mObjects = objects;
        kernelFields->mObjectsCapacity = newSize;
    }

    goto restart_write;
#else // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)val;
    (void)nullMetaData;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1831
writeNoException()1832 status_t Parcel::writeNoException()
1833 {
1834 binder::Status status;
1835 return status.writeToParcel(this);
1836 }
1837
// Verifies that the byte range ending at |upperBound| (starting at mDataPos)
// does not overlap any recorded binder object, so plain-data reads/writes
// cannot touch object bytes. Keeps mObjects sorted lazily and caches a
// "next object" hint so repeated sequential checks are cheap.
status_t Parcel::validateReadData(size_t upperBound) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        // Can't validate RPC Parcel reads because the location of binder
        // objects is unknown.
        return OK;
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Don't allow non-object reads on object data
    if (kernelFields->mObjectsSorted || kernelFields->mObjectsSize <= 1) {
    data_sorted:
        // Expect to check only against the next object
        if (kernelFields->mNextObjectHint < kernelFields->mObjectsSize &&
            upperBound > kernelFields->mObjects[kernelFields->mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = kernelFields->mNextObjectHint;
            do {
                if (mDataPos < kernelFields->mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    if (!mServiceFuzzing) {
                        ALOGE("Attempt to read or write from protected data in Parcel %p. pos: "
                              "%zu, nextObject: %zu, object offset: %llu, object size: %zu",
                              this, mDataPos, nextObject, kernelFields->mObjects[nextObject],
                              sizeof(flat_binder_object));
                    }
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < kernelFields->mObjectsSize &&
                     upperBound > kernelFields->mObjects[nextObject]);
            // Remember where to start checking next time.
            kernelFields->mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }
    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = kernelFields->mObjects + kernelFields->mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > kernelFields->mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    kernelFields->mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = kernelFields->mObjects + 1;
         iter0 < kernelFields->mObjects + kernelFields->mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= kernelFields->mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    kernelFields->mNextObjectHint = 0;
    kernelFields->mObjectsSorted = true;
    goto data_sorted;
#else // BINDER_WITH_KERNEL_IPC
    (void)upperBound;
    return NO_ERROR;
#endif // BINDER_WITH_KERNEL_IPC
}
1910
// Copies |len| bytes out of the parcel into |outData|, advancing mDataPos
// by the 4-byte-padded length.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // The first and third conditions also reject pad_size/addition overflow.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject reads overlapping recorded binder objects.
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
                return err;
            }
        }
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}
1938
// Returns a pointer to |len| bytes of parcel data without copying,
// advancing mDataPos by the padded length; nullptr on failure.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    // The first and third conditions also reject pad_size/addition overflow.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject reads overlapping recorded binder objects.
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
                return nullptr;
            }
        }

        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return nullptr;
}
1967
// Reads the int32 length header of an "out" vector and sanity-checks that
// |*size| elements of |elmSize| bytes would stay under a 1MB cap. Returns
// OK with a negative *size so callers can treat it as a null vector.
status_t Parcel::readOutVectorSizeWithCheck(size_t elmSize, int32_t* size) const {
    if (status_t status = readInt32(size); status != OK) return status;
    if (*size < 0) return OK; // may be null, client to handle

    LOG_ALWAYS_FATAL_IF(elmSize > INT32_MAX, "Cannot have element as big as %zu", elmSize);

    // approximation, can't know max element size (e.g. if it makes heap
    // allocations)
    static_assert(sizeof(int) == sizeof(int32_t), "Android is LP64");
    int32_t allocationSize;
    // Overflow-checked elmSize * *size in 32-bit signed arithmetic.
    if (__builtin_smul_overflow(elmSize, *size, &allocationSize)) return NO_MEMORY;

    // High limit of 1MB since something this big could never be returned. Could
    // probably scope this down, but might impact very specific usecases.
    constexpr int32_t kMaxAllocationSize = 1 * 1000 * 1000;

    if (allocationSize >= kMaxAllocationSize) {
        return NO_MEMORY;
    }

    return OK;
}
1990
// Reads one trivially-copyable value of type T whose size is already
// 4-byte aligned (enforced at compile time) into |pArg|.
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject reads overlapping recorded binder objects.
            status_t err = validateReadData(mDataPos + sizeof(T));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += sizeof(T);
                return err;
            }
        }

        memcpy(pArg, mData + mDataPos, sizeof(T));
        mDataPos += sizeof(T);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}
2014
2015 template<class T>
readAligned() const2016 T Parcel::readAligned() const {
2017 T result;
2018 if (readAligned(&result) != NO_ERROR) {
2019 result = 0;
2020 }
2021
2022 return result;
2023 }
2024
// Writes one trivially-copyable, already-aligned value, growing the data
// buffer and retrying (restart_write) when capacity is insufficient.
template<class T>
status_t Parcel::writeAligned(T val) {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
    restart_write:
        // Refuse to overwrite bytes belonging to a recorded binder object.
        if (status_t status = validateReadData(mDataPos + sizeof(val)); status != OK) {
            return status;
        }

        memcpy(mData + mDataPos, &val, sizeof(val));
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}
2044
// Typed convenience wrappers over readAligned(). The pointer overloads
// report a status; the value overloads return 0 on failure.
status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}

int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}

status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}

uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}

status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}


int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}

status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}

uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}

status_t Parcel::readPointer(uintptr_t *pArg) const
{
    status_t ret;
    binder_uintptr_t ptr;
    ret = readAligned(&ptr);
    // status_t uses 0 (NO_ERROR) for success, so !ret means "read OK".
    if (!ret)
        *pArg = ptr;
    // NOTE(review): on 32-bit builds this narrows binder_uintptr_t to
    // uintptr_t — assumed intentional for the binder wire format; verify.
    return ret;
}

uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}


status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}


float Parcel::readFloat() const
{
    return readAligned<float>();
}
2112
// On MIPS hard-float builds, doubles are read through an integer union —
// presumably to keep the value out of FPU registers during unmarshalling
// (predates this view; verify) — elsewhere they are read directly.
#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif
2151
readBool(bool * pArg) const2152 status_t Parcel::readBool(bool *pArg) const
2153 {
2154 int32_t tmp = 0;
2155 status_t ret = readInt32(&tmp);
2156 *pArg = (tmp != 0);
2157 return ret;
2158 }
2159
readBool() const2160 bool Parcel::readBool() const
2161 {
2162 return readInt32() != 0;
2163 }
2164
readChar(char16_t * pArg) const2165 status_t Parcel::readChar(char16_t *pArg) const
2166 {
2167 int32_t tmp = 0;
2168 status_t ret = readInt32(&tmp);
2169 *pArg = char16_t(tmp);
2170 return ret;
2171 }
2172
readChar() const2173 char16_t Parcel::readChar() const
2174 {
2175 return char16_t(readInt32());
2176 }
2177
readByte(int8_t * pArg) const2178 status_t Parcel::readByte(int8_t *pArg) const
2179 {
2180 int32_t tmp = 0;
2181 status_t ret = readInt32(&tmp);
2182 *pArg = int8_t(tmp);
2183 return ret;
2184 }
2185
readByte() const2186 int8_t Parcel::readByte() const
2187 {
2188 return int8_t(readInt32());
2189 }
2190
// Reads a UTF-16 string from the parcel and stores it UTF-8-encoded in
// |str|. Returns UNEXPECTED_NULL for a null string and BAD_VALUE when the
// UTF-16 data cannot be measured for conversion.
status_t Parcel::readUtf8FromUtf16(std::string* str) const {
    size_t utf16Size = 0;
    const char16_t* src = readString16Inplace(&utf16Size);
    if (!src) {
        return UNEXPECTED_NULL;
    }

    // Save ourselves the trouble, we're done.
    if (utf16Size == 0u) {
        str->clear();
        return NO_ERROR;
    }

    // Allow for closing '\0'
    ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
    if (utf8Size < 1) {
        return BAD_VALUE;
    }
    // Note that while it is probably safe to assume string::resize keeps a
    // spare byte around for the trailing null, we still pass the size including the trailing null
    str->resize(utf8Size);
    utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
    // Trim the NUL the converter wrote; std::string tracks length itself.
    str->resize(utf8Size - 1);
    return NO_ERROR;
}
2216
readCString() const2217 const char* Parcel::readCString() const
2218 {
2219 if (mDataPos < mDataSize) {
2220 const size_t avail = mDataSize-mDataPos;
2221 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
2222 // is the string's trailing NUL within the parcel's valid bounds?
2223 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
2224 if (eos) {
2225 const size_t len = eos - str;
2226 return static_cast<const char*>(readInplace(len + 1));
2227 }
2228 }
2229 return nullptr;
2230 }
2231
readString8() const2232 String8 Parcel::readString8() const
2233 {
2234 size_t len;
2235 const char* str = readString8Inplace(&len);
2236 if (str) return String8(str, len);
2237
2238 if (!mServiceFuzzing) {
2239 ALOGE("Reading a NULL string not supported here.");
2240 }
2241
2242 return String8();
2243 }
2244
readString8(String8 * pArg) const2245 status_t Parcel::readString8(String8* pArg) const
2246 {
2247 size_t len;
2248 const char* str = readString8Inplace(&len);
2249 if (str) {
2250 pArg->setTo(str, len);
2251 return 0;
2252 } else {
2253 *pArg = String8();
2254 return UNEXPECTED_NULL;
2255 }
2256 }
2257
// Returns an in-place pointer to a NUL-terminated 8-bit string and its
// length, or nullptr (with *outLen = 0) when the size header or the
// terminator is invalid.
const char* Parcel::readString8Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char* str = (const char*)readInplace(size+1);
        if (str != nullptr) {
            if (str[size] == '\0') {
                return str;
            }
            // Missing terminator: report to the SafetyNet log (b/172655291).
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2275
readString16() const2276 String16 Parcel::readString16() const
2277 {
2278 size_t len;
2279 const char16_t* str = readString16Inplace(&len);
2280 if (str) return String16(str, len);
2281
2282 if (!mServiceFuzzing) {
2283 ALOGE("Reading a NULL string not supported here.");
2284 }
2285
2286 return String16();
2287 }
2288
2289
readString16(String16 * pArg) const2290 status_t Parcel::readString16(String16* pArg) const
2291 {
2292 size_t len;
2293 const char16_t* str = readString16Inplace(&len);
2294 if (str) {
2295 pArg->setTo(str, len);
2296 return 0;
2297 } else {
2298 *pArg = String16();
2299 return UNEXPECTED_NULL;
2300 }
2301 }
2302
// Returns an in-place pointer to a NUL-terminated UTF-16 string and its
// length in char16_t units, or nullptr (with *outLen = 0) when the size
// header or the terminator is invalid.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != nullptr) {
            if (str[size] == u'\0') {
                return str;
            }
            // Missing terminator: report to the SafetyNet log (b/172655291).
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2320
readStrongBinder(sp<IBinder> * val) const2321 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2322 {
2323 status_t status = readNullableStrongBinder(val);
2324 if (status == OK && !val->get()) {
2325 if (!mServiceFuzzing) {
2326 ALOGW("Expecting binder but got null!");
2327 }
2328 status = UNEXPECTED_NULL;
2329 }
2330 return status;
2331 }
2332
// Reads a binder from the parcel, accepting null as a valid result.
status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflattenBinder(val);
}
2337
readStrongBinder() const2338 sp<IBinder> Parcel::readStrongBinder() const
2339 {
2340 sp<IBinder> val;
2341 // Note that a lot of code in Android reads binders by hand with this
2342 // method, and that code has historically been ok with getting nullptr
2343 // back (while ignoring error codes).
2344 readNullableStrongBinder(&val);
2345 return val;
2346 }
2347
readExceptionCode() const2348 int32_t Parcel::readExceptionCode() const
2349 {
2350 binder::Status status;
2351 status.readFromParcel(*this);
2352 return status.exceptionCode();
2353 }
2354
2355 #ifndef BINDER_DISABLE_NATIVE_HANDLE
// Reconstructs a native_handle: int32 fd-count, int32 int-count, the fds
// (dup'ed here so the handle owns its own copies), then the raw ints.
// The caller owns the returned handle and its fds; nullptr on any error.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return nullptr;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return nullptr;

    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return nullptr;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        // readFileDescriptor() returns a non-owned fd; dup it with CLOEXEC.
        // A negative return feeds fcntl an invalid fd, which fails below.
        h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
        if (h->data[i] < 0) {
            // Close only the fds we already dup'ed before bailing out.
            for (int j = 0; j < i; j++) {
                close(h->data[j]);
            }
            native_handle_delete(h);
            return nullptr;
        }
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        // native_handle_close closes the dup'ed fds; then free the handle.
        native_handle_close(h);
        native_handle_delete(h);
        h = nullptr;
    }
    return h;
}
2388 #endif
2389
// Reads a file descriptor from the parcel. The returned fd is NOT owned by
// the caller (dup it to keep it); a negative status (BAD_TYPE/BAD_VALUE/
// INVALID_OPERATION) is returned on failure.
int Parcel::readFileDescriptor() const {
    if (const auto* rpcFields = maybeRpcFields()) {
        // RPC path: fds live out-of-line in mFds; the payload holds a type
        // tag plus an index, and mObjectPositions marks the valid offsets.
        if (!std::binary_search(rpcFields->mObjectPositions.begin(),
                                rpcFields->mObjectPositions.end(), mDataPos)) {
            if (!mServiceFuzzing) {
                ALOGW("Attempt to read file descriptor from Parcel %p at offset %zu that is not in "
                      "the object list",
                      this, mDataPos);
            }
            return BAD_TYPE;
        }

        int32_t objectType = readInt32();
        if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            return BAD_TYPE;
        }

        int32_t fdIndex = readInt32();
        if (rpcFields->mFds == nullptr || fdIndex < 0 ||
            static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
            ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                  fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
            return BAD_VALUE;
        }
        return toRawFd(rpcFields->mFds->at(fdIndex));
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Kernel path: the fd travels inline as a flat_binder_object.
    const flat_binder_object* flat = readObject(true);

    if (flat && flat->hdr.type == BINDER_TYPE_FD) {
        return flat->handle;
    }

    return BAD_TYPE;
#else // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
2430
// Reads a ParcelFileDescriptor: an int32 flag saying whether a "comm"
// channel fd follows, the fd itself, then (optionally) the comm fd, which
// is used to tell the sender's ParcelFileDescriptor that it was detached.
int Parcel::readParcelFileDescriptor() const {
    int32_t hasComm = readInt32();
    int fd = readFileDescriptor();
    if (hasComm != 0) {
        // detach (owned by the binder driver)
        int comm = readFileDescriptor();

        // warning: this must be kept in sync with:
        // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
        enum ParcelFileDescriptorStatus {
            DETACHED = 2,
        };

        // Both branches put the status on the wire in big-endian byte
        // order: big-endian hosts write it as-is, little-endian hosts swap.
#if BYTE_ORDER == BIG_ENDIAN
        const int32_t message = ParcelFileDescriptorStatus::DETACHED;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
        const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
#endif

        ssize_t written = TEMP_FAILURE_RETRY(
            ::write(comm, &message, sizeof(message)));

        if (written != sizeof(message)) {
            ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
                  written, strerror(errno));
            return BAD_TYPE;
        }
    }
    return fd;
}
2462
readUniqueFileDescriptor(unique_fd * val) const2463 status_t Parcel::readUniqueFileDescriptor(unique_fd* val) const {
2464 int got = readFileDescriptor();
2465
2466 if (got == BAD_TYPE) {
2467 return BAD_TYPE;
2468 }
2469
2470 int dupFd;
2471 if (status_t err = binder::os::dupFileDescriptor(got, &dupFd); err != OK) {
2472 return BAD_VALUE;
2473 }
2474
2475 val->reset(dupFd);
2476
2477 if (val->get() < 0) {
2478 return BAD_VALUE;
2479 }
2480
2481 return OK;
2482 }
2483
readUniqueParcelFileDescriptor(unique_fd * val) const2484 status_t Parcel::readUniqueParcelFileDescriptor(unique_fd* val) const {
2485 int got = readParcelFileDescriptor();
2486
2487 if (got == BAD_TYPE) {
2488 return BAD_TYPE;
2489 }
2490
2491 int dupFd;
2492 if (status_t err = binder::os::dupFileDescriptor(got, &dupFd); err != OK) {
2493 return BAD_VALUE;
2494 }
2495
2496 val->reset(dupFd);
2497
2498 if (val->get() < 0) {
2499 return BAD_VALUE;
2500 }
2501
2502 return OK;
2503 }
2504
// Reads a blob written by writeBlob: either inline payload bytes or an
// ashmem fd that gets mmap'ed here. On success |outBlob| refers to the
// data (and, for ashmem, the new mapping).
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
#ifdef BINDER_DISABLE_BLOB
    (void)len;
    (void)outBlob;
    return INVALID_OPERATION;
#else
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    // Any other tag is treated as an ashmem blob; only BLOB_ASHMEM_MUTABLE
    // yields a writable mapping.
    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    if (!ashmem_valid(fd)) {
        ALOGE("invalid fd");
        return BAD_VALUE;
    }
    // The region must be at least as large as the caller-requested length.
    int size = ashmem_get_size_region(fd);
    if (size < 0 || size_t(size) < len) {
        ALOGE("request size %zu does not match fd size %d", len, size);
        return BAD_VALUE;
    }
    void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
            MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(fd, ptr, len, isMutable);
    return NO_ERROR;
#endif
}
2547
read(FlattenableHelperInterface & val) const2548 status_t Parcel::read(FlattenableHelperInterface& val) const
2549 {
2550 // size
2551 const size_t len = this->readInt32();
2552 const size_t fd_count = this->readInt32();
2553
2554 if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
2555 // don't accept size_t values which may have come from an
2556 // inadvertent conversion from a negative int.
2557 return BAD_VALUE;
2558 }
2559
2560 // payload
2561 void const* const buf = this->readInplace(pad_size(len));
2562 if (buf == nullptr)
2563 return BAD_VALUE;
2564
2565 int* fds = nullptr;
2566 if (fd_count) {
2567 fds = new (std::nothrow) int[fd_count];
2568 if (fds == nullptr) {
2569 ALOGE("read: failed to allocate requested %zu fds", fd_count);
2570 return BAD_VALUE;
2571 }
2572 }
2573
2574 status_t err = NO_ERROR;
2575 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2576 int fd = this->readFileDescriptor();
2577 if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2578 err = BAD_VALUE;
2579 ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2580 i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2581 // Close all the file descriptors that were dup-ed.
2582 for (size_t j=0; j<i ;j++) {
2583 close(fds[j]);
2584 }
2585 }
2586 }
2587
2588 if (err == NO_ERROR) {
2589 err = val.unflatten(buf, len, fds, fd_count);
2590 }
2591
2592 if (fd_count) {
2593 delete [] fds;
2594 }
2595
2596 return err;
2597 }
2598
2599 #ifdef BINDER_WITH_KERNEL_IPC
// Reads a flat_binder_object at the current position, verifying (unless it
// is a null object and nullMetaData is false) that the offset is recorded
// in the parcel's object index. Uses mNextObjectHint to make sequential
// lookups cheap, searching forward then backward from the hint.
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        // RPC parcels carry no inline flat_binder_objects.
        return nullptr;
    }

    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = kernelFields->mObjects;
        const size_t N = kernelFields->mObjectsSize;
        size_t opos = kernelFields->mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                  this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        if (!mServiceFuzzing) {
            ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object "
                  "list",
                  this, DPOS);
        }
    }
    return nullptr;
}
2668 #endif // BINDER_WITH_KERNEL_IPC
2669
// Closes the fds of all recorded objects with index >= newObjectsSize
// (kernel parcels) or drops the out-of-line fd list (RPC parcels), so fds
// are not leaked when the parcel is truncated or freed.
void Parcel::closeFileDescriptors(size_t newObjectsSize) {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t i = kernelFields->mObjectsSize;
        if (i > 0) {
            // ALOGI("Closing file descriptors for %zu objects...", i);
        }
        // Walk the object index from the end, closing fd-type objects.
        while (i > newObjectsSize) {
            i--;
            const flat_binder_object* flat =
                    reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                // ALOGI("Closing fd: %ld", flat->handle);
                // FDs from the kernel are always owned
                FdTagClose(flat->handle, this);
            }
        }
#else // BINDER_WITH_KERNEL_IPC
        (void)newObjectsSize;
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
#endif // BINDER_WITH_KERNEL_IPC
    } else if (auto* rpcFields = maybeRpcFields()) {
        // Dropping mFds releases the stored descriptors — presumably any
        // owned fds are closed by their owning wrappers; verify.
        rpcFields->mFds.reset();
    }
}
2695
// Returns the raw data buffer as an integer, suitable for handing to the
// kernel binder interface. May be 0 if no data has been allocated.
uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}
2700
ipcDataSize() const2701 size_t Parcel::ipcDataSize() const
2702 {
2703 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2704 }
2705
ipcObjects() const2706 uintptr_t Parcel::ipcObjects() const
2707 {
2708 if (const auto* kernelFields = maybeKernelFields()) {
2709 return reinterpret_cast<uintptr_t>(kernelFields->mObjects);
2710 }
2711 return 0;
2712 }
2713
ipcObjectsCount() const2714 size_t Parcel::ipcObjectsCount() const
2715 {
2716 if (const auto* kernelFields = maybeKernelFields()) {
2717 return kernelFields->mObjectsSize;
2718 }
2719 return 0;
2720 }
2721
// Adopt an externally-owned data buffer and binder-object offset table
// (typically handed to us by the kernel driver) without copying. The buffer
// is released later by calling relFunc (see freeDataNoInit). Each object
// offset is validated: offsets must be non-overlapping and in increasing
// order, and each object must be a type libbinder supports; on any violation
// the object table is cleared (mObjectsSize = 0) to recover gracefully.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
                                 size_t objectsCount, release_func relFunc) {
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    // Drop any buffer we currently hold; this also resets us to kernel mode.
    freeData();

    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr); // guaranteed by freeData.

    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    kernelFields->mObjects = const_cast<binder_size_t*>(objects);
    kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsCount;
    mOwner = relFunc;

#ifdef BINDER_WITH_KERNEL_IPC
    // Validate the offset table: each object must start at or after the end
    // of the previous one, so objects cannot overlap or go backwards.
    binder_size_t minOffset = 0;
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            kernelFields->mObjectsSize = 0;
            break;
        }
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + offset);
        uint32_t type = flat->hdr.type;
        if (!(type == BINDER_TYPE_BINDER || type == BINDER_TYPE_HANDLE ||
              type == BINDER_TYPE_FD)) {
            // We should never receive other types (eg BINDER_TYPE_FDA) as long as we don't support
            // them in libbinder. If we do receive them, it probably means a kernel bug; try to
            // recover gracefully by clearing out the objects.
            android_errorWriteLog(0x534e4554, "135930648");
            android_errorWriteLog(0x534e4554, "203847542");
            ALOGE("%s: unsupported type object (%" PRIu32 ") at offset %" PRIu64 "\n",
                  __func__, type, (uint64_t)offset);

            // WARNING: callers of ipcSetDataReference need to make sure they
            // don't rely on mObjectsSize in their release_func.
            kernelFields->mObjectsSize = 0;
            break;
        }
        if (type == BINDER_TYPE_FD) {
            // FDs from the kernel are always owned
            FdTag(flat->handle, nullptr, this);
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    // Recompute mHasFds / mFdsKnown for the adopted buffer.
    scanForFds();
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL_IF(objectsCount != 0,
                        "Non-zero objects count passed to Parcel with kernel driver disabled");
#endif // BINDER_WITH_KERNEL_IPC
}
2778
// RPC counterpart of ipcSetDataReference: adopt an externally-owned buffer
// plus an object-position table and the ancillary FDs that travelled with it.
// Every object position must have at least one ObjectType header within the
// buffer, and there must be exactly one ancillary FD entry per object; on any
// validation failure the buffer is handed straight back to relFunc and
// BAD_VALUE is returned, leaving this Parcel untouched.
status_t Parcel::rpcSetDataReference(
        const sp<RpcSession>& session, const uint8_t* data, size_t dataSize,
        const uint32_t* objectTable, size_t objectTableSize,
        std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds, release_func relFunc) {
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    LOG_ALWAYS_FATAL_IF(session == nullptr);

    if (objectTableSize != ancillaryFds.size()) {
        ALOGE("objectTableSize=%zu ancillaryFds.size=%zu", objectTableSize, ancillaryFds.size());
        // Validation failed before adoption; let the caller's cleanup run now.
        relFunc(data, dataSize, nullptr, 0);
        return BAD_VALUE;
    }
    for (size_t i = 0; i < objectTableSize; i++) {
        uint32_t minObjectEnd;
        // Overflow-safe check that the object's type header fits in the buffer.
        if (__builtin_add_overflow(objectTable[i], sizeof(RpcFields::ObjectType), &minObjectEnd) ||
            minObjectEnd >= dataSize) {
            ALOGE("received out of range object position: %" PRIu32 " (parcel size is %zu)",
                  objectTable[i], dataSize);
            relFunc(data, dataSize, nullptr, 0);
            return BAD_VALUE;
        }
    }

    // Validation passed: drop current contents and switch to RPC mode.
    freeData();
    markForRpc(session);

    auto* rpcFields = maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr); // guaranteed by markForRpc.

    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    mOwner = relFunc;

    rpcFields->mObjectPositions.reserve(objectTableSize);
    for (size_t i = 0; i < objectTableSize; i++) {
        rpcFields->mObjectPositions.push_back(objectTable[i]);
    }
    if (!ancillaryFds.empty()) {
        // Take ownership of the FDs that accompanied the data.
        rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
        *rpcFields->mFds = std::move(ancillaryFds);
    }

    return OK;
}
2825
print(std::ostream & to,uint32_t) const2826 void Parcel::print(std::ostream& to, uint32_t /*flags*/) const {
2827 to << "Parcel(";
2828
2829 if (errorCheck() != NO_ERROR) {
2830 const status_t err = errorCheck();
2831 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2832 } else if (dataSize() > 0) {
2833 const uint8_t* DATA = data();
2834 to << "\t" << HexDump(DATA, dataSize());
2835 #ifdef BINDER_WITH_KERNEL_IPC
2836 if (const auto* kernelFields = maybeKernelFields()) {
2837 const binder_size_t* OBJS = kernelFields->mObjects;
2838 const size_t N = objectsCount();
2839 for (size_t i = 0; i < N; i++) {
2840 const flat_binder_object* flat =
2841 reinterpret_cast<const flat_binder_object*>(DATA + OBJS[i]);
2842 to << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2843 << TypeCode(flat->hdr.type & 0x7f7f7f00) << " = " << flat->binder;
2844 }
2845 }
2846 #endif // BINDER_WITH_KERNEL_IPC
2847 } else {
2848 to << "NULL";
2849 }
2850
2851 to << ")";
2852 }
2853
releaseObjects()2854 void Parcel::releaseObjects()
2855 {
2856 auto* kernelFields = maybeKernelFields();
2857 if (kernelFields == nullptr) {
2858 return;
2859 }
2860
2861 #ifdef BINDER_WITH_KERNEL_IPC
2862 size_t i = kernelFields->mObjectsSize;
2863 if (i == 0) {
2864 return;
2865 }
2866 sp<ProcessState> proc(ProcessState::self());
2867 uint8_t* const data = mData;
2868 binder_size_t* const objects = kernelFields->mObjects;
2869 while (i > 0) {
2870 i--;
2871 const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
2872 release_object(proc, *flat, this);
2873 }
2874 #endif // BINDER_WITH_KERNEL_IPC
2875 }
2876
acquireObjects()2877 void Parcel::acquireObjects()
2878 {
2879 auto* kernelFields = maybeKernelFields();
2880 if (kernelFields == nullptr) {
2881 return;
2882 }
2883
2884 #ifdef BINDER_WITH_KERNEL_IPC
2885 size_t i = kernelFields->mObjectsSize;
2886 if (i == 0) {
2887 return;
2888 }
2889 const sp<ProcessState> proc(ProcessState::self());
2890 uint8_t* const data = mData;
2891 binder_size_t* const objects = kernelFields->mObjects;
2892 while (i > 0) {
2893 i--;
2894 const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
2895 acquire_object(proc, *flat, this);
2896 }
2897 #endif // BINDER_WITH_KERNEL_IPC
2898 }
2899
// Release all held data/objects and reset this Parcel to a pristine empty
// (kernel-mode) state.
void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}
2905
// Release the data buffer and object bookkeeping WITHOUT resetting member
// fields afterwards; callers must follow with initState() (see freeData()).
// Two ownership regimes: if mOwner is set, the buffer belongs to someone else
// and is returned via the release_func; otherwise we allocated it ourselves
// and must drop object references, optionally scrub, and free().
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        auto* kernelFields = maybeKernelFields();
        // Close FDs before freeing, otherwise they will leak for kernel binder.
        closeFileDescriptors(/*newObjectsSize=*/0);
        // Hand the buffer and offset table back to their owner.
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        // Drop refs on binder objects before freeing the bytes describing them.
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            // Keep the global allocation statistics in sync.
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            if (mDeallocZero) {
                // Scrub potentially sensitive contents before returning memory.
                zeroMemory(mData, mDataSize);
            }
            free(mData);
        }
        auto* kernelFields = maybeKernelFields();
        if (kernelFields && kernelFields->mObjects) free(kernelFields->mObjects);
    }
}
2932
growData(size_t len)2933 status_t Parcel::growData(size_t len)
2934 {
2935 if (len > INT32_MAX) {
2936 // don't accept size_t values which may have come from an
2937 // inadvertent conversion from a negative int.
2938 return BAD_VALUE;
2939 }
2940
2941 if (mDataPos > mDataSize) {
2942 // b/370831157 - this case used to abort. We also don't expect mDataPos < mDataSize, but
2943 // this would only waste a bit of memory, so it's okay.
2944 ALOGE("growData only expected at the end of a Parcel. pos: %zu, size: %zu, capacity: %zu",
2945 mDataPos, len, mDataCapacity);
2946 return BAD_VALUE;
2947 }
2948
2949 if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
2950 if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
2951 size_t newSize = ((mDataSize+len)*3)/2;
2952 return (newSize <= mDataSize)
2953 ? (status_t) NO_MEMORY
2954 : continueWrite(std::max(newSize, (size_t) 128));
2955 }
2956
reallocZeroFree(uint8_t * data,size_t oldCapacity,size_t newCapacity,bool zero)2957 static uint8_t* reallocZeroFree(uint8_t* data, size_t oldCapacity, size_t newCapacity, bool zero) {
2958 if (!zero) {
2959 return (uint8_t*)realloc(data, newCapacity);
2960 }
2961 uint8_t* newData = (uint8_t*)malloc(newCapacity);
2962 if (!newData) {
2963 return nullptr;
2964 }
2965
2966 memcpy(newData, data, std::min(oldCapacity, newCapacity));
2967 zeroMemory(data, oldCapacity);
2968 free(data);
2969 return newData;
2970 }
2971
// Reset this Parcel for writing from scratch with `desired` bytes of
// capacity. Existing objects are released and all read/write positions,
// object tables and FD state are cleared; the data buffer is resized (and
// scrubbed first if mDeallocZero is set).
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        // We don't own the buffer; give it back, then allocate our own.
        freeData();
        return continueWrite(desired);
    }

    // Drop refs on any objects currently flattened into the buffer.
    releaseObjects();

    uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
    if (!data && desired > mDataCapacity) {
        // Aborts; the mError/return below are only reachable in builds where
        // LOG_ALWAYS_FATAL is compiled out.
        LOG_ALWAYS_FATAL("out of memory");
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    // NOTE: data may be nullptr when desired == 0 (realloc-to-zero); that is
    // still a success, handled below.
    if (data || desired == 0) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        // Keep the global allocation statistics in sync with the size delta.
        if (mDataCapacity > desired) {
            gParcelGlobalAllocSize -= (mDataCapacity - desired);
        } else {
            gParcelGlobalAllocSize += (desired - mDataCapacity);
        }

        if (!mData) {
            // First allocation for this Parcel.
            gParcelGlobalAllocCount++;
        }
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    // Clear object bookkeeping for whichever mode this Parcel is in.
    if (auto* kernelFields = maybeKernelFields()) {
        free(kernelFields->mObjects);
        kernelFields->mObjects = nullptr;
        kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = 0;
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
        kernelFields->mHasFds = false;
        kernelFields->mFdsKnown = true;
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mObjectPositions.clear();
        rpcFields->mFds.reset();
    }
    mAllowFds = true;

    return NO_ERROR;
}
3029
// Resize the Parcel's buffer to exactly `desired` bytes while preserving as
// much of the existing content as fits. Three cases:
//   1. mOwner set: we must take possession — copy the surviving prefix into
//      our own allocation and return the original buffer to its owner.
//   2. We own a buffer: realloc (grow) or just clamp size/pos (shrink).
//   3. No buffer yet: first allocation.
// When shrinking, objects that would extend past the new end are released
// and dropped from the object table first.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    auto* kernelFields = maybeKernelFields();
    auto* rpcFields = maybeRpcFields();

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize =
            kernelFields ? kernelFields->mObjectsSize : rpcFields->mObjectPositions.size();
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            if (kernelFields) {
#ifdef BINDER_WITH_KERNEL_IPC
                validateReadData(mDataSize); // hack to sort the objects
                // Drop trailing objects whose flat_binder_object footprint
                // would cross the new end of the buffer.
                while (objectsSize > 0) {
                    if (kernelFields->mObjects[objectsSize - 1] + sizeof(flat_binder_object) <=
                        desired)
                        break;
                    objectsSize--;
                }
#endif // BINDER_WITH_KERNEL_IPC
            } else {
                while (objectsSize > 0) {
                    // Object size varies by type.
                    uint32_t pos = rpcFields->mObjectPositions[objectsSize - 1];
                    size_t size = sizeof(RpcFields::ObjectType);
                    uint32_t minObjectEnd;
                    // Overflow-safe bounds check on the object's type header.
                    if (__builtin_add_overflow(pos, sizeof(RpcFields::ObjectType), &minObjectEnd) ||
                        minObjectEnd > mDataSize) {
                        return BAD_VALUE;
                    }
                    const auto type = *reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
                    switch (type) {
                        case RpcFields::TYPE_BINDER_NULL:
                            break;
                        case RpcFields::TYPE_BINDER:
                            size += sizeof(uint64_t); // address
                            break;
                        case RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR:
                            size += sizeof(int32_t); // fd index
                            break;
                    }

                    if (pos + size <= desired) break;
                    objectsSize--;
                }
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // posession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = nullptr;

        if (kernelFields && objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = kernelFields->mObjectsSize;
            kernelFields->mObjectsSize = objectsSize;
            acquireObjects();
            kernelFields->mObjectsSize = oldObjectsSize;
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                free(data);
                return status;
            }
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
#ifdef BINDER_WITH_KERNEL_IPC
        if (objects && kernelFields && kernelFields->mObjects) {
            memcpy(objects, kernelFields->mObjects, objectsSize * sizeof(binder_size_t));
            // All FDs are owned when `mOwner`, even when `cookie == 0`. When
            // we switch to `!mOwner`, we need to explicitly mark the FDs as
            // owned.
            for (size_t i = 0; i < objectsSize; i++) {
                flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    flat->cookie = 1;
                }
            }
        }
        // ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        // Close the FDs belonging to truncated objects before the owner's
        // buffer (which still references them) is released below.
        if (kernelFields) {
            closeFileDescriptors(objectsSize);
        }
#endif // BINDER_WITH_KERNEL_IPC
        // Return the original buffer to its owner now that we copied it.
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
        mOwner = nullptr;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        if (kernelFields) {
            kernelFields->mObjects = objects;
            kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
        }

    } else if (mData) {
        if (kernelFields && objectsSize < kernelFields->mObjectsSize) {
#ifdef BINDER_WITH_KERNEL_IPC
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i = objectsSize; i < kernelFields->mObjectsSize; i++) {
                const flat_binder_object* flat =
                        reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    kernelFields->mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }

            if (objectsSize == 0) {
                free(kernelFields->mObjects);
                kernelFields->mObjects = nullptr;
                kernelFields->mObjectsCapacity = 0;
            } else {
                binder_size_t* objects =
                        (binder_size_t*)realloc(kernelFields->mObjects,
                                                objectsSize * sizeof(binder_size_t));
                if (objects) {
                    kernelFields->mObjects = objects;
                    kernelFields->mObjectsCapacity = objectsSize;
                }
                // If realloc fails we keep the old (larger) table; only the
                // logical size below changes.
            }
            kernelFields->mObjectsSize = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
#else  // BINDER_WITH_KERNEL_IPC
            LOG_ALWAYS_FATAL("Non-zero numObjects for RPC Parcel");
#endif // BINDER_WITH_KERNEL_IPC
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                return status;
            }
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                          desired);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                mData = data;
                mDataCapacity = desired;
            } else {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            // Shrinking within capacity: just clamp the size and position.
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data. Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        // Sanity check: with no data buffer there should be no stale object
        // table either; log (but tolerate) any inconsistency.
        if (!(mDataCapacity == 0 &&
              (kernelFields == nullptr ||
               (kernelFields->mObjects == nullptr && kernelFields->mObjectsCapacity == 0)))) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity,
                  kernelFields ? kernelFields->mObjects : nullptr,
                  kernelFields ? kernelFields->mObjectsCapacity : 0, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}
3262
// Shrink the RPC object table to `newObjectsSize` entries, erasing the
// ancillary FD belonging to each dropped TYPE_NATIVE_FILE_DESCRIPTOR object.
// Returns BAD_VALUE if an object's encoded bytes don't fit inside the buffer
// or an FD index is out of range.
// NOTE: assumes this Parcel is in RPC mode (rpcFields non-null) — callers in
// continueWrite() only invoke it under that condition.
status_t Parcel::truncateRpcObjects(size_t newObjectsSize) {
    auto* rpcFields = maybeRpcFields();
    if (newObjectsSize == 0) {
        // Fast path: drop everything.
        rpcFields->mObjectPositions.clear();
        if (rpcFields->mFds) {
            rpcFields->mFds->clear();
        }
        return OK;
    }
    // Pop objects off the end one at a time until the table is small enough.
    while (rpcFields->mObjectPositions.size() > newObjectsSize) {
        uint32_t pos = rpcFields->mObjectPositions.back();
        uint32_t minObjectEnd;
        // Overflow-safe check that the object's type header is in bounds.
        if (__builtin_add_overflow(pos, sizeof(RpcFields::ObjectType), &minObjectEnd) ||
            minObjectEnd > mDataSize) {
            return BAD_VALUE;
        }
        const auto type = *reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
        if (type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            // The fd index is stored as an int32 immediately after the type.
            uint32_t objectEnd;
            if (__builtin_add_overflow(minObjectEnd, sizeof(int32_t), &objectEnd) ||
                objectEnd > mDataSize) {
                return BAD_VALUE;
            }
            const auto fdIndex = *reinterpret_cast<const int32_t*>(mData + minObjectEnd);
            if (rpcFields->mFds == nullptr || fdIndex < 0 ||
                static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
                ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                      fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
                return BAD_VALUE;
            }
            // In practice, this always removes the last element.
            rpcFields->mFds->erase(rpcFields->mFds->begin() + fdIndex);
        }
        rpcFields->mObjectPositions.pop_back();
    }
    return OK;
}
3300
initState()3301 void Parcel::initState()
3302 {
3303 LOG_ALLOC("Parcel %p: initState", this);
3304 mError = NO_ERROR;
3305 mData = nullptr;
3306 mDataSize = 0;
3307 mDataCapacity = 0;
3308 mDataPos = 0;
3309 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
3310 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
3311 mVariantFields.emplace<KernelFields>();
3312 mAllowFds = true;
3313 mDeallocZero = false;
3314 mOwner = nullptr;
3315 mEnforceNoDataAvail = true;
3316 mServiceFuzzing = false;
3317 }
3318
scanForFds() const3319 void Parcel::scanForFds() const {
3320 auto* kernelFields = maybeKernelFields();
3321 if (kernelFields == nullptr) {
3322 return;
3323 }
3324 status_t status = hasFileDescriptorsInRange(0, dataSize(), &kernelFields->mHasFds);
3325 ALOGE_IF(status != NO_ERROR, "Error %d calling hasFileDescriptorsInRange()", status);
3326 kernelFields->mFdsKnown = true;
3327 }
3328
3329 #ifdef BINDER_WITH_KERNEL_IPC
// Legacy alias kept only for ABI stability; forwards to getOpenAshmemSize().
size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO(b/202029388): Remove method once ABI can be changed.
    return getOpenAshmemSize();
}
3337
getOpenAshmemSize() const3338 size_t Parcel::getOpenAshmemSize() const
3339 {
3340 auto* kernelFields = maybeKernelFields();
3341 if (kernelFields == nullptr) {
3342 return 0;
3343 }
3344
3345 size_t openAshmemSize = 0;
3346 #ifndef BINDER_DISABLE_BLOB
3347 for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
3348 const flat_binder_object* flat =
3349 reinterpret_cast<const flat_binder_object*>(mData + kernelFields->mObjects[i]);
3350
3351 // cookie is compared against zero for historical reasons
3352 // > obj.cookie = takeOwnership ? 1 : 0;
3353 if (flat->hdr.type == BINDER_TYPE_FD && flat->cookie != 0 && ashmem_valid(flat->handle)) {
3354 int size = ashmem_get_size_region(flat->handle);
3355 if (__builtin_add_overflow(openAshmemSize, size, &openAshmemSize)) {
3356 ALOGE("Overflow when computing ashmem size.");
3357 return SIZE_MAX;
3358 }
3359 }
3360 }
3361 #endif
3362 return openAshmemSize;
3363 }
3364 #endif // BINDER_WITH_KERNEL_IPC
3365
3366 // --- Parcel::Blob ---
3367
Blob()3368 Parcel::Blob::Blob() :
3369 mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
3370 }
3371
// Unmaps the region (if any) via release().
Parcel::Blob::~Blob() {
    release();
}
3375
release()3376 void Parcel::Blob::release() {
3377 if (mFd != -1 && mData) {
3378 if (::munmap(mData, mSize) == -1) {
3379 ALOGW("munmap() failed: %s", strerror(errno));
3380 }
3381 }
3382 clear();
3383 }
3384
// Record a blob's backing storage. When fd != -1, `data` is treated as an
// mmap()ed region that release() will munmap (the fd itself is not closed
// by release()). `isMutable` records whether callers may write through mData.
void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
    mFd = fd;
    mData = data;
    mSize = size;
    mMutable = isMutable;
}
3391
// Reset fields to the empty state WITHOUT unmapping; release() unmaps first
// and then calls this.
void Parcel::Blob::clear() {
    mFd = -1;
    mData = nullptr;
    mSize = 0;
    mMutable = false;
}
3398
3399 } // namespace android
3400