1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <linux/sched.h>
24 #include <pthread.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <sys/mman.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <sys/resource.h>
32 #include <unistd.h>

#include <algorithm>
#include <limits>
#include <new>

34 #include <binder/Binder.h>
35 #include <binder/BpBinder.h>
36 #include <binder/IPCThreadState.h>
37 #include <binder/Parcel.h>
38 #include <binder/ProcessState.h>
39 #include <binder/Stability.h>
40 #include <binder/Status.h>
41 #include <binder/TextOutput.h>
42
43 #include <cutils/ashmem.h>
44 #include <utils/Debug.h>
45 #include <utils/Flattenable.h>
46 #include <utils/Log.h>
47 #include <utils/misc.h>
48 #include <utils/String8.h>
49 #include <utils/String16.h>
50
51 #include <private/binder/binder_module.h>
52 #include "Static.h"
53
54 #define LOG_REFS(...)
55 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
56 #define LOG_ALLOC(...)
57 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
58
59 // ---------------------------------------------------------------------------
60
// This macro should never be used at runtime, as too large a value
// of s could cause an integer overflow. Instead, always use the
// wrapper function pad_size().
64 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
65
static size_t pad_size(size_t s) {
67 if (s > (std::numeric_limits<size_t>::max() - 3)) {
68 LOG_ALWAYS_FATAL("pad size too big %zu", s);
69 }
70 return PAD_SIZE_UNSAFE(s);
71 }
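
// A quick illustration of the padding rule: Parcel data is kept 4-byte aligned,
// so sizes are rounded up to the next multiple of four.
static_assert(PAD_SIZE_UNSAFE(1) == 4, "unaligned sizes round up");
static_assert(PAD_SIZE_UNSAFE(4) == 4, "aligned sizes are unchanged");
static_assert(PAD_SIZE_UNSAFE(5) == 8, "unaligned sizes round up");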
72
73 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
74 #define STRICT_MODE_PENALTY_GATHER (1 << 31)
75
76 namespace android {
77
78 // many things compile this into prebuilts on the stack
79 static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);
80
81 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
82 static size_t gParcelGlobalAllocSize = 0;
83 static size_t gParcelGlobalAllocCount = 0;
84
85 static size_t gMaxFds = 0;
86
87 // Maximum size of a blob to transfer in-place.
88 static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
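// Blobs larger than this are moved through anonymous shared memory (ashmem)
// instead; see writeBlob() below.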
89
90 enum {
91 BLOB_INPLACE = 0,
92 BLOB_ASHMEM_IMMUTABLE = 1,
93 BLOB_ASHMEM_MUTABLE = 2,
94 };
95
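// acquire_object()/release_object() keep whatever a flattened object refers to
// alive (a strong IBinder reference) or owned (a file descriptor) for as long
// as it sits in a Parcel's buffer; they are called when objects are written,
// when they are copied in appendFrom(), and when the Parcel's data is released.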
static void acquire_object(const sp<ProcessState>& proc,
97 const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
98 {
99 switch (obj.hdr.type) {
100 case BINDER_TYPE_BINDER:
101 if (obj.binder) {
102 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
103 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
104 }
105 return;
106 case BINDER_TYPE_HANDLE: {
107 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
108 if (b != nullptr) {
109 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
110 b->incStrong(who);
111 }
112 return;
113 }
114 case BINDER_TYPE_FD: {
115 if ((obj.cookie != 0) && (outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
116 // If we own an ashmem fd, keep track of how much memory it refers to.
117 int size = ashmem_get_size_region(obj.handle);
118 if (size > 0) {
119 *outAshmemSize += size;
120 }
121 }
122 return;
123 }
124 }
125
126 ALOGD("Invalid object type 0x%08x", obj.hdr.type);
127 }
128
static void release_object(const sp<ProcessState>& proc,
130 const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
131 {
132 switch (obj.hdr.type) {
133 case BINDER_TYPE_BINDER:
134 if (obj.binder) {
135 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
136 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
137 }
138 return;
139 case BINDER_TYPE_HANDLE: {
140 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
141 if (b != nullptr) {
142 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
143 b->decStrong(who);
144 }
145 return;
146 }
147 case BINDER_TYPE_FD: {
148 if (obj.cookie != 0) { // owned
149 if ((outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
150 int size = ashmem_get_size_region(obj.handle);
151 if (size > 0) {
152 // ashmem size might have changed since last time it was accounted for, e.g.
153 // in acquire_object(). Value of *outAshmemSize is not critical since we are
154 // releasing the object anyway. Check for integer overflow condition.
155 *outAshmemSize -= std::min(*outAshmemSize, static_cast<size_t>(size));
156 }
157 }
158
159 close(obj.handle);
160 }
161 return;
162 }
163 }
164
165 ALOGE("Invalid object type 0x%08x", obj.hdr.type);
166 }
167
status_t Parcel::finishFlattenBinder(
169 const sp<IBinder>& binder, const flat_binder_object& flat)
170 {
171 status_t status = writeObject(flat, false);
172 if (status != OK) return status;
173
174 internal::Stability::tryMarkCompilationUnit(binder.get());
175 return writeInt32(internal::Stability::get(binder.get()));
176 }
177
status_t Parcel::finishUnflattenBinder(
179 const sp<IBinder>& binder, sp<IBinder>* out) const
180 {
181 int32_t stability;
182 status_t status = readInt32(&stability);
183 if (status != OK) return status;
184
185 status = internal::Stability::set(binder.get(), stability, true /*log*/);
186 if (status != OK) return status;
187
188 *out = binder;
189 return OK;
190 }
191
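// Packs a scheduling policy and priority into flat_binder_object flags: the
// priority goes into the low FLAT_BINDER_FLAG_PRIORITY_MASK bits and the policy
// into the two bits at FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT. For example, the
// default used below, schedPolicyMask(SCHED_NORMAL, 19), only sets priority
// bits since SCHED_NORMAL is 0.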
static constexpr inline int schedPolicyMask(int policy, int priority) {
193 return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
194 }
195
status_t Parcel::flattenBinder(const sp<IBinder>& binder)
197 {
198 flat_binder_object obj;
199 obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
200
201 int schedBits = 0;
202 if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
203 schedBits = schedPolicyMask(SCHED_NORMAL, 19);
204 }
205
206 if (binder != nullptr) {
207 BBinder *local = binder->localBinder();
208 if (!local) {
209 BpBinder *proxy = binder->remoteBinder();
210 if (proxy == nullptr) {
211 ALOGE("null proxy");
212 }
213 const int32_t handle = proxy ? proxy->handle() : 0;
214 obj.hdr.type = BINDER_TYPE_HANDLE;
215 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
216 obj.handle = handle;
217 obj.cookie = 0;
218 } else {
219 int policy = local->getMinSchedulerPolicy();
220 int priority = local->getMinSchedulerPriority();
221
222 if (policy != 0 || priority != 0) {
223 // override value, since it is set explicitly
224 schedBits = schedPolicyMask(policy, priority);
225 }
226 if (local->isRequestingSid()) {
227 obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
228 }
229 obj.hdr.type = BINDER_TYPE_BINDER;
230 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
231 obj.cookie = reinterpret_cast<uintptr_t>(local);
232 }
233 } else {
234 obj.hdr.type = BINDER_TYPE_BINDER;
235 obj.binder = 0;
236 obj.cookie = 0;
237 }
238
239 obj.flags |= schedBits;
240
241 return finishFlattenBinder(binder, obj);
242 }
243
status_t Parcel::unflattenBinder(sp<IBinder>* out) const
245 {
246 const flat_binder_object* flat = readObject(false);
247
248 if (flat) {
249 switch (flat->hdr.type) {
250 case BINDER_TYPE_BINDER: {
251 sp<IBinder> binder = reinterpret_cast<IBinder*>(flat->cookie);
252 return finishUnflattenBinder(binder, out);
253 }
254 case BINDER_TYPE_HANDLE: {
255 sp<IBinder> binder =
256 ProcessState::self()->getStrongProxyForHandle(flat->handle);
257 return finishUnflattenBinder(binder, out);
258 }
259 }
260 }
261 return BAD_TYPE;
262 }
263
264 // ---------------------------------------------------------------------------
265
Parcel::Parcel()
267 {
268 LOG_ALLOC("Parcel %p: constructing", this);
269 initState();
270 }
271
Parcel::~Parcel()
273 {
274 freeDataNoInit();
275 LOG_ALLOC("Parcel %p: destroyed", this);
276 }
277
size_t Parcel::getGlobalAllocSize() {
279 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
280 size_t size = gParcelGlobalAllocSize;
281 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
282 return size;
283 }
284
size_t Parcel::getGlobalAllocCount() {
286 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
287 size_t count = gParcelGlobalAllocCount;
288 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
289 return count;
290 }
291
const uint8_t* Parcel::data() const
293 {
294 return mData;
295 }
296
size_t Parcel::dataSize() const
298 {
299 return (mDataSize > mDataPos ? mDataSize : mDataPos);
300 }
301
size_t Parcel::dataAvail() const
303 {
304 size_t result = dataSize() - dataPosition();
305 if (result > INT32_MAX) {
306 LOG_ALWAYS_FATAL("result too big: %zu", result);
307 }
308 return result;
309 }
310
size_t Parcel::dataPosition() const
312 {
313 return mDataPos;
314 }
315
size_t Parcel::dataCapacity() const
317 {
318 return mDataCapacity;
319 }
320
status_t Parcel::setDataSize(size_t size)
322 {
323 if (size > INT32_MAX) {
324 // don't accept size_t values which may have come from an
325 // inadvertent conversion from a negative int.
326 return BAD_VALUE;
327 }
328
329 status_t err;
330 err = continueWrite(size);
331 if (err == NO_ERROR) {
332 mDataSize = size;
333 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
334 }
335 return err;
336 }
337
void Parcel::setDataPosition(size_t pos) const
339 {
340 if (pos > INT32_MAX) {
341 // don't accept size_t values which may have come from an
342 // inadvertent conversion from a negative int.
343 LOG_ALWAYS_FATAL("pos too big: %zu", pos);
344 }
345
346 mDataPos = pos;
347 mNextObjectHint = 0;
348 mObjectsSorted = false;
349 }
350
status_t Parcel::setDataCapacity(size_t size)
352 {
353 if (size > INT32_MAX) {
354 // don't accept size_t values which may have come from an
355 // inadvertent conversion from a negative int.
356 return BAD_VALUE;
357 }
358
359 if (size > mDataCapacity) return continueWrite(size);
360 return NO_ERROR;
361 }
362
status_t Parcel::setData(const uint8_t* buffer, size_t len)
364 {
365 if (len > INT32_MAX) {
366 // don't accept size_t values which may have come from an
367 // inadvertent conversion from a negative int.
368 return BAD_VALUE;
369 }
370
371 status_t err = restartWrite(len);
372 if (err == NO_ERROR) {
373 memcpy(const_cast<uint8_t*>(data()), buffer, len);
374 mDataSize = len;
375 mFdsKnown = false;
376 }
377 return err;
378 }
379
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
381 {
382 status_t err;
383 const uint8_t *data = parcel->mData;
384 const binder_size_t *objects = parcel->mObjects;
385 size_t size = parcel->mObjectsSize;
386 int startPos = mDataPos;
387 int firstIndex = -1, lastIndex = -2;
388
389 if (len == 0) {
390 return NO_ERROR;
391 }
392
393 if (len > INT32_MAX) {
394 // don't accept size_t values which may have come from an
395 // inadvertent conversion from a negative int.
396 return BAD_VALUE;
397 }
398
399 // range checks against the source parcel size
400 if ((offset > parcel->mDataSize)
401 || (len > parcel->mDataSize)
402 || (offset + len > parcel->mDataSize)) {
403 return BAD_VALUE;
404 }
405
406 // Count objects in range
407 for (int i = 0; i < (int) size; i++) {
408 size_t off = objects[i];
409 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
410 if (firstIndex == -1) {
411 firstIndex = i;
412 }
413 lastIndex = i;
414 }
415 }
416 int numObjects = lastIndex - firstIndex + 1;
417
418 if ((mDataSize+len) > mDataCapacity) {
419 // grow data
420 err = growData(len);
421 if (err != NO_ERROR) {
422 return err;
423 }
424 }
425
426 // append data
427 memcpy(mData + mDataPos, data + offset, len);
428 mDataPos += len;
429 mDataSize += len;
430
431 err = NO_ERROR;
432
433 if (numObjects > 0) {
434 const sp<ProcessState> proc(ProcessState::self());
435 // grow objects
436 if (mObjectsCapacity < mObjectsSize + numObjects) {
437 if ((size_t) numObjects > SIZE_MAX - mObjectsSize) return NO_MEMORY; // overflow
438 if (mObjectsSize + numObjects > SIZE_MAX / 3) return NO_MEMORY; // overflow
439 size_t newSize = ((mObjectsSize + numObjects)*3)/2;
440 if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
441 binder_size_t *objects =
442 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
443 if (objects == (binder_size_t*)nullptr) {
444 return NO_MEMORY;
445 }
446 mObjects = objects;
447 mObjectsCapacity = newSize;
448 }
449
450 // append and acquire objects
451 int idx = mObjectsSize;
452 for (int i = firstIndex; i <= lastIndex; i++) {
453 size_t off = objects[i] - offset + startPos;
454 mObjects[idx++] = off;
455 mObjectsSize++;
456
457 flat_binder_object* flat
458 = reinterpret_cast<flat_binder_object*>(mData + off);
459 acquire_object(proc, *flat, this, &mOpenAshmemSize);
460
461 if (flat->hdr.type == BINDER_TYPE_FD) {
462 // If this is a file descriptor, we need to dup it so the
463 // new Parcel now owns its own fd, and can declare that we
464 // officially know we have fds.
465 flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
466 flat->cookie = 1;
467 mHasFds = mFdsKnown = true;
468 if (!mAllowFds) {
469 err = FDS_NOT_ALLOWED;
470 }
471 }
472 }
473 }
474
475 return err;
476 }
477
int Parcel::compareData(const Parcel& other) {
479 size_t size = dataSize();
480 if (size != other.dataSize()) {
481 return size < other.dataSize() ? -1 : 1;
482 }
483 return memcmp(data(), other.data(), size);
484 }
485
bool Parcel::allowFds() const
487 {
488 return mAllowFds;
489 }
490
bool Parcel::pushAllowFds(bool allowFds)
492 {
493 const bool origValue = mAllowFds;
494 if (!allowFds) {
495 mAllowFds = false;
496 }
497 return origValue;
498 }
499
void Parcel::restoreAllowFds(bool lastValue)
501 {
502 mAllowFds = lastValue;
503 }
504
bool Parcel::hasFileDescriptors() const
506 {
507 if (!mFdsKnown) {
508 scanForFds();
509 }
510 return mHasFds;
511 }
512
void Parcel::updateWorkSourceRequestHeaderPosition() const {
514 // Only update the request headers once. We only want to point
515 // to the first headers read/written.
516 if (!mRequestHeaderPresent) {
517 mWorkSourceRequestHeaderPosition = dataPosition();
518 mRequestHeaderPresent = true;
519 }
520 }
521
522 #if defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
523 constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
524 #else
525 constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
526 #endif
527
528 // Write RPC headers. (previously just the interface token)
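// The header layout written here (and consumed by enforceInterface() below) is:
// strict mode policy (int32), work source uid (int32), the kHeader marker
// identifying which libbinder variant wrote the parcel (int32), and finally the
// interface descriptor as a String16.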
status_t Parcel::writeInterfaceToken(const String16& interface)
530 {
531 const IPCThreadState* threadState = IPCThreadState::self();
532 writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
533 updateWorkSourceRequestHeaderPosition();
534 writeInt32(threadState->shouldPropagateWorkSource() ?
535 threadState->getCallingWorkSourceUid() : IPCThreadState::kUnsetWorkSource);
536 writeInt32(kHeader);
537 // currently the interface identification token is just its name as a string
538 return writeString16(interface);
539 }
540
bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
542 {
543 if (!mRequestHeaderPresent) {
544 return false;
545 }
546
547 const size_t initialPosition = dataPosition();
548 setDataPosition(mWorkSourceRequestHeaderPosition);
549 status_t err = writeInt32(uid);
550 setDataPosition(initialPosition);
551 return err == NO_ERROR;
552 }
553
uid_t Parcel::readCallingWorkSourceUid() const
555 {
556 if (!mRequestHeaderPresent) {
557 return IPCThreadState::kUnsetWorkSource;
558 }
559
560 const size_t initialPosition = dataPosition();
561 setDataPosition(mWorkSourceRequestHeaderPosition);
562 uid_t uid = readInt32();
563 setDataPosition(initialPosition);
564 return uid;
565 }
566
bool Parcel::checkInterface(IBinder* binder) const
568 {
569 return enforceInterface(binder->getInterfaceDescriptor());
570 }
571
bool Parcel::enforceInterface(const String16& interface,
573 IPCThreadState* threadState) const
574 {
575 return enforceInterface(interface.string(), interface.size(), threadState);
576 }
577
bool Parcel::enforceInterface(const char16_t* interface,
579 size_t len,
580 IPCThreadState* threadState) const
581 {
582 // StrictModePolicy.
583 int32_t strictPolicy = readInt32();
584 if (threadState == nullptr) {
585 threadState = IPCThreadState::self();
586 }
587 if ((threadState->getLastTransactionBinderFlags() &
588 IBinder::FLAG_ONEWAY) != 0) {
589 // For one-way calls, the callee is running entirely
590 // disconnected from the caller, so disable StrictMode entirely.
591 // Not only does disk/network usage not impact the caller, but
        // there's no way to communicate back any violations anyway.
593 threadState->setStrictModePolicy(0);
594 } else {
595 threadState->setStrictModePolicy(strictPolicy);
596 }
597 // WorkSource.
598 updateWorkSourceRequestHeaderPosition();
599 int32_t workSource = readInt32();
600 threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
601 // vendor header
602 int32_t header = readInt32();
603 if (header != kHeader) {
604 ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader, header);
605 return false;
606 }
607 // Interface descriptor.
608 size_t parcel_interface_len;
609 const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
610 if (len == parcel_interface_len &&
611 (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
612 return true;
613 } else {
614 ALOGW("**** enforceInterface() expected '%s' but read '%s'",
615 String8(interface, len).string(),
616 String8(parcel_interface, parcel_interface_len).string());
617 return false;
618 }
619 }
620
size_t Parcel::objectsCount() const
622 {
623 return mObjectsSize;
624 }
625
status_t Parcel::errorCheck() const
627 {
628 return mError;
629 }
630
void Parcel::setError(status_t err)
632 {
633 mError = err;
634 }
635
status_t Parcel::finishWrite(size_t len)
637 {
638 if (len > INT32_MAX) {
639 // don't accept size_t values which may have come from an
640 // inadvertent conversion from a negative int.
641 return BAD_VALUE;
642 }
643
644 //printf("Finish write of %d\n", len);
645 mDataPos += len;
646 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
647 if (mDataPos > mDataSize) {
648 mDataSize = mDataPos;
649 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
650 }
651 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
652 return NO_ERROR;
653 }
654
status_t Parcel::writeUnpadded(const void* data, size_t len)
656 {
657 if (len > INT32_MAX) {
658 // don't accept size_t values which may have come from an
659 // inadvertent conversion from a negative int.
660 return BAD_VALUE;
661 }
662
663 size_t end = mDataPos + len;
664 if (end < mDataPos) {
665 // integer overflow
666 return BAD_VALUE;
667 }
668
669 if (end <= mDataCapacity) {
670 restart_write:
671 memcpy(mData+mDataPos, data, len);
672 return finishWrite(len);
673 }
674
675 status_t err = growData(len);
676 if (err == NO_ERROR) goto restart_write;
677 return err;
678 }
679
status_t Parcel::write(const void* data, size_t len)
681 {
682 if (len > INT32_MAX) {
683 // don't accept size_t values which may have come from an
684 // inadvertent conversion from a negative int.
685 return BAD_VALUE;
686 }
687
688 void* const d = writeInplace(len);
689 if (d) {
690 memcpy(d, data, len);
691 return NO_ERROR;
692 }
693 return mError;
694 }
695
void* Parcel::writeInplace(size_t len)
697 {
698 if (len > INT32_MAX) {
699 // don't accept size_t values which may have come from an
700 // inadvertent conversion from a negative int.
701 return nullptr;
702 }
703
704 const size_t padded = pad_size(len);
705
706 // sanity check for integer overflow
707 if (mDataPos+padded < mDataPos) {
708 return nullptr;
709 }
710
711 if ((mDataPos+padded) <= mDataCapacity) {
712 restart_write:
713 //printf("Writing %ld bytes, padded to %ld\n", len, padded);
714 uint8_t* const data = mData+mDataPos;
715
716 // Need to pad at end?
717 if (padded != len) {
718 #if BYTE_ORDER == BIG_ENDIAN
719 static const uint32_t mask[4] = {
720 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
721 };
722 #endif
723 #if BYTE_ORDER == LITTLE_ENDIAN
724 static const uint32_t mask[4] = {
725 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
726 };
727 #endif
728 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
729 // *reinterpret_cast<void**>(data+padded-4));
730 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
731 }
732
733 finishWrite(padded);
734 return data;
735 }
736
737 status_t err = growData(padded);
738 if (err == NO_ERROR) goto restart_write;
739 return nullptr;
740 }
741
status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
743 const uint8_t* strData = (uint8_t*)str.data();
744 const size_t strLen= str.length();
745 const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
746 if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
747 return BAD_VALUE;
748 }
749
750 status_t err = writeInt32(utf16Len);
751 if (err) {
752 return err;
753 }
754
755 // Allocate enough bytes to hold our converted string and its terminating NULL.
756 void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
757 if (!dst) {
758 return NO_MEMORY;
759 }
760
761 utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);
762
763 return NO_ERROR;
764 }
765
status_t Parcel::writeUtf8AsUtf16(const std::optional<std::string>& str) {
767 if (!str) {
768 return writeInt32(-1);
769 }
770 return writeUtf8AsUtf16(*str);
771 }
772
status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
774 if (!str) {
775 return writeInt32(-1);
776 }
777 return writeUtf8AsUtf16(*str);
778 }
779
status_t Parcel::writeByteVectorInternal(const int8_t* data, size_t size) {
781 if (size > std::numeric_limits<int32_t>::max()) {
782 return BAD_VALUE;
783 }
784
785 status_t status = writeInt32(size);
786 if (status != OK) {
787 return status;
788 }
789
790 return write(data, size);
791 }
792
status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
794 return writeByteVectorInternal(val.data(), val.size());
795 }
796
status_t Parcel::writeByteVector(const std::optional<std::vector<int8_t>>& val)
798 {
799 if (!val) return writeInt32(-1);
800 return writeByteVectorInternal(val->data(), val->size());
801 }
802
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
804 {
805 if (!val) return writeInt32(-1);
806 return writeByteVectorInternal(val->data(), val->size());
807 }
808
status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
810 return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val.data()), val.size());
811 }
812
status_t Parcel::writeByteVector(const std::optional<std::vector<uint8_t>>& val)
814 {
815 if (!val) return writeInt32(-1);
816 return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val->data()), val->size());
817 }
818
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
820 {
821 if (!val) return writeInt32(-1);
822 return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val->data()), val->size());
823 }
824
status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
826 {
827 return writeTypedVector(val, &Parcel::writeInt32);
828 }
829
status_t Parcel::writeInt32Vector(const std::optional<std::vector<int32_t>>& val)
831 {
832 return writeNullableTypedVector(val, &Parcel::writeInt32);
833 }
834
status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
836 {
837 return writeNullableTypedVector(val, &Parcel::writeInt32);
838 }
839
status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
841 {
842 return writeTypedVector(val, &Parcel::writeInt64);
843 }
844
status_t Parcel::writeInt64Vector(const std::optional<std::vector<int64_t>>& val)
846 {
847 return writeNullableTypedVector(val, &Parcel::writeInt64);
848 }
849
status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
851 {
852 return writeNullableTypedVector(val, &Parcel::writeInt64);
853 }
854
status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val)
856 {
857 return writeTypedVector(val, &Parcel::writeUint64);
858 }
859
status_t Parcel::writeUint64Vector(const std::optional<std::vector<uint64_t>>& val)
861 {
862 return writeNullableTypedVector(val, &Parcel::writeUint64);
863 }
864
status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val)
866 {
867 return writeNullableTypedVector(val, &Parcel::writeUint64);
868 }
869
status_t Parcel::writeFloatVector(const std::vector<float>& val)
871 {
872 return writeTypedVector(val, &Parcel::writeFloat);
873 }
874
status_t Parcel::writeFloatVector(const std::optional<std::vector<float>>& val)
876 {
877 return writeNullableTypedVector(val, &Parcel::writeFloat);
878 }
879
status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
881 {
882 return writeNullableTypedVector(val, &Parcel::writeFloat);
883 }
884
status_t Parcel::writeDoubleVector(const std::vector<double>& val)
886 {
887 return writeTypedVector(val, &Parcel::writeDouble);
888 }
889
status_t Parcel::writeDoubleVector(const std::optional<std::vector<double>>& val)
891 {
892 return writeNullableTypedVector(val, &Parcel::writeDouble);
893 }
894
status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
896 {
897 return writeNullableTypedVector(val, &Parcel::writeDouble);
898 }
899
status_t Parcel::writeBoolVector(const std::vector<bool>& val)
901 {
902 return writeTypedVector(val, &Parcel::writeBool);
903 }
904
status_t Parcel::writeBoolVector(const std::optional<std::vector<bool>>& val)
906 {
907 return writeNullableTypedVector(val, &Parcel::writeBool);
908 }
909
status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
911 {
912 return writeNullableTypedVector(val, &Parcel::writeBool);
913 }
914
status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
916 {
917 return writeTypedVector(val, &Parcel::writeChar);
918 }
919
status_t Parcel::writeCharVector(const std::optional<std::vector<char16_t>>& val)
921 {
922 return writeNullableTypedVector(val, &Parcel::writeChar);
923 }
924
status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
926 {
927 return writeNullableTypedVector(val, &Parcel::writeChar);
928 }
929
status_t Parcel::writeString16Vector(const std::vector<String16>& val)
931 {
932 return writeTypedVector(val, &Parcel::writeString16);
933 }
934
status_t Parcel::writeString16Vector(
936 const std::optional<std::vector<std::optional<String16>>>& val)
937 {
938 return writeNullableTypedVector(val, &Parcel::writeString16);
939 }
940
status_t Parcel::writeString16Vector(
942 const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
943 {
944 return writeNullableTypedVector(val, &Parcel::writeString16);
945 }
946
status_t Parcel::writeUtf8VectorAsUtf16Vector(
948 const std::optional<std::vector<std::optional<std::string>>>& val) {
949 return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
950 }
951
status_t Parcel::writeUtf8VectorAsUtf16Vector(
953 const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
954 return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
955 }
956
status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
958 return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
959 }
960
status_t Parcel::writeInt32(int32_t val)
962 {
963 return writeAligned(val);
964 }
965
status_t Parcel::writeUint32(uint32_t val)
967 {
968 return writeAligned(val);
969 }
970
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
972 if (len > INT32_MAX) {
973 // don't accept size_t values which may have come from an
974 // inadvertent conversion from a negative int.
975 return BAD_VALUE;
976 }
977
978 if (!val) {
979 return writeInt32(-1);
980 }
981 status_t ret = writeInt32(static_cast<uint32_t>(len));
982 if (ret == NO_ERROR) {
983 ret = write(val, len * sizeof(*val));
984 }
985 return ret;
986 }
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
988 if (len > INT32_MAX) {
989 // don't accept size_t values which may have come from an
990 // inadvertent conversion from a negative int.
991 return BAD_VALUE;
992 }
993
994 if (!val) {
995 return writeInt32(-1);
996 }
997 status_t ret = writeInt32(static_cast<uint32_t>(len));
998 if (ret == NO_ERROR) {
999 ret = write(val, len * sizeof(*val));
1000 }
1001 return ret;
1002 }
1003
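// bool, char16_t and int8_t values are widened to a full int32_t so that every
// item in the parcel stays 4-byte aligned; the corresponding read functions
// narrow them back down.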
status_t Parcel::writeBool(bool val)
1005 {
1006 return writeInt32(int32_t(val));
1007 }
1008
status_t Parcel::writeChar(char16_t val)
1010 {
1011 return writeInt32(int32_t(val));
1012 }
1013
status_t Parcel::writeByte(int8_t val)
1015 {
1016 return writeInt32(int32_t(val));
1017 }
1018
status_t Parcel::writeInt64(int64_t val)
1020 {
1021 return writeAligned(val);
1022 }
1023
status_t Parcel::writeUint64(uint64_t val)
1025 {
1026 return writeAligned(val);
1027 }
1028
status_t Parcel::writePointer(uintptr_t val)
1030 {
1031 return writeAligned<binder_uintptr_t>(val);
1032 }
1033
status_t Parcel::writeFloat(float val)
1035 {
1036 return writeAligned(val);
1037 }
1038
1039 #if defined(__mips__) && defined(__mips_hard_float)
1040
status_t Parcel::writeDouble(double val)
1042 {
1043 union {
1044 double d;
1045 unsigned long long ll;
1046 } u;
1047 u.d = val;
1048 return writeAligned(u.ll);
1049 }
1050
1051 #else
1052
status_t Parcel::writeDouble(double val)
1054 {
1055 return writeAligned(val);
1056 }
1057
1058 #endif
1059
status_t Parcel::writeCString(const char* str)
1061 {
1062 return write(str, strlen(str)+1);
1063 }
1064
status_t Parcel::writeString8(const String8& str)
1066 {
1067 status_t err = writeInt32(str.bytes());
1068 // only write string if its length is more than zero characters,
1069 // as readString8 will only read if the length field is non-zero.
1070 // this is slightly different from how writeString16 works.
1071 if (str.bytes() > 0 && err == NO_ERROR) {
1072 err = write(str.string(), str.bytes()+1);
1073 }
1074 return err;
1075 }
1076
status_t Parcel::writeString16(const std::optional<String16>& str)
1078 {
1079 if (!str) {
1080 return writeInt32(-1);
1081 }
1082
1083 return writeString16(*str);
1084 }
1085
status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
1087 {
1088 if (!str) {
1089 return writeInt32(-1);
1090 }
1091
1092 return writeString16(*str);
1093 }
1094
status_t Parcel::writeString16(const String16& str)
1096 {
1097 return writeString16(str.string(), str.size());
1098 }
1099
status_t Parcel::writeString16(const char16_t* str, size_t len)
1101 {
1102 if (str == nullptr) return writeInt32(-1);
1103
1104 status_t err = writeInt32(len);
1105 if (err == NO_ERROR) {
1106 len *= sizeof(char16_t);
1107 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1108 if (data) {
1109 memcpy(data, str, len);
1110 *reinterpret_cast<char16_t*>(data+len) = 0;
1111 return NO_ERROR;
1112 }
1113 err = mError;
1114 }
1115 return err;
1116 }
1117
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1119 {
1120 return flattenBinder(val);
1121 }
1122
status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
1124 {
1125 return writeTypedVector(val, &Parcel::writeStrongBinder);
1126 }
1127
status_t Parcel::writeStrongBinderVector(const std::optional<std::vector<sp<IBinder>>>& val)
1129 {
1130 return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1131 }
1132
status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
1134 {
1135 return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1136 }
1137
status_t Parcel::readStrongBinderVector(std::optional<std::vector<sp<IBinder>>>* val) const {
1139 return readNullableTypedVector(val, &Parcel::readNullableStrongBinder);
1140 }
1141
status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
1143 return readNullableTypedVector(val, &Parcel::readNullableStrongBinder);
1144 }
1145
status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
1147 return readTypedVector(val, &Parcel::readStrongBinder);
1148 }
1149
status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1151 if (!parcelable) {
1152 return writeInt32(0);
1153 }
1154
1155 return writeParcelable(*parcelable);
1156 }
1157
status_t Parcel::writeParcelable(const Parcelable& parcelable) {
1159 status_t status = writeInt32(1); // parcelable is not null.
1160 if (status != OK) {
1161 return status;
1162 }
1163 return parcelable.writeToParcel(this);
1164 }
1165
status_t Parcel::writeNativeHandle(const native_handle* handle)
1167 {
1168 if (!handle || handle->version != sizeof(native_handle))
1169 return BAD_TYPE;
1170
1171 status_t err;
1172 err = writeInt32(handle->numFds);
1173 if (err != NO_ERROR) return err;
1174
1175 err = writeInt32(handle->numInts);
1176 if (err != NO_ERROR) return err;
1177
1178 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1179 err = writeDupFileDescriptor(handle->data[i]);
1180
1181 if (err != NO_ERROR) {
1182 ALOGD("write native handle, write dup fd failed");
1183 return err;
1184 }
1185 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1186 return err;
1187 }
1188
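// The cookie of a BINDER_TYPE_FD object records whether this Parcel owns the
// descriptor: release_object() only close()s fds whose cookie is non-zero.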
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
1190 {
1191 flat_binder_object obj;
1192 obj.hdr.type = BINDER_TYPE_FD;
1193 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
1194 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
1195 obj.handle = fd;
1196 obj.cookie = takeOwnership ? 1 : 0;
1197 return writeObject(obj, true);
1198 }
1199
status_t Parcel::writeDupFileDescriptor(int fd)
1201 {
1202 int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
1203 if (dupFd < 0) {
1204 return -errno;
1205 }
1206 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1207 if (err != OK) {
1208 close(dupFd);
1209 }
1210 return err;
1211 }
1212
status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1214 {
1215 writeInt32(0);
1216 return writeFileDescriptor(fd, takeOwnership);
1217 }
1218
status_t Parcel::writeDupParcelFileDescriptor(int fd)
1220 {
1221 int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
1222 if (dupFd < 0) {
1223 return -errno;
1224 }
1225 status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
1226 if (err != OK) {
1227 close(dupFd);
1228 }
1229 return err;
1230 }
1231
status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) {
1233 return writeDupFileDescriptor(fd.get());
1234 }
1235
status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) {
1237 return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1238 }
1239
status_t Parcel::writeUniqueFileDescriptorVector(const std::optional<std::vector<base::unique_fd>>& val) {
1241 return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1242 }
1243
status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) {
1245 return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1246 }
1247
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
1249 {
1250 if (len > INT32_MAX) {
1251 // don't accept size_t values which may have come from an
1252 // inadvertent conversion from a negative int.
1253 return BAD_VALUE;
1254 }
1255
1256 status_t status;
1257 if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
1258 ALOGV("writeBlob: write in place");
1259 status = writeInt32(BLOB_INPLACE);
1260 if (status) return status;
1261
1262 void* ptr = writeInplace(len);
1263 if (!ptr) return NO_MEMORY;
1264
1265 outBlob->init(-1, ptr, len, false);
1266 return NO_ERROR;
1267 }
1268
1269 ALOGV("writeBlob: write to ashmem");
1270 int fd = ashmem_create_region("Parcel Blob", len);
1271 if (fd < 0) return NO_MEMORY;
1272
1273 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
1274 if (result < 0) {
1275 status = result;
1276 } else {
1277 void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1278 if (ptr == MAP_FAILED) {
1279 status = -errno;
1280 } else {
1281 if (!mutableCopy) {
1282 result = ashmem_set_prot_region(fd, PROT_READ);
1283 }
1284 if (result < 0) {
1285 status = result;
1286 } else {
1287 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
1288 if (!status) {
1289 status = writeFileDescriptor(fd, true /*takeOwnership*/);
1290 if (!status) {
1291 outBlob->init(fd, ptr, len, mutableCopy);
1292 return NO_ERROR;
1293 }
1294 }
1295 }
1296 }
1297 ::munmap(ptr, len);
1298 }
1299 ::close(fd);
1300 return status;
1301 }
1302
status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1304 {
1305 // Must match up with what's done in writeBlob.
1306 if (!mAllowFds) return FDS_NOT_ALLOWED;
1307 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1308 if (status) return status;
1309 return writeDupFileDescriptor(fd);
1310 }
1311
status_t Parcel::write(const FlattenableHelperInterface& val)
1313 {
1314 status_t err;
1315
1316 // size if needed
1317 const size_t len = val.getFlattenedSize();
1318 const size_t fd_count = val.getFdCount();
1319
1320 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
1321 // don't accept size_t values which may have come from an
1322 // inadvertent conversion from a negative int.
1323 return BAD_VALUE;
1324 }
1325
1326 err = this->writeInt32(len);
1327 if (err) return err;
1328
1329 err = this->writeInt32(fd_count);
1330 if (err) return err;
1331
1332 // payload
1333 void* const buf = this->writeInplace(len);
1334 if (buf == nullptr)
1335 return BAD_VALUE;
1336
1337 int* fds = nullptr;
1338 if (fd_count) {
1339 fds = new (std::nothrow) int[fd_count];
1340 if (fds == nullptr) {
1341 ALOGE("write: failed to allocate requested %zu fds", fd_count);
1342 return BAD_VALUE;
1343 }
1344 }
1345
1346 err = val.flatten(buf, len, fds, fd_count);
1347 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1348 err = this->writeDupFileDescriptor( fds[i] );
1349 }
1350
1351 if (fd_count) {
1352 delete [] fds;
1353 }
1354
1355 return err;
1356 }
1357
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1359 {
1360 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1361 const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1362 if (enoughData && enoughObjects) {
1363 restart_write:
1364 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1365
1366 // remember if it's a file descriptor
1367 if (val.hdr.type == BINDER_TYPE_FD) {
1368 if (!mAllowFds) {
1369 // fail before modifying our object index
1370 return FDS_NOT_ALLOWED;
1371 }
1372 mHasFds = mFdsKnown = true;
1373 }
1374
1375 // Need to write meta-data?
1376 if (nullMetaData || val.binder != 0) {
1377 mObjects[mObjectsSize] = mDataPos;
1378 acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
1379 mObjectsSize++;
1380 }
1381
1382 return finishWrite(sizeof(flat_binder_object));
1383 }
1384
1385 if (!enoughData) {
1386 const status_t err = growData(sizeof(val));
1387 if (err != NO_ERROR) return err;
1388 }
1389 if (!enoughObjects) {
1390 if (mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
1391 if ((mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
1392 size_t newSize = ((mObjectsSize+2)*3)/2;
1393 if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
1394 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1395 if (objects == nullptr) return NO_MEMORY;
1396 mObjects = objects;
1397 mObjectsCapacity = newSize;
1398 }
1399
1400 goto restart_write;
1401 }
1402
status_t Parcel::writeNoException()
1404 {
1405 binder::Status status;
1406 return status.writeToParcel(this);
1407 }
1408
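// Checks that a plain-data read ending at upperBound does not overlap any
// flattened binder object in the buffer, so object regions (and the handles or
// pointers inside them) cannot be read back as ordinary data.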
status_t Parcel::validateReadData(size_t upperBound) const
1410 {
1411 // Don't allow non-object reads on object data
1412 if (mObjectsSorted || mObjectsSize <= 1) {
1413 data_sorted:
1414 // Expect to check only against the next object
1415 if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
1416 // For some reason the current read position is greater than the next object
1417 // hint. Iterate until we find the right object
1418 size_t nextObject = mNextObjectHint;
1419 do {
1420 if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
1421 // Requested info overlaps with an object
1422 ALOGE("Attempt to read from protected data in Parcel %p", this);
1423 return PERMISSION_DENIED;
1424 }
1425 nextObject++;
1426 } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
1427 mNextObjectHint = nextObject;
1428 }
1429 return NO_ERROR;
1430 }
1431 // Quickly determine if mObjects is sorted.
1432 binder_size_t* currObj = mObjects + mObjectsSize - 1;
1433 binder_size_t* prevObj = currObj;
1434 while (currObj > mObjects) {
1435 prevObj--;
1436 if(*prevObj > *currObj) {
1437 goto data_unsorted;
1438 }
1439 currObj--;
1440 }
1441 mObjectsSorted = true;
1442 goto data_sorted;
1443
1444 data_unsorted:
1445 // Insertion Sort mObjects
1446 // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
1447 // switch to std::sort(mObjects, mObjects + mObjectsSize);
1448 for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
1449 binder_size_t temp = *iter0;
1450 binder_size_t* iter1 = iter0 - 1;
1451 while (iter1 >= mObjects && *iter1 > temp) {
1452 *(iter1 + 1) = *iter1;
1453 iter1--;
1454 }
1455 *(iter1 + 1) = temp;
1456 }
1457 mNextObjectHint = 0;
1458 mObjectsSorted = true;
1459 goto data_sorted;
1460 }
1461
status_t Parcel::read(void* outData, size_t len) const
1463 {
1464 if (len > INT32_MAX) {
1465 // don't accept size_t values which may have come from an
1466 // inadvertent conversion from a negative int.
1467 return BAD_VALUE;
1468 }
1469
1470 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1471 && len <= pad_size(len)) {
1472 if (mObjectsSize > 0) {
1473 status_t err = validateReadData(mDataPos + pad_size(len));
1474 if(err != NO_ERROR) {
1475 // Still increment the data position by the expected length
1476 mDataPos += pad_size(len);
1477 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1478 return err;
1479 }
1480 }
1481 memcpy(outData, mData+mDataPos, len);
1482 mDataPos += pad_size(len);
1483 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1484 return NO_ERROR;
1485 }
1486 return NOT_ENOUGH_DATA;
1487 }
1488
const void* Parcel::readInplace(size_t len) const
1490 {
1491 if (len > INT32_MAX) {
1492 // don't accept size_t values which may have come from an
1493 // inadvertent conversion from a negative int.
1494 return nullptr;
1495 }
1496
1497 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1498 && len <= pad_size(len)) {
1499 if (mObjectsSize > 0) {
1500 status_t err = validateReadData(mDataPos + pad_size(len));
1501 if(err != NO_ERROR) {
1502 // Still increment the data position by the expected length
1503 mDataPos += pad_size(len);
1504 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1505 return nullptr;
1506 }
1507 }
1508
1509 const void* data = mData+mDataPos;
1510 mDataPos += pad_size(len);
1511 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1512 return data;
1513 }
1514 return nullptr;
1515 }
1516
1517 template<class T>
status_t Parcel::readAligned(T *pArg) const {
1519 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1520
1521 if ((mDataPos+sizeof(T)) <= mDataSize) {
1522 if (mObjectsSize > 0) {
1523 status_t err = validateReadData(mDataPos + sizeof(T));
1524 if(err != NO_ERROR) {
1525 // Still increment the data position by the expected length
1526 mDataPos += sizeof(T);
1527 return err;
1528 }
1529 }
1530
1531 const void* data = mData+mDataPos;
1532 mDataPos += sizeof(T);
1533 *pArg = *reinterpret_cast<const T*>(data);
1534 return NO_ERROR;
1535 } else {
1536 return NOT_ENOUGH_DATA;
1537 }
1538 }
1539
1540 template<class T>
T Parcel::readAligned() const {
1542 T result;
1543 if (readAligned(&result) != NO_ERROR) {
1544 result = 0;
1545 }
1546
1547 return result;
1548 }
1549
1550 template<class T>
status_t Parcel::writeAligned(T val) {
1552 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1553
1554 if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1555 restart_write:
1556 *reinterpret_cast<T*>(mData+mDataPos) = val;
1557 return finishWrite(sizeof(val));
1558 }
1559
1560 status_t err = growData(sizeof(val));
1561 if (err == NO_ERROR) goto restart_write;
1562 return err;
1563 }
1564
status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1566 size_t size;
1567 if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1568 return readByteVectorInternal(val, size);
1569 }
1570
status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1572 size_t size;
1573 if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1574 return readByteVectorInternal(val, size);
1575 }
1576
status_t Parcel::readByteVector(std::optional<std::vector<int8_t>>* val) const {
1578 size_t size;
1579 if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1580 if (!*val) {
1581 // reserveOutVector does not create the out vector if size is < 0.
1582 // This occurs when writing a null byte vector.
1583 return OK;
1584 }
1585 return readByteVectorInternal(&**val, size);
1586 }
1587
status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1589 size_t size;
1590 if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1591 if (val->get() == nullptr) {
1592 // reserveOutVector does not create the out vector if size is < 0.
1593 // This occurs when writing a null byte vector.
1594 return OK;
1595 }
1596 return readByteVectorInternal(val->get(), size);
1597 }
1598
status_t Parcel::readByteVector(std::optional<std::vector<uint8_t>>* val) const {
1600 size_t size;
1601 if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1602 if (!*val) {
1603 // reserveOutVector does not create the out vector if size is < 0.
1604 // This occurs when writing a null byte vector.
1605 return OK;
1606 }
1607 return readByteVectorInternal(&**val, size);
1608 }
1609
status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1611 size_t size;
1612 if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1613 if (val->get() == nullptr) {
1614 // reserveOutVector does not create the out vector if size is < 0.
1615 // This occurs when writing a null byte vector.
1616 return OK;
1617 }
1618 return readByteVectorInternal(val->get(), size);
1619 }
1620
status_t Parcel::readInt32Vector(std::optional<std::vector<int32_t>>* val) const {
1622 return readNullableTypedVector(val, &Parcel::readInt32);
1623 }
1624
status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1626 return readNullableTypedVector(val, &Parcel::readInt32);
1627 }
1628
status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1630 return readTypedVector(val, &Parcel::readInt32);
1631 }
1632
status_t Parcel::readInt64Vector(std::optional<std::vector<int64_t>>* val) const {
1634 return readNullableTypedVector(val, &Parcel::readInt64);
1635 }
1636
status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1638 return readNullableTypedVector(val, &Parcel::readInt64);
1639 }
1640
status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1642 return readTypedVector(val, &Parcel::readInt64);
1643 }
1644
status_t Parcel::readUint64Vector(std::optional<std::vector<uint64_t>>* val) const {
1646 return readNullableTypedVector(val, &Parcel::readUint64);
1647 }
1648
status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const {
1650 return readNullableTypedVector(val, &Parcel::readUint64);
1651 }
1652
status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const {
1654 return readTypedVector(val, &Parcel::readUint64);
1655 }
1656
status_t Parcel::readFloatVector(std::optional<std::vector<float>>* val) const {
1658 return readNullableTypedVector(val, &Parcel::readFloat);
1659 }
1660
status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1662 return readNullableTypedVector(val, &Parcel::readFloat);
1663 }
1664
status_t Parcel::readFloatVector(std::vector<float>* val) const {
1666 return readTypedVector(val, &Parcel::readFloat);
1667 }
1668
status_t Parcel::readDoubleVector(std::optional<std::vector<double>>* val) const {
1670 return readNullableTypedVector(val, &Parcel::readDouble);
1671 }
1672
1673 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1674 return readNullableTypedVector(val, &Parcel::readDouble);
1675 }
1676
1677 status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1678 return readTypedVector(val, &Parcel::readDouble);
1679 }
1680
1681 status_t Parcel::readBoolVector(std::optional<std::vector<bool>>* val) const {
1682 const int32_t start = dataPosition();
1683 int32_t size;
1684 status_t status = readInt32(&size);
1685 val->reset();
1686
1687 if (status != OK || size < 0) {
1688 return status;
1689 }
1690
1691 setDataPosition(start);
1692 val->emplace();
1693
1694 status = readBoolVector(&**val);
1695
1696 if (status != OK) {
1697 val->reset();
1698 }
1699
1700 return status;
1701 }
1702
1703 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1704 const int32_t start = dataPosition();
1705 int32_t size;
1706 status_t status = readInt32(&size);
1707 val->reset();
1708
1709 if (status != OK || size < 0) {
1710 return status;
1711 }
1712
1713 setDataPosition(start);
1714 val->reset(new (std::nothrow) std::vector<bool>());
1715
1716 status = readBoolVector(val->get());
1717
1718 if (status != OK) {
1719 val->reset();
1720 }
1721
1722 return status;
1723 }
1724
1725 status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1726 int32_t size;
1727 status_t status = readInt32(&size);
1728
1729 if (status != OK) {
1730 return status;
1731 }
1732
1733 if (size < 0) {
1734 return UNEXPECTED_NULL;
1735 }
1736
1737 val->resize(size);
1738
1739 /* C++ bool handling means a vector of bools isn't necessarily addressable
1740 * (we might use individual bits)
1741 */
1742 bool data;
1743 for (int32_t i = 0; i < size; ++i) {
1744 status = readBool(&data);
1745 (*val)[i] = data;
1746
1747 if (status != OK) {
1748 return status;
1749 }
1750 }
1751
1752 return OK;
1753 }
1754
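// Illustrative sketch, not upstream code: bools travel as one int32 each,
// preceded by an int32 count, which is why readBoolVector() above copies
// element by element instead of reading into &(*val)[0] (std::vector<bool> may
// pack bits and has no addressable bool storage). Assumes the matching
// writeBoolVector() defined earlier in this file.
[[maybe_unused]] static void exampleBoolVectorRoundTrip() {
    Parcel p;
    p.writeBoolVector(std::vector<bool>{true, false, true});  // wire: 3, 1, 0, 1 (one int32 each)

    p.setDataPosition(0);
    std::vector<bool> out;
    status_t err = p.readBoolVector(&out);  // err == OK, out == {true, false, true}
    (void)err;
}
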
1755 status_t Parcel::readCharVector(std::optional<std::vector<char16_t>>* val) const {
1756 return readNullableTypedVector(val, &Parcel::readChar);
1757 }
1758
1759 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1760 return readNullableTypedVector(val, &Parcel::readChar);
1761 }
1762
1763 status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1764 return readTypedVector(val, &Parcel::readChar);
1765 }
1766
1767 status_t Parcel::readString16Vector(
1768 std::optional<std::vector<std::optional<String16>>>* val) const {
1769 return readNullableTypedVector(val, &Parcel::readString16);
1770 }
1771
1772 status_t Parcel::readString16Vector(
1773 std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1774 return readNullableTypedVector(val, &Parcel::readString16);
1775 }
1776
1777 status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1778 return readTypedVector(val, &Parcel::readString16);
1779 }
1780
1781 status_t Parcel::readUtf8VectorFromUtf16Vector(
1782 std::optional<std::vector<std::optional<std::string>>>* val) const {
1783 return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1784 }
1785
1786 status_t Parcel::readUtf8VectorFromUtf16Vector(
1787 std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1788 return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1789 }
1790
1791 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1792 return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1793 }
1794
1795 status_t Parcel::readInt32(int32_t *pArg) const
1796 {
1797 return readAligned(pArg);
1798 }
1799
1800 int32_t Parcel::readInt32() const
1801 {
1802 return readAligned<int32_t>();
1803 }
1804
1805 status_t Parcel::readUint32(uint32_t *pArg) const
1806 {
1807 return readAligned(pArg);
1808 }
1809
1810 uint32_t Parcel::readUint32() const
1811 {
1812 return readAligned<uint32_t>();
1813 }
1814
1815 status_t Parcel::readInt64(int64_t *pArg) const
1816 {
1817 return readAligned(pArg);
1818 }
1819
1820
1821 int64_t Parcel::readInt64() const
1822 {
1823 return readAligned<int64_t>();
1824 }
1825
1826 status_t Parcel::readUint64(uint64_t *pArg) const
1827 {
1828 return readAligned(pArg);
1829 }
1830
1831 uint64_t Parcel::readUint64() const
1832 {
1833 return readAligned<uint64_t>();
1834 }
1835
1836 status_t Parcel::readPointer(uintptr_t *pArg) const
1837 {
1838 status_t ret;
1839 binder_uintptr_t ptr;
1840 ret = readAligned(&ptr);
1841 if (!ret)
1842 *pArg = ptr;
1843 return ret;
1844 }
1845
1846 uintptr_t Parcel::readPointer() const
1847 {
1848 return readAligned<binder_uintptr_t>();
1849 }
1850
1851
1852 status_t Parcel::readFloat(float *pArg) const
1853 {
1854 return readAligned(pArg);
1855 }
1856
1857
1858 float Parcel::readFloat() const
1859 {
1860 return readAligned<float>();
1861 }
1862
1863 #if defined(__mips__) && defined(__mips_hard_float)
1864
1865 status_t Parcel::readDouble(double *pArg) const
1866 {
1867 union {
1868 double d;
1869 unsigned long long ll;
1870 } u;
1871 u.d = 0;
1872 status_t status;
1873 status = readAligned(&u.ll);
1874 *pArg = u.d;
1875 return status;
1876 }
1877
1878 double Parcel::readDouble() const
1879 {
1880 union {
1881 double d;
1882 unsigned long long ll;
1883 } u;
1884 u.ll = readAligned<unsigned long long>();
1885 return u.d;
1886 }
1887
1888 #else
1889
1890 status_t Parcel::readDouble(double *pArg) const
1891 {
1892 return readAligned(pArg);
1893 }
1894
1895 double Parcel::readDouble() const
1896 {
1897 return readAligned<double>();
1898 }
1899
1900 #endif
1901
1902 status_t Parcel::readIntPtr(intptr_t *pArg) const
1903 {
1904 return readAligned(pArg);
1905 }
1906
1907
1908 intptr_t Parcel::readIntPtr() const
1909 {
1910 return readAligned<intptr_t>();
1911 }
1912
1913 status_t Parcel::readBool(bool *pArg) const
1914 {
1915 int32_t tmp = 0;
1916 status_t ret = readInt32(&tmp);
1917 *pArg = (tmp != 0);
1918 return ret;
1919 }
1920
1921 bool Parcel::readBool() const
1922 {
1923 return readInt32() != 0;
1924 }
1925
1926 status_t Parcel::readChar(char16_t *pArg) const
1927 {
1928 int32_t tmp = 0;
1929 status_t ret = readInt32(&tmp);
1930 *pArg = char16_t(tmp);
1931 return ret;
1932 }
1933
1934 char16_t Parcel::readChar() const
1935 {
1936 return char16_t(readInt32());
1937 }
1938
1939 status_t Parcel::readByte(int8_t *pArg) const
1940 {
1941 int32_t tmp = 0;
1942 status_t ret = readInt32(&tmp);
1943 *pArg = int8_t(tmp);
1944 return ret;
1945 }
1946
1947 int8_t Parcel::readByte() const
1948 {
1949 return int8_t(readInt32());
1950 }
1951
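// Illustrative sketch, not upstream code: readByte()/readChar() above show that
// bytes and chars are widened to a full int32 on the wire, so each scalar still
// occupies four padded bytes. Assumes writeByte()/writeChar() defined earlier
// in this file.
[[maybe_unused]] static void exampleScalarWidening() {
    Parcel p;
    p.writeByte(int8_t{0x7f});
    p.writeChar(u'A');
    // Two scalars -> 8 bytes of parcel data, not 3.

    p.setDataPosition(0);
    int8_t b = p.readByte();    // 0x7f
    char16_t c = p.readChar();  // u'A'
    (void)b; (void)c;
}
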
1952 status_t Parcel::readUtf8FromUtf16(std::string* str) const {
1953 size_t utf16Size = 0;
1954 const char16_t* src = readString16Inplace(&utf16Size);
1955 if (!src) {
1956 return UNEXPECTED_NULL;
1957 }
1958
1959 // Save ourselves the trouble, we're done.
1960 if (utf16Size == 0u) {
1961 str->clear();
1962 return NO_ERROR;
1963 }
1964
1965 // Allow for closing '\0'
1966 ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
1967 if (utf8Size < 1) {
1968 return BAD_VALUE;
1969 }
1970 // Note that while it is probably safe to assume std::string::resize keeps a
1971 // spare byte around for the trailing NUL, we still pass a size that includes the trailing NUL
1972 str->resize(utf8Size);
1973 utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
1974 str->resize(utf8Size - 1);
1975 return NO_ERROR;
1976 }
1977
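// Illustrative sketch, not upstream code: strings written with
// writeUtf8AsUtf16() travel as UTF-16 code units and are converted back by
// readUtf8FromUtf16() above (non-ASCII text round-trips losslessly). Assumes
// writeUtf8AsUtf16() defined earlier in this file.
[[maybe_unused]] static void exampleUtf8RoundTrip() {
    Parcel p;
    p.writeUtf8AsUtf16(std::string("hello"));  // stored on the wire as UTF-16

    p.setDataPosition(0);
    std::string out;
    status_t err = p.readUtf8FromUtf16(&out);  // err == OK, out == "hello"
    (void)err;
}
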
1978 status_t Parcel::readUtf8FromUtf16(std::optional<std::string>* str) const {
1979 const int32_t start = dataPosition();
1980 int32_t size;
1981 status_t status = readInt32(&size);
1982 str->reset();
1983
1984 if (status != OK || size < 0) {
1985 return status;
1986 }
1987
1988 setDataPosition(start);
1989 str->emplace();
1990 return readUtf8FromUtf16(&**str);
1991 }
1992
1993 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
1994 const int32_t start = dataPosition();
1995 int32_t size;
1996 status_t status = readInt32(&size);
1997 str->reset();
1998
1999 if (status != OK || size < 0) {
2000 return status;
2001 }
2002
2003 setDataPosition(start);
2004 str->reset(new (std::nothrow) std::string());
2005 return readUtf8FromUtf16(str->get());
2006 }
2007
2008 const char* Parcel::readCString() const
2009 {
2010 if (mDataPos < mDataSize) {
2011 const size_t avail = mDataSize-mDataPos;
2012 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
2013 // is the string's trailing NUL within the parcel's valid bounds?
2014 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
2015 if (eos) {
2016 const size_t len = eos - str;
2017 mDataPos += pad_size(len+1);
2018 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
2019 return str;
2020 }
2021 }
2022 return nullptr;
2023 }
2024
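// Illustrative sketch, not upstream code: readCString() above returns a pointer
// into the Parcel's own buffer (no copy), valid only while the Parcel is alive
// and its data is not freed or rewritten, or nullptr if no terminating NUL lies
// within bounds. Assumes writeCString() defined earlier in this file.
[[maybe_unused]] static void exampleCString() {
    Parcel p;
    p.writeCString("abc");  // writes "abc\0" (4 bytes), padded to a 4-byte boundary

    p.setDataPosition(0);
    const char* s = p.readCString();  // points into p's buffer; strcmp(s, "abc") == 0
    (void)s;
}
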
2025 String8 Parcel::readString8() const
2026 {
2027 String8 retString;
2028 status_t status = readString8(&retString);
2029 if (status != OK) {
2030 // We don't care about errors here, so just return an empty string.
2031 return String8();
2032 }
2033 return retString;
2034 }
2035
2036 status_t Parcel::readString8(String8* pArg) const
2037 {
2038 int32_t size;
2039 status_t status = readInt32(&size);
2040 if (status != OK) {
2041 return status;
2042 }
2043 // watch for potential int overflow from size+1
2044 if (size < 0 || size >= INT32_MAX) {
2045 return BAD_VALUE;
2046 }
2047 // |writeString8| writes nothing for empty string.
2048 if (size == 0) {
2049 *pArg = String8();
2050 return OK;
2051 }
2052 const char* str = (const char*)readInplace(size + 1);
2053 if (str == nullptr) {
2054 return BAD_VALUE;
2055 }
2056 pArg->setTo(str, size);
2057 return OK;
2058 }
2059
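// Illustrative sketch, not upstream code: per readString8() above, a String8 is
// an int32 byte length followed (for non-empty strings) by the bytes plus a NUL,
// padded to 4 bytes; an empty string is just the length 0. Assumes writeString8()
// defined earlier in this file.
[[maybe_unused]] static void exampleString8RoundTrip() {
    Parcel p;
    p.writeString8(String8("binder"));

    p.setDataPosition(0);
    String8 out;
    status_t err = p.readString8(&out);  // err == OK, out == "binder"
    (void)err;
}
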
2060 String16 Parcel::readString16() const
2061 {
2062 size_t len;
2063 const char16_t* str = readString16Inplace(&len);
2064 if (str) return String16(str, len);
2065 ALOGE("Reading a NULL string not supported here.");
2066 return String16();
2067 }
2068
2069 status_t Parcel::readString16(std::optional<String16>* pArg) const
2070 {
2071 const int32_t start = dataPosition();
2072 int32_t size;
2073 status_t status = readInt32(&size);
2074 pArg->reset();
2075
2076 if (status != OK || size < 0) {
2077 return status;
2078 }
2079
2080 setDataPosition(start);
2081 pArg->emplace();
2082
2083 status = readString16(&**pArg);
2084
2085 if (status != OK) {
2086 pArg->reset();
2087 }
2088
2089 return status;
2090 }
2091
2092 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
2093 {
2094 const int32_t start = dataPosition();
2095 int32_t size;
2096 status_t status = readInt32(&size);
2097 pArg->reset();
2098
2099 if (status != OK || size < 0) {
2100 return status;
2101 }
2102
2103 setDataPosition(start);
2104 pArg->reset(new (std::nothrow) String16());
2105
2106 status = readString16(pArg->get());
2107
2108 if (status != OK) {
2109 pArg->reset();
2110 }
2111
2112 return status;
2113 }
2114
2115 status_t Parcel::readString16(String16* pArg) const
2116 {
2117 size_t len;
2118 const char16_t* str = readString16Inplace(&len);
2119 if (str) {
2120 pArg->setTo(str, len);
2121 return 0;
2122 } else {
2123 *pArg = String16();
2124 return UNEXPECTED_NULL;
2125 }
2126 }
2127
2128 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
2129 {
2130 int32_t size = readInt32();
2131 // watch for potential int overflow from size+1
2132 if (size >= 0 && size < INT32_MAX) {
2133 *outLen = size;
2134 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
2135 if (str != nullptr) {
2136 return str;
2137 }
2138 }
2139 *outLen = 0;
2140 return nullptr;
2141 }
2142
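// Illustrative sketch, not upstream code: readString16Inplace() above returns a
// pointer to the UTF-16 data inside the Parcel (length in code units, excluding
// the terminating NUL) rather than copying it. Assumes writeString16() defined
// earlier in this file.
[[maybe_unused]] static void exampleString16Inplace() {
    Parcel p;
    p.writeString16(String16("hi"));

    p.setDataPosition(0);
    size_t len = 0;
    const char16_t* s = p.readString16Inplace(&len);  // len == 2, s points into p's buffer
    (void)s;
}
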
2143 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2144 {
2145 status_t status = readNullableStrongBinder(val);
2146 if (status == OK && !val->get()) {
2147 status = UNEXPECTED_NULL;
2148 }
2149 return status;
2150 }
2151
2152 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
2153 {
2154 return unflattenBinder(val);
2155 }
2156
2157 sp<IBinder> Parcel::readStrongBinder() const
2158 {
2159 sp<IBinder> val;
2160 // Note that a lot of code in Android reads binders by hand with this
2161 // method, and that code has historically been ok with getting nullptr
2162 // back (while ignoring error codes).
2163 readNullableStrongBinder(&val);
2164 return val;
2165 }
2166
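// Illustrative sketch, not upstream code: readStrongBinder(sp<>*) above treats
// a null binder as an error, readNullableStrongBinder() accepts it, and the
// no-argument overload swallows errors entirely. Assumes writeStrongBinder()
// defined earlier in this file.
[[maybe_unused]] static void exampleStrongBinder(const sp<IBinder>& binder) {
    Parcel p;
    p.writeStrongBinder(binder);  // writing a null sp<> is legal and produces a "null" object

    p.setDataPosition(0);
    sp<IBinder> out;
    status_t err = p.readStrongBinder(&out);  // UNEXPECTED_NULL if binder was null, OK otherwise
    (void)err;
}
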
2167 status_t Parcel::readParcelable(Parcelable* parcelable) const {
2168 int32_t have_parcelable = 0;
2169 status_t status = readInt32(&have_parcelable);
2170 if (status != OK) {
2171 return status;
2172 }
2173 if (!have_parcelable) {
2174 return UNEXPECTED_NULL;
2175 }
2176 return parcelable->readFromParcel(this);
2177 }
2178
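// Illustrative sketch, not upstream code: readParcelable() above first reads a
// "present" int32 and then delegates to the object's readFromParcel(). Assumes
// <binder/Parcelable.h> is visible and the writeParcelable() helper defined
// earlier in this file; MyPoint is a hypothetical Parcelable for illustration.
struct MyPoint : public Parcelable {
    int32_t x = 0, y = 0;
    status_t writeToParcel(Parcel* parcel) const override {
        if (status_t s = parcel->writeInt32(x); s != OK) return s;
        return parcel->writeInt32(y);
    }
    status_t readFromParcel(const Parcel* parcel) override {
        if (status_t s = parcel->readInt32(&x); s != OK) return s;
        return parcel->readInt32(&y);
    }
};
[[maybe_unused]] static void exampleParcelable() {
    Parcel p;
    MyPoint in; in.x = 1; in.y = 2;
    p.writeParcelable(in);  // writes 1 ("present"), then x and y

    p.setDataPosition(0);
    MyPoint out;
    status_t err = p.readParcelable(&out);  // err == OK, out.x == 1, out.y == 2
    (void)err;
}
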
2179 int32_t Parcel::readExceptionCode() const
2180 {
2181 binder::Status status;
2182 status.readFromParcel(*this);
2183 return status.exceptionCode();
2184 }
2185
2186 native_handle* Parcel::readNativeHandle() const
2187 {
2188 int numFds, numInts;
2189 status_t err;
2190 err = readInt32(&numFds);
2191 if (err != NO_ERROR) return nullptr;
2192 err = readInt32(&numInts);
2193 if (err != NO_ERROR) return nullptr;
2194
2195 native_handle* h = native_handle_create(numFds, numInts);
2196 if (!h) {
2197 return nullptr;
2198 }
2199
2200 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
2201 h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
2202 if (h->data[i] < 0) {
2203 for (int j = 0; j < i; j++) {
2204 close(h->data[j]);
2205 }
2206 native_handle_delete(h);
2207 return nullptr;
2208 }
2209 }
2210 err = read(h->data + numFds, sizeof(int)*numInts);
2211 if (err != NO_ERROR) {
2212 native_handle_close(h);
2213 native_handle_delete(h);
2214 h = nullptr;
2215 }
2216 return h;
2217 }
2218
2219 int Parcel::readFileDescriptor() const
2220 {
2221 const flat_binder_object* flat = readObject(true);
2222
2223 if (flat && flat->hdr.type == BINDER_TYPE_FD) {
2224 return flat->handle;
2225 }
2226
2227 return BAD_TYPE;
2228 }
2229
2230 int Parcel::readParcelFileDescriptor() const
2231 {
2232 int32_t hasComm = readInt32();
2233 int fd = readFileDescriptor();
2234 if (hasComm != 0) {
2235 // detach (owned by the binder driver)
2236 int comm = readFileDescriptor();
2237
2238 // warning: this must be kept in sync with:
2239 // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
2240 enum ParcelFileDescriptorStatus {
2241 DETACHED = 2,
2242 };
2243
2244 #if BYTE_ORDER == BIG_ENDIAN
2245 const int32_t message = ParcelFileDescriptorStatus::DETACHED;
2246 #endif
2247 #if BYTE_ORDER == LITTLE_ENDIAN
2248 const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
2249 #endif
2250
2251 ssize_t written = TEMP_FAILURE_RETRY(
2252 ::write(comm, &message, sizeof(message)));
2253
2254 if (written == -1 || written != sizeof(message)) {
2255 ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
2256 written, strerror(errno));
2257 return BAD_TYPE;
2258 }
2259 }
2260 return fd;
2261 }
2262
2263 status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const
2264 {
2265 int got = readFileDescriptor();
2266
2267 if (got == BAD_TYPE) {
2268 return BAD_TYPE;
2269 }
2270
2271 val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2272
2273 if (val->get() < 0) {
2274 return BAD_VALUE;
2275 }
2276
2277 return OK;
2278 }
2279
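// Illustrative sketch, not upstream code: readFileDescriptor() above returns an
// fd that is still owned by the Parcel, while readUniqueFileDescriptor() hands
// back an independently owned duplicate. Assumes writeUniqueFileDescriptor()
// defined earlier in this file; 'in' is any valid fd owned by the caller.
[[maybe_unused]] static void exampleUniqueFd(const base::unique_fd& in) {
    Parcel p;
    p.writeUniqueFileDescriptor(in);  // parcels a dup; 'in' stays owned by the caller

    p.setDataPosition(0);
    base::unique_fd out;
    status_t err = p.readUniqueFileDescriptor(&out);  // 'out' is a fresh dup, safe to close
    (void)err;
}
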
2280 status_t Parcel::readUniqueParcelFileDescriptor(base::unique_fd* val) const
2281 {
2282 int got = readParcelFileDescriptor();
2283
2284 if (got == BAD_TYPE) {
2285 return BAD_TYPE;
2286 }
2287
2288 val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2289
2290 if (val->get() < 0) {
2291 return BAD_VALUE;
2292 }
2293
2294 return OK;
2295 }
2296
2297 status_t Parcel::readUniqueFileDescriptorVector(std::optional<std::vector<base::unique_fd>>* val) const {
2298 return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2299 }
2300
2301 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const {
2302 return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2303 }
2304
2305 status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const {
2306 return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2307 }
2308
2309 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2310 {
2311 int32_t blobType;
2312 status_t status = readInt32(&blobType);
2313 if (status) return status;
2314
2315 if (blobType == BLOB_INPLACE) {
2316 ALOGV("readBlob: read in place");
2317 const void* ptr = readInplace(len);
2318 if (!ptr) return BAD_VALUE;
2319
2320 outBlob->init(-1, const_cast<void*>(ptr), len, false);
2321 return NO_ERROR;
2322 }
2323
2324 ALOGV("readBlob: read from ashmem");
2325 bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2326 int fd = readFileDescriptor();
2327 if (fd == int(BAD_TYPE)) return BAD_VALUE;
2328
2329 if (!ashmem_valid(fd)) {
2330 ALOGE("invalid fd");
2331 return BAD_VALUE;
2332 }
2333 int size = ashmem_get_size_region(fd);
2334 if (size < 0 || size_t(size) < len) {
2335 ALOGE("request size %zu does not match fd size %d", len, size);
2336 return BAD_VALUE;
2337 }
2338 void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2339 MAP_SHARED, fd, 0);
2340 if (ptr == MAP_FAILED) return NO_MEMORY;
2341
2342 outBlob->init(fd, ptr, len, isMutable);
2343 return NO_ERROR;
2344 }
2345
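// Illustrative sketch, not upstream code: the two branches of readBlob() above
// handle blobs that travel inline (BLOB_INPLACE) and blobs handed over as an
// ashmem fd; the writer switches to ashmem once the payload exceeds
// BLOB_INPLACE_LIMIT. Assumes writeBlob() defined earlier in this file.
[[maybe_unused]] static void exampleBlobRoundTrip() {
    Parcel p;
    const size_t kLen = 32 * 1024;  // > BLOB_INPLACE_LIMIT, so this goes to ashmem
    Parcel::WritableBlob wblob;
    if (p.writeBlob(kLen, /*mutableCopy=*/false, &wblob) == NO_ERROR) {
        memset(wblob.data(), 0xAB, kLen);  // fill the shared region
        wblob.release();
    }

    p.setDataPosition(0);
    Parcel::ReadableBlob rblob;
    status_t err = p.readBlob(kLen, &rblob);  // maps the ashmem region read-only
    if (err == NO_ERROR) rblob.release();
}
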
2346 status_t Parcel::read(FlattenableHelperInterface& val) const
2347 {
2348 // size
2349 const size_t len = this->readInt32();
2350 const size_t fd_count = this->readInt32();
2351
2352 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2353 // don't accept size_t values which may have come from an
2354 // inadvertent conversion from a negative int.
2355 return BAD_VALUE;
2356 }
2357
2358 // payload
2359 void const* const buf = this->readInplace(pad_size(len));
2360 if (buf == nullptr)
2361 return BAD_VALUE;
2362
2363 int* fds = nullptr;
2364 if (fd_count) {
2365 fds = new (std::nothrow) int[fd_count];
2366 if (fds == nullptr) {
2367 ALOGE("read: failed to allocate requested %zu fds", fd_count);
2368 return BAD_VALUE;
2369 }
2370 }
2371
2372 status_t err = NO_ERROR;
2373 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2374 int fd = this->readFileDescriptor();
2375 if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2376 err = BAD_VALUE;
2377 ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2378 i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2379 // Close all the file descriptors that were dup-ed.
2380 for (size_t j=0; j<i ;j++) {
2381 close(fds[j]);
2382 }
2383 }
2384 }
2385
2386 if (err == NO_ERROR) {
2387 err = val.unflatten(buf, len, fds, fd_count);
2388 }
2389
2390 if (fd_count) {
2391 delete [] fds;
2392 }
2393
2394 return err;
2395 }
2396 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2397 {
2398 const size_t DPOS = mDataPos;
2399 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2400 const flat_binder_object* obj
2401 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2402 mDataPos = DPOS + sizeof(flat_binder_object);
2403 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2404 // When transferring a NULL object, we don't write it into
2405 // the object list, so we don't want to check for it when
2406 // reading.
2407 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2408 return obj;
2409 }
2410
2411 // Ensure that this object is valid...
2412 binder_size_t* const OBJS = mObjects;
2413 const size_t N = mObjectsSize;
2414 size_t opos = mNextObjectHint;
2415
2416 if (N > 0) {
2417 ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2418 this, DPOS, opos);
2419
2420 // Start at the current hint position, looking for an object at
2421 // the current data position.
2422 if (opos < N) {
2423 while (opos < (N-1) && OBJS[opos] < DPOS) {
2424 opos++;
2425 }
2426 } else {
2427 opos = N-1;
2428 }
2429 if (OBJS[opos] == DPOS) {
2430 // Found it!
2431 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2432 this, DPOS, opos);
2433 mNextObjectHint = opos+1;
2434 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2435 return obj;
2436 }
2437
2438 // Look backwards for it...
2439 while (opos > 0 && OBJS[opos] > DPOS) {
2440 opos--;
2441 }
2442 if (OBJS[opos] == DPOS) {
2443 // Found it!
2444 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2445 this, DPOS, opos);
2446 mNextObjectHint = opos+1;
2447 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2448 return obj;
2449 }
2450 }
2451 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2452 this, DPOS);
2453 }
2454 return nullptr;
2455 }
2456
2457 void Parcel::closeFileDescriptors()
2458 {
2459 size_t i = mObjectsSize;
2460 if (i > 0) {
2461 //ALOGI("Closing file descriptors for %zu objects...", i);
2462 }
2463 while (i > 0) {
2464 i--;
2465 const flat_binder_object* flat
2466 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2467 if (flat->hdr.type == BINDER_TYPE_FD) {
2468 //ALOGI("Closing fd: %ld", flat->handle);
2469 close(flat->handle);
2470 }
2471 }
2472 }
2473
2474 uintptr_t Parcel::ipcData() const
2475 {
2476 return reinterpret_cast<uintptr_t>(mData);
2477 }
2478
2479 size_t Parcel::ipcDataSize() const
2480 {
2481 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2482 }
2483
2484 uintptr_t Parcel::ipcObjects() const
2485 {
2486 return reinterpret_cast<uintptr_t>(mObjects);
2487 }
2488
2489 size_t Parcel::ipcObjectsCount() const
2490 {
2491 return mObjectsSize;
2492 }
2493
2494 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2495 const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2496 {
2497 binder_size_t minOffset = 0;
2498 freeDataNoInit();
2499 mError = NO_ERROR;
2500 mData = const_cast<uint8_t*>(data);
2501 mDataSize = mDataCapacity = dataSize;
2502 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2503 mDataPos = 0;
2504 ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2505 mObjects = const_cast<binder_size_t*>(objects);
2506 mObjectsSize = mObjectsCapacity = objectsCount;
2507 mNextObjectHint = 0;
2508 mObjectsSorted = false;
2509 mOwner = relFunc;
2510 mOwnerCookie = relCookie;
2511 for (size_t i = 0; i < mObjectsSize; i++) {
2512 binder_size_t offset = mObjects[i];
2513 if (offset < minOffset) {
2514 ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2515 __func__, (uint64_t)offset, (uint64_t)minOffset);
2516 mObjectsSize = 0;
2517 break;
2518 }
2519 minOffset = offset + sizeof(flat_binder_object);
2520 }
2521 scanForFds();
2522 }
2523
2524 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2525 {
2526 to << "Parcel(";
2527
2528 if (errorCheck() != NO_ERROR) {
2529 const status_t err = errorCheck();
2530 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2531 } else if (dataSize() > 0) {
2532 const uint8_t* DATA = data();
2533 to << indent << HexDump(DATA, dataSize()) << dedent;
2534 const binder_size_t* OBJS = mObjects;
2535 const size_t N = objectsCount();
2536 for (size_t i=0; i<N; i++) {
2537 const flat_binder_object* flat
2538 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2539 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2540 << TypeCode(flat->hdr.type & 0x7f7f7f00)
2541 << " = " << flat->binder;
2542 }
2543 } else {
2544 to << "NULL";
2545 }
2546
2547 to << ")";
2548 }
2549
2550 void Parcel::releaseObjects()
2551 {
2552 size_t i = mObjectsSize;
2553 if (i == 0) {
2554 return;
2555 }
2556 sp<ProcessState> proc(ProcessState::self());
2557 uint8_t* const data = mData;
2558 binder_size_t* const objects = mObjects;
2559 while (i > 0) {
2560 i--;
2561 const flat_binder_object* flat
2562 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2563 release_object(proc, *flat, this, &mOpenAshmemSize);
2564 }
2565 }
2566
2567 void Parcel::acquireObjects()
2568 {
2569 size_t i = mObjectsSize;
2570 if (i == 0) {
2571 return;
2572 }
2573 const sp<ProcessState> proc(ProcessState::self());
2574 uint8_t* const data = mData;
2575 binder_size_t* const objects = mObjects;
2576 while (i > 0) {
2577 i--;
2578 const flat_binder_object* flat
2579 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2580 acquire_object(proc, *flat, this, &mOpenAshmemSize);
2581 }
2582 }
2583
2584 void Parcel::freeData()
2585 {
2586 freeDataNoInit();
2587 initState();
2588 }
2589
2590 void Parcel::freeDataNoInit()
2591 {
2592 if (mOwner) {
2593 LOG_ALLOC("Parcel %p: freeing other owner data", this);
2594 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2595 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2596 } else {
2597 LOG_ALLOC("Parcel %p: freeing allocated data", this);
2598 releaseObjects();
2599 if (mData) {
2600 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2601 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2602 if (mDataCapacity <= gParcelGlobalAllocSize) {
2603 gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2604 } else {
2605 gParcelGlobalAllocSize = 0;
2606 }
2607 if (gParcelGlobalAllocCount > 0) {
2608 gParcelGlobalAllocCount--;
2609 }
2610 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2611 free(mData);
2612 }
2613 if (mObjects) free(mObjects);
2614 }
2615 }
2616
2617 status_t Parcel::growData(size_t len)
2618 {
2619 if (len > INT32_MAX) {
2620 // don't accept size_t values which may have come from an
2621 // inadvertent conversion from a negative int.
2622 return BAD_VALUE;
2623 }
2624
2625 if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
2626 if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
2627 size_t newSize = ((mDataSize+len)*3)/2;
2628 return (newSize <= mDataSize)
2629 ? (status_t) NO_MEMORY
2630 : continueWrite(std::max(newSize, (size_t) 128));
2631 }
2632
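// Illustrative note, not upstream code: growData() above reserves 1.5x the
// space actually required, with a 128-byte floor. For example, a parcel holding
// 1000 bytes that needs 24 more grows its capacity to
// ((1000 + 24) * 3) / 2 = 1536 bytes.
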
2633 status_t Parcel::restartWrite(size_t desired)
2634 {
2635 if (desired > INT32_MAX) {
2636 // don't accept size_t values which may have come from an
2637 // inadvertent conversion from a negative int.
2638 return BAD_VALUE;
2639 }
2640
2641 if (mOwner) {
2642 freeData();
2643 return continueWrite(desired);
2644 }
2645
2646 uint8_t* data = (uint8_t*)realloc(mData, desired);
2647 if (!data && desired > mDataCapacity) {
2648 mError = NO_MEMORY;
2649 return NO_MEMORY;
2650 }
2651
2652 releaseObjects();
2653
2654 if (data) {
2655 LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2656 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2657 gParcelGlobalAllocSize += desired;
2658 gParcelGlobalAllocSize -= mDataCapacity;
2659 if (!mData) {
2660 gParcelGlobalAllocCount++;
2661 }
2662 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2663 mData = data;
2664 mDataCapacity = desired;
2665 }
2666
2667 mDataSize = mDataPos = 0;
2668 ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2669 ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2670
2671 free(mObjects);
2672 mObjects = nullptr;
2673 mObjectsSize = mObjectsCapacity = 0;
2674 mNextObjectHint = 0;
2675 mObjectsSorted = false;
2676 mHasFds = false;
2677 mFdsKnown = true;
2678 mAllowFds = true;
2679
2680 return NO_ERROR;
2681 }
2682
2683 status_t Parcel::continueWrite(size_t desired)
2684 {
2685 if (desired > INT32_MAX) {
2686 // don't accept size_t values which may have come from an
2687 // inadvertent conversion from a negative int.
2688 return BAD_VALUE;
2689 }
2690
2691 // If shrinking, first adjust for any objects that appear
2692 // after the new data size.
2693 size_t objectsSize = mObjectsSize;
2694 if (desired < mDataSize) {
2695 if (desired == 0) {
2696 objectsSize = 0;
2697 } else {
2698 while (objectsSize > 0) {
2699 if (mObjects[objectsSize-1] < desired)
2700 break;
2701 objectsSize--;
2702 }
2703 }
2704 }
2705
2706 if (mOwner) {
2707 // If the size is going to zero, just release the owner's data.
2708 if (desired == 0) {
2709 freeData();
2710 return NO_ERROR;
2711 }
2712
2713 // If there is a different owner, we need to take
2714 // possession.
2715 uint8_t* data = (uint8_t*)malloc(desired);
2716 if (!data) {
2717 mError = NO_MEMORY;
2718 return NO_MEMORY;
2719 }
2720 binder_size_t* objects = nullptr;
2721
2722 if (objectsSize) {
2723 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2724 if (!objects) {
2725 free(data);
2726
2727 mError = NO_MEMORY;
2728 return NO_MEMORY;
2729 }
2730
2731 // Little hack to only acquire references on objects
2732 // we will be keeping.
2733 size_t oldObjectsSize = mObjectsSize;
2734 mObjectsSize = objectsSize;
2735 acquireObjects();
2736 mObjectsSize = oldObjectsSize;
2737 }
2738
2739 if (mData) {
2740 memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2741 }
2742 if (objects && mObjects) {
2743 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2744 }
2745 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2746 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2747 mOwner = nullptr;
2748
2749 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2750 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2751 gParcelGlobalAllocSize += desired;
2752 gParcelGlobalAllocCount++;
2753 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2754
2755 mData = data;
2756 mObjects = objects;
2757 mDataSize = (mDataSize < desired) ? mDataSize : desired;
2758 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2759 mDataCapacity = desired;
2760 mObjectsSize = mObjectsCapacity = objectsSize;
2761 mNextObjectHint = 0;
2762 mObjectsSorted = false;
2763
2764 } else if (mData) {
2765 if (objectsSize < mObjectsSize) {
2766 // Need to release refs on any objects we are dropping.
2767 const sp<ProcessState> proc(ProcessState::self());
2768 for (size_t i=objectsSize; i<mObjectsSize; i++) {
2769 const flat_binder_object* flat
2770 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2771 if (flat->hdr.type == BINDER_TYPE_FD) {
2772 // will need to rescan because we may have lopped off the only FDs
2773 mFdsKnown = false;
2774 }
2775 release_object(proc, *flat, this, &mOpenAshmemSize);
2776 }
2777
2778 if (objectsSize == 0) {
2779 free(mObjects);
2780 mObjects = nullptr;
2781 mObjectsCapacity = 0;
2782 } else {
2783 binder_size_t* objects =
2784 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2785 if (objects) {
2786 mObjects = objects;
2787 mObjectsCapacity = objectsSize;
2788 }
2789 }
2790 mObjectsSize = objectsSize;
2791 mNextObjectHint = 0;
2792 mObjectsSorted = false;
2793 }
2794
2795 // We own the data, so we can just do a realloc().
2796 if (desired > mDataCapacity) {
2797 uint8_t* data = (uint8_t*)realloc(mData, desired);
2798 if (data) {
2799 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2800 desired);
2801 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2802 gParcelGlobalAllocSize += desired;
2803 gParcelGlobalAllocSize -= mDataCapacity;
2804 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2805 mData = data;
2806 mDataCapacity = desired;
2807 } else {
2808 mError = NO_MEMORY;
2809 return NO_MEMORY;
2810 }
2811 } else {
2812 if (mDataSize > desired) {
2813 mDataSize = desired;
2814 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2815 }
2816 if (mDataPos > desired) {
2817 mDataPos = desired;
2818 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2819 }
2820 }
2821
2822 } else {
2823 // This is the first data. Easy!
2824 uint8_t* data = (uint8_t*)malloc(desired);
2825 if (!data) {
2826 mError = NO_MEMORY;
2827 return NO_MEMORY;
2828 }
2829
2830 if (!(mDataCapacity == 0 && mObjects == nullptr
2831 && mObjectsCapacity == 0)) {
2832 ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2833 }
2834
2835 LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2836 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2837 gParcelGlobalAllocSize += desired;
2838 gParcelGlobalAllocCount++;
2839 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2840
2841 mData = data;
2842 mDataSize = mDataPos = 0;
2843 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2844 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2845 mDataCapacity = desired;
2846 }
2847
2848 return NO_ERROR;
2849 }
2850
2851 void Parcel::initState()
2852 {
2853 LOG_ALLOC("Parcel %p: initState", this);
2854 mError = NO_ERROR;
2855 mData = nullptr;
2856 mDataSize = 0;
2857 mDataCapacity = 0;
2858 mDataPos = 0;
2859 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2860 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2861 mObjects = nullptr;
2862 mObjectsSize = 0;
2863 mObjectsCapacity = 0;
2864 mNextObjectHint = 0;
2865 mObjectsSorted = false;
2866 mHasFds = false;
2867 mFdsKnown = true;
2868 mAllowFds = true;
2869 mOwner = nullptr;
2870 mOpenAshmemSize = 0;
2871 mWorkSourceRequestHeaderPosition = 0;
2872 mRequestHeaderPresent = false;
2873
2874 // Racing initializations are benign here: they only result in multiple identical writes.
2875 if (gMaxFds == 0) {
2876 struct rlimit result;
2877 if (!getrlimit(RLIMIT_NOFILE, &result)) {
2878 gMaxFds = (size_t)result.rlim_cur;
2879 //ALOGI("parcel fd limit set to %zu", gMaxFds);
2880 } else {
2881 ALOGW("Unable to getrlimit: %s", strerror(errno));
2882 gMaxFds = 1024;
2883 }
2884 }
2885 }
2886
2887 void Parcel::scanForFds() const
2888 {
2889 bool hasFds = false;
2890 for (size_t i=0; i<mObjectsSize; i++) {
2891 const flat_binder_object* flat
2892 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2893 if (flat->hdr.type == BINDER_TYPE_FD) {
2894 hasFds = true;
2895 break;
2896 }
2897 }
2898 mHasFds = hasFds;
2899 mFdsKnown = true;
2900 }
2901
2902 size_t Parcel::getBlobAshmemSize() const
2903 {
2904 // This used to return the total size of all blobs that were written to ashmem; it now returns
2905 // the ashmem currently referenced by this Parcel, which should be equivalent.
2906 // TODO: Remove method once ABI can be changed.
2907 return mOpenAshmemSize;
2908 }
2909
2910 size_t Parcel::getOpenAshmemSize() const
2911 {
2912 return mOpenAshmemSize;
2913 }
2914
2915 // --- Parcel::Blob ---
2916
2917 Parcel::Blob::Blob() :
2918 mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
2919 }
2920
2921 Parcel::Blob::~Blob() {
2922 release();
2923 }
2924
2925 void Parcel::Blob::release() {
2926 if (mFd != -1 && mData) {
2927 ::munmap(mData, mSize);
2928 }
2929 clear();
2930 }
2931
2932 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2933 mFd = fd;
2934 mData = data;
2935 mSize = size;
2936 mMutable = isMutable;
2937 }
2938
2939 void Parcel::Blob::clear() {
2940 mFd = -1;
2941 mData = nullptr;
2942 mSize = 0;
2943 mMutable = false;
2944 }
2945
2946 } // namespace android
2947