1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "IMemory"
18
19 #include <atomic>
20 #include <stdatomic.h>
21
22 #include <fcntl.h>
23 #include <stdint.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <sys/types.h>
27 #include <sys/mman.h>
28 #include <unistd.h>
29
30 #include <binder/IMemory.h>
31 #include <binder/Parcel.h>
32 #include <log/log.h>
33
34 #include <utils/KeyedVector.h>
35 #include <utils/threads.h>
36
37 #define VERBOSE 0
38
39 namespace android {
40 // ---------------------------------------------------------------------------
41
// Process-wide, ref-counted cache mapping a remote heap's binder to the
// single BpMemoryHeap proxy that actually mmap()s it. Registered as a
// DeathRecipient so a dead heap binder drops out of the cache.
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    // Called when a cached heap's hosting process dies; purges the entry.
    virtual void binderDied(const wp<IBinder>& who);

    // Returns the cached heap for |binder|, creating and caching a new
    // proxy (count = 1) on first lookup; bumps the count on a hit.
    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    // Decrements the entry's count; removes it when the count hits zero.
    void free_heap(const sp<IBinder>& binder);
    // Lookup without altering the reference count; falls back to a fresh
    // interface_cast when the binder is not cached.
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    // Logs every cached entry (debugging aid).
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;   // the canonical proxy for this binder
        int32_t count;          // number of find_heap() calls outstanding
        // Note that this cannot be meaningfully copied.
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;  // Protects entire vector below.
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
    // We do not use the copy-on-write capabilities of KeyedVector.
    // TODO: Reimplemement based on standard C++ container?
};
70
// Singleton heap cache for this process. Allocated at static-init time and
// intentionally never destroyed (it must outlive all BpMemoryHeap proxies).
static sp<HeapCache> gHeapCache = new HeapCache();

/******************************************************************************/

enum {
    // IMemoryHeap transaction: reply carries {fd, size, offset, flags}
    // (see BnMemoryHeap::onTransact / BpMemoryHeap::assertReallyMapped).
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};
78
// Client-side proxy for a remote IMemoryHeap. The heap is mapped lazily:
// every accessor calls assertMapped(), which populates the mutable fields
// below on first use (hence the getters can stay const).
class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    explicit BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    int getHeapID() const override;
    void* getBase() const override;
    size_t getSize() const override;
    uint32_t getFlags() const override;
    off_t getOffset() const override;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    // Ensure this proxy's mapping fields are populated (may borrow the
    // mapping from the canonical cached heap).
    void assertMapped() const;
    // Perform the remote HEAP_ID transaction and mmap the received fd.
    void assertReallyMapped() const;

    // -1 means "not mapped yet"; a release-store of the dup'd fd publishes
    // the other fields (see assertMapped/assertReallyMapped).
    mutable std::atomic<int32_t> mHeapId;
    mutable void* mBase;        // MAP_FAILED until mapped
    mutable size_t mSize;
    mutable uint32_t mFlags;
    mutable off_t mOffset;
    mutable bool mRealHeap;     // true if *this* proxy performed the mmap
    mutable Mutex mLock;        // guards the lazy-initialization writes
};
120
121 // ----------------------------------------------------------------------------
122
enum {
    // IMemory transaction: reply carries {heap binder, int64 offset,
    // uint64 size} (see BnMemory::onTransact / BpMemory::getMemory).
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};
126
// Client-side proxy for a remote IMemory. Caches the heap/offset/size
// obtained from the first GET_MEMORY transaction (fields are mutable so
// the const getMemory() can fill them in lazily).
class BpMemory : public BpInterface<IMemory>
{
public:
    explicit BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    // NOLINTNEXTLINE(google-default-arguments)
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=nullptr, size_t* size=nullptr) const;

private:
    mutable sp<IMemoryHeap> mHeap;  // null until first successful fetch
    mutable ssize_t mOffset;        // validated offset of this region in mHeap
    mutable size_t mSize;           // validated size; 0 if validation failed
};
140
141 /******************************************************************************/
142
fastPointer(const sp<IBinder> & binder,ssize_t offset) const143 void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
144 {
145 sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
146 void* const base = realHeap->base();
147 if (base == MAP_FAILED)
148 return nullptr;
149 return static_cast<char*>(base) + offset;
150 }
151
// Simply forwards to pointer(); both resolve the mapped address of this
// memory region (nullptr if the heap is not mapped).
void* IMemory::unsecurePointer() const {
    return pointer();
}
155
pointer() const156 void* IMemory::pointer() const {
157 ssize_t offset;
158 sp<IMemoryHeap> heap = getMemory(&offset);
159 void* const base = heap!=nullptr ? heap->base() : MAP_FAILED;
160 if (base == MAP_FAILED)
161 return nullptr;
162 return static_cast<char*>(base) + offset;
163 }
164
size() const165 size_t IMemory::size() const {
166 size_t size;
167 getMemory(nullptr, &size);
168 return size;
169 }
170
offset() const171 ssize_t IMemory::offset() const {
172 ssize_t offset;
173 getMemory(&offset);
174 return offset;
175 }
176
177 /******************************************************************************/
178
// Proxy starts with no cached heap; mOffset/mSize stay 0 until the first
// successful GET_MEMORY transaction in getMemory().
BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}
187
// NOLINTNEXTLINE(google-default-arguments)
// Fetch (once) and cache the heap/offset/size describing this region.
// The offset/size received from the remote are treated as untrusted and
// validated against the heap's size before being cached; on validation
// failure the region is reported as empty (mSize == 0 => returns null).
sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == nullptr) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            if (heap != nullptr) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != nullptr) {
                    // Wire format (must match BnMemory::onTransact):
                    // int64 offset, then uint64 size.
                    const int64_t offset64 = reply.readInt64();
                    const uint64_t size64 = reply.readUint64();
                    const ssize_t o = (ssize_t)offset64;
                    const size_t s = (size_t)size64;
                    size_t heapSize = mHeap->getSize();
                    // Reject values that were truncated by the narrowing
                    // casts, or that would place the region outside the heap.
                    if (s == size64 && o == offset64 // ILP32 bounds check
                            && s <= heapSize
                            && o >= 0
                            && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // Hm.
                        // Malformed/malicious reply: log the security event
                        // (bug id 26877992) and expose an empty region.
                        android_errorWriteWithInfoLog(0x534e4554,
                            "26877992", -1, nullptr, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    // An empty region reports no heap at all.
    return (mSize > 0) ? mHeap : nullptr;
}
225
226 // ---------------------------------------------------------------------------
227
// Generates the IMemory interface boilerplate (descriptor, asInterface, ...).
IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");

BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}
235
// NOLINTNEXTLINE(google-default-arguments)
// Server-side dispatch for IMemory. GET_MEMORY replies with the heap
// binder, then int64 offset, then uint64 size — the exact order
// BpMemory::getMemory() reads them in.
status_t BnMemory::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case GET_MEMORY: {
            CHECK_INTERFACE(IMemory, data, reply);
            ssize_t offset;
            size_t size;
            reply->writeStrongBinder( IInterface::asBinder(getMemory(&offset, &size)) );
            reply->writeInt64(offset);
            reply->writeUint64(size);
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
254
255
256 /******************************************************************************/
257
// Starts unmapped: mHeapId == -1 is the "not mapped yet" sentinel and
// mBase == MAP_FAILED until assertMapped()/assertReallyMapped() runs.
BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
        mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}
263
// Tear down this proxy's share of the mapping. Relaxed load is fine: the
// destructor cannot race with other users of this object.
BpMemoryHeap::~BpMemoryHeap() {
    int32_t heapId = mHeapId.load(memory_order_relaxed);
    if (heapId != -1) {
        // Every mapped proxy owns its own dup'd fd.
        close(heapId);
        if (mRealHeap) {
            // This proxy performed the mmap itself (assertReallyMapped),
            // so it is responsible for unmapping.
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                            binder.get(), this, mSize, heapId);
                }

                munmap(mBase, mSize);
            }
        } else {
            // This proxy only borrowed the canonical heap's mapping:
            // drop the cache reference taken by find_heap().
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}
287
// Lazily populate this proxy's mapping (mBase/mSize/mOffset/mHeapId) from
// the canonical heap in the process-wide cache. Double-checked locking on
// mHeapId: -1 means "not mapped yet"; the final release-store publishes
// the plain fields written under mLock.
void BpMemoryHeap::assertMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        // find_heap() returns (and refcounts) the canonical proxy for this
        // binder — possibly this very object.
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            // Re-check under the lock: another thread may have won the race.
            if (mHeapId.load(memory_order_relaxed) == -1) {
                mBase = heap->mBase;
                mSize = heap->mSize;
                mOffset = heap->mOffset;
                // Own an independent, close-on-exec duplicate of the fd.
                int fd = fcntl(heap->mHeapId.load(memory_order_relaxed), F_DUPFD_CLOEXEC, 0);
                ALOGE_IF(fd==-1, "cannot dup fd=%d",
                        heap->mHeapId.load(memory_order_relaxed));
                mHeapId.store(fd, memory_order_release);
            }
        } else {
            // something went wrong
            // Mapping failed: release the cache reference taken above.
            free_heap(binder);
        }
    }
}
312
// Perform the remote HEAP_ID transaction and mmap the received fd into
// this process. Only the canonical (cached) proxy ends up doing this;
// mRealHeap records that it owns the mapping.
void BpMemoryHeap::assertReallyMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {

        // remote call without mLock held, worse case scenario, we end up
        // calling transact() from multiple threads, but that's not a problem,
        // only mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        // Wire format (must match BnMemoryHeap::onTransact):
        // fd, uint64 size, int64 offset, uint32 flags.
        int parcel_fd = reply.readFileDescriptor();
        const uint64_t size64 = reply.readUint64();
        const int64_t offset64 = reply.readInt64();
        const uint32_t flags = reply.readUint32();
        const size_t size = (size_t)size64;
        const off_t offset = (off_t)offset64;
        // Reject failed transactions and values truncated by the
        // narrowing casts on 32-bit builds.
        if (err != NO_ERROR || // failed transaction
                size != size64 || offset != offset64) { // ILP32 size check
            ALOGE("binder=%p transaction failed fd=%d, size=%zu, err=%d (%s)",
                    IInterface::asBinder(this).get(),
                    parcel_fd, size, err, strerror(-err));
            return;
        }

        Mutex::Autolock _l(mLock);
        // Re-check under the lock: another thread may have mapped already.
        if (mHeapId.load(memory_order_relaxed) == -1) {
            // The parcel owns parcel_fd; dup a close-on-exec copy for us.
            int fd = fcntl(parcel_fd, F_DUPFD_CLOEXEC, 0);
            ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%zu, err=%d (%s)",
                    parcel_fd, size, err, strerror(errno));

            int access = PROT_READ;
            if (!(flags & READ_ONLY)) {
                access |= PROT_WRITE;
            }
            mRealHeap = true;
            mBase = mmap(nullptr, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zu, fd=%d (%s)",
                        IInterface::asBinder(this).get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                // Release-store publishes mBase/mSize/mFlags/mOffset to
                // readers that acquire-load mHeapId.
                mHeapId.store(fd, memory_order_release);
            }
        }
    }
}
364
// Returns the local (dup'd) fd for this heap, mapping it first if needed.
int BpMemoryHeap::getHeapID() const {
    assertMapped();
    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
    return mHeapId.load(memory_order_relaxed);
}
370
// Returns the mapped base address (MAP_FAILED if mapping failed).
void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}
375
// Returns the heap size reported by the remote (0 if never mapped).
size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}
380
// Returns the heap flags (e.g. READ_ONLY) reported by the remote.
uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}
385
// Returns the mmap offset reported by the remote.
off_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}
390
391 // ---------------------------------------------------------------------------
392
// Generates the IMemoryHeap interface boilerplate.
IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");

BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}
400
// NOLINTNEXTLINE(google-default-arguments)
// Server-side dispatch for IMemoryHeap. HEAP_ID replies with the heap fd,
// then uint64 size, int64 offset, uint32 flags — the exact order
// BpMemoryHeap::assertReallyMapped() reads them in.
status_t BnMemoryHeap::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
       case HEAP_ID: {
            CHECK_INTERFACE(IMemoryHeap, data, reply);
            reply->writeFileDescriptor(getHeapID());
            reply->writeUint64(getSize());
            reply->writeInt64(getOffset());
            reply->writeUint32(getFlags());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
418
419 /*****************************************************************************/
420
// The cache starts empty; entries are added on demand by find_heap().
HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}
429
// Death notification for a cached heap's binder: drop its cache entry
// (decrements the count; removes the entry when it reaches zero).
void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}
435
// Look up (or create) the canonical IMemoryHeap proxy for |binder|.
// A hit bumps the entry's reference count; a miss caches a freshly cast
// proxy with a count of one. Callers pair this with free_heap().
sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    const ssize_t idx = mHeapCache.indexOfKey(binder);
    if (idx < 0) {
        // Not cached yet: create the canonical proxy for this binder.
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //      binder.get(), info.heap.get(), info.count);
        mHeapCache.add(binder, info);
        return info.heap;
    }
    // Cache hit: take another reference on the existing entry.
    heap_info_t& info = mHeapCache.editValueAt(idx);
    ALOGD_IF(VERBOSE,
            "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
            binder.get(), info.heap.get(),
            static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
            static_cast<BpMemoryHeap*>(info.heap.get())
                ->mHeapId.load(memory_order_relaxed),
            info.count);
    ++info.count;
    return info.heap;
}
461
// Strong-pointer convenience overload; the wp<> overload does the work.
void HeapCache::free_heap(const sp<IBinder>& binder) {
    free_heap( wp<IBinder>(binder) );
}
465
// Drop one reference on |binder|'s cache entry; when the count reaches
// zero the entry is removed. The heap itself is held in |rel| so its
// destructor (which may close fds / unmap) runs *after* the cache lock
// is released, avoiding re-entry into this lock.
void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i>=0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            if (--info.count == 0) {
                ALOGD_IF(VERBOSE,
                        "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                        binder.unsafe_get(), info.heap.get(),
                        static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                        static_cast<BpMemoryHeap*>(info.heap.get())
                            ->mHeapId.load(memory_order_relaxed),
                        info.count);
                // Keep the heap alive past the lock scope (see above).
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}
490
// Fetch the heap proxy for |binder| without touching its reference count.
// Falls back to a fresh interface_cast when the binder is not cached.
sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    const ssize_t idx = mHeapCache.indexOfKey(binder);
    if (idx >= 0) {
        return mHeapCache.valueAt(idx).heap;
    }
    return interface_cast<IMemoryHeap>(binder);
}
500
dump_heaps()501 void HeapCache::dump_heaps()
502 {
503 Mutex::Autolock _l(mHeapCacheLock);
504 int c = mHeapCache.size();
505 for (int i=0 ; i<c ; i++) {
506 const heap_info_t& info = mHeapCache.valueAt(i);
507 BpMemoryHeap const* h(static_cast<BpMemoryHeap const *>(info.heap.get()));
508 ALOGD("hey=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
509 mHeapCache.keyAt(i).unsafe_get(),
510 info.heap.get(), info.count,
511 h->mHeapId.load(memory_order_relaxed), h->mBase, h->mSize);
512 }
513 }
514
515
516 // ---------------------------------------------------------------------------
517 } // namespace android
518