/* Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/dma-buf.h>
#include <poll.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <fstream>
#include <string>
#include <unordered_map>
#include <vector>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
#include <ion/ion.h>

#include <dmabufinfo/dmabufinfo.h>

using namespace ::android::dmabufinfo;
using namespace ::android::base;

#define MAX_HEAP_NAME 32
#define ION_HEAP_ANY_MASK (0x7fffffff)

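// Local copy of the kernel's struct ion_heap_data (ION uapi), kept here
// presumably so the test builds even against headers that predate the ION
// heap-query ABI. Only `name` and `heap_id` are read below.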
struct ion_heap_data {
    char name[MAX_HEAP_NAME];
    __u32 type;
    __u32 heap_id;
    __u32 reserved0;
    __u32 reserved1;
    __u32 reserved2;
};

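// DMA_BUF_SET_NAME may be missing from older kernel uapi headers, so define
// it locally; the value below matches the dma-buf ioctl used to name buffers.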
#ifndef DMA_BUF_SET_NAME
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 5, const char*)
#endif

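// Helper that fork()s a child process connected to this one by a socketpair.
// sendfd() passes a file descriptor to the child via SCM_RIGHTS; recvmsg() in
// the child installs a new descriptor for the same dma-buf, so the child holds
// its own reference until it is killed. The child acks every fd with a single
// byte so the parent knows the reference exists before the test re-reads the
// per-process dma-buf info.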
class fd_sharer {
  public:
    fd_sharer();
    ~fd_sharer() { kill(); }

    bool ok() const { return child_pid > 0; }
    bool sendfd(int fd);
    bool kill();
    pid_t pid() const { return child_pid; }

  private:
    unique_fd parent_fd, child_fd;
    pid_t child_pid;

    void run();
};

fd_sharer::fd_sharer() : parent_fd{}, child_fd{}, child_pid{-1} {
    bool sp_ok = android::base::Socketpair(SOCK_STREAM, &parent_fd, &child_fd);
    if (!sp_ok) return;

    child_pid = fork();
    if (child_pid < 0) return;

    if (child_pid == 0) run();
}

bool fd_sharer::kill() {
    int err = ::kill(child_pid, SIGKILL);
    if (err < 0) return false;

    return ::waitpid(child_pid, nullptr, 0) == child_pid;
}

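// Child loop: receive one fd at a time and reply with a single byte. The
// received descriptors are deliberately never closed, so each sendfd() call
// leaves one more reference to the buffer in the child's fd table.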
void fd_sharer::run() {
    while (true) {
        int fd;
        char unused = 0;

        iovec iov{};
        iov.iov_base = &unused;
        iov.iov_len = sizeof(unused);

        msghdr msg{};
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;

        char cmsg_buf[CMSG_SPACE(sizeof(fd))];
        msg.msg_control = cmsg_buf;
        msg.msg_controllen = sizeof(cmsg_buf);

        cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(fd));

        ssize_t s = TEMP_FAILURE_RETRY(recvmsg(child_fd, &msg, 0));
        if (s == -1) break;

        s = TEMP_FAILURE_RETRY(write(child_fd, &unused, sizeof(unused)));
        if (s == -1) break;
    }
}

bool fd_sharer::sendfd(int fd) {
    char unused = 0;

    iovec iov{};
    iov.iov_base = &unused;
    iov.iov_len = sizeof(unused);

    msghdr msg{};
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    char cmsg_buf[CMSG_SPACE(sizeof(fd))];
    msg.msg_control = cmsg_buf;
    msg.msg_controllen = sizeof(cmsg_buf);

    cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(fd));

    int* fd_buf = reinterpret_cast<int*>(CMSG_DATA(cmsg));
    *fd_buf = fd;

    ssize_t s = TEMP_FAILURE_RETRY(sendmsg(parent_fd, &msg, 0));
    if (s == -1) return false;

    // The target process installs the fd into its fd table during recvmsg().
    // If we returned now, there would be a brief window between sendfd()
    // finishing and libmemoryinfo actually seeing that the buffer has been
    // shared. This window is just large enough to break tests.
    //
    // To work around this, wait for the target process to respond with a
    // dummy byte, with a timeout of one second.
    pollfd p{};
    p.fd = parent_fd;
    p.events = POLLIN;
    int ready = poll(&p, 1, 1000);
    if (ready != 1) return false;

    s = TEMP_FAILURE_RETRY(read(parent_fd, &unused, sizeof(unused)));
    if (s == -1) return false;

    return true;
}

#define EXPECT_ONE_BUF_EQ(_bufptr, _name, _fdrefs, _maprefs, _expname, _count, _size) \
    do {                                                                              \
        EXPECT_EQ(_bufptr->name(), _name);                                            \
        EXPECT_EQ(_bufptr->fdrefs().size(), _fdrefs);                                 \
        EXPECT_EQ(_bufptr->maprefs().size(), _maprefs);                               \
        EXPECT_EQ(_bufptr->exporter(), _expname);                                     \
        EXPECT_EQ(_bufptr->count(), _count);                                          \
        EXPECT_EQ(_bufptr->size(), _size);                                            \
    } while (0)

#define EXPECT_PID_IN_FDREFS(_bufptr, _pid, _expect)                       \
    do {                                                                   \
        const std::unordered_map<pid_t, int>& _fdrefs = _bufptr->fdrefs(); \
        auto _ref = _fdrefs.find(_pid);                                    \
        EXPECT_EQ((_ref != _fdrefs.end()), _expect);                       \
    } while (0)

#define EXPECT_PID_IN_MAPREFS(_bufptr, _pid, _expect)                        \
    do {                                                                     \
        const std::unordered_map<pid_t, int>& _maprefs = _bufptr->maprefs(); \
        auto _ref = _maprefs.find(_pid);                                     \
        EXPECT_EQ((_ref != _maprefs.end()), _expect);                        \
    } while (0)

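// The sample below mimics the layout of the kernel's dma-buf debugfs buffer
// list: per buffer, one line with the size, two flag/mode fields, a reference
// count, the exporter name, the inode number and an optional buffer name,
// followed by an "Attached Devices" block. (The exact column meanings are
// inferred from the assertions below.)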
TEST(DmaBufInfoParser, TestReadDmaBufInfo) {
    std::string bufinfo = R"bufinfo(00045056 00000002 00000007 00000002 ion 00022069
Attached Devices:
Total 0 devices attached
01048576 00000002 00000007 00000001 ion 00019834 CAMERA
Attached Devices:
soc:qcom,cam_smmu:msm_cam_smmu_icp
Total 1 devices attached)bufinfo";

    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(bufinfo, tf.fd));
    std::string path = std::string(tf.path);

    std::vector<DmaBuffer> dmabufs;
    EXPECT_TRUE(ReadDmaBufInfo(&dmabufs, path));

    EXPECT_EQ(dmabufs.size(), 2UL);

    EXPECT_EQ(dmabufs[0].size(), 45056UL);
    EXPECT_EQ(dmabufs[0].inode(), 22069UL);
    EXPECT_EQ(dmabufs[0].count(), 2UL);
    EXPECT_EQ(dmabufs[0].exporter(), "ion");
    EXPECT_TRUE(dmabufs[0].name().empty());
    EXPECT_EQ(dmabufs[0].total_refs(), 0ULL);
    EXPECT_TRUE(dmabufs[0].fdrefs().empty());
    EXPECT_TRUE(dmabufs[0].maprefs().empty());

    EXPECT_EQ(dmabufs[1].size(), 1048576UL);
    EXPECT_EQ(dmabufs[1].inode(), 19834UL);
    EXPECT_EQ(dmabufs[1].count(), 1UL);
    EXPECT_EQ(dmabufs[1].exporter(), "ion");
    EXPECT_FALSE(dmabufs[1].name().empty());
    EXPECT_EQ(dmabufs[1].name(), "CAMERA");
    EXPECT_EQ(dmabufs[1].total_refs(), 0ULL);
    EXPECT_TRUE(dmabufs[1].fdrefs().empty());
    EXPECT_TRUE(dmabufs[1].maprefs().empty());
}

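// Fixture for the end-to-end tests below: opens ION in the constructor,
// allocates named dma-bufs from it, and checks that the library reports the
// expected per-process fd and mmap references.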
class DmaBufTester : public ::testing::Test {
  public:
    DmaBufTester() : ion_fd(ion_open()), ion_heap_mask(get_ion_heap_mask()) {}

    ~DmaBufTester() {
        if (ion_fd >= 0) {
            ion_close(ion_fd);
        }
    }

    bool is_valid() { return (ion_fd >= 0 && ion_heap_mask > 0); }

    unique_fd allocate(uint64_t size, const std::string& name) {
        int fd;
        int err = ion_alloc_fd(ion_fd, size, 0, ion_heap_mask, 0, &fd);
        if (err < 0) {
            printf("Failed ion_alloc_fd, return value: %d\n", err);
            return unique_fd{};
        }

        if (!name.empty()) {
            if (ioctl(fd, DMA_BUF_SET_NAME, name.c_str()) == -1) {
                printf("Failed ioctl(DMA_BUF_SET_NAME): %s\n", strerror(errno));
                close(fd);
                return unique_fd{};
            }
        }

        return unique_fd{fd};
    }

    void readAndCheckDmaBuffer(std::vector<DmaBuffer>* dmabufs, pid_t pid, const std::string name,
                               size_t fdrefs_size, size_t maprefs_size, const std::string exporter,
                               size_t refcount, uint64_t buf_size, bool expectFdrefs,
                               bool expectMapRefs) {
        EXPECT_TRUE(ReadDmaBufInfo(pid, dmabufs));
        EXPECT_EQ(dmabufs->size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs->begin(), name, fdrefs_size, maprefs_size, exporter, refcount,
                          buf_size);
        // Make sure the buffer has the right pid too.
        EXPECT_PID_IN_FDREFS(dmabufs->begin(), pid, expectFdrefs);
        EXPECT_PID_IN_MAPREFS(dmabufs->begin(), pid, expectMapRefs);
    }

    bool checkPidRef(DmaBuffer& dmabuf, pid_t pid, int expectFdrefs) {
        int fdrefs = dmabuf.fdrefs().find(pid)->second;
        return fdrefs == expectFdrefs;
    }

  private:
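    // Pick the ION heap mask used for allocations: on legacy ION any heap is
    // acceptable; on newer kernels, query the heap list and build a mask from
    // every heap named "ion_system_heap".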
    int get_ion_heap_mask() {
        if (ion_fd < 0) {
            return 0;
        }

        if (ion_is_legacy(ion_fd)) {
            // Since ION is still in staging, we've seen that the heap mask ids are also
            // changed across kernels for some reason. So, here we basically ask for a buffer
            // from _any_ heap.
            return ION_HEAP_ANY_MASK;
        }

        int cnt;
        int err = ion_query_heap_cnt(ion_fd, &cnt);
        if (err < 0) {
            return err;
        }

        std::vector<ion_heap_data> heaps;
        heaps.resize(cnt);
        err = ion_query_get_heaps(ion_fd, cnt, &heaps[0]);
        if (err < 0) {
            return err;
        }

        unsigned int ret = 0;
        for (auto& it : heaps) {
            if (!strcmp(it.name, "ion_system_heap")) {
                ret |= (1 << it.heap_id);
            }
        }

        return ret;
    }

    int ion_fd;
    const int ion_heap_mask;
};

TEST_F(DmaBufTester, TestFdRef) {
    // Test if a dma buffer is found while the corresponding file descriptor
    // is open
    ASSERT_TRUE(is_valid());
    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));

        EXPECT_EQ(dmabufs.size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs.begin(), "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL);

        // Make sure the buffer has the right pid too.
        EXPECT_PID_IN_FDREFS(dmabufs.begin(), pid, true);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}

TEST_F(DmaBufTester, TestMapRef) {
    // Test to make sure we can find a buffer if the fd is closed but the buffer
    // is mapped
    ASSERT_TRUE(is_valid());
    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        auto ptr = mmap(0, 4096, PROT_READ, MAP_SHARED, buf, 0);
        ASSERT_NE(ptr, MAP_FAILED);
        ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));

        EXPECT_EQ(dmabufs.size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs.begin(), "dmabuftester-4k", 1UL, 1UL, "ion", 2UL, 4096ULL);

        // Make sure the buffer has the right pid too.
        EXPECT_PID_IN_FDREFS(dmabufs.begin(), pid, true);
        EXPECT_PID_IN_MAPREFS(dmabufs.begin(), pid, true);

        // Close the file descriptor and re-read the stats
        buf.reset(-1);
        ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));

        EXPECT_EQ(dmabufs.size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs.begin(), "<unknown>", 0UL, 1UL, "<unknown>", 0UL, 4096ULL);

        EXPECT_PID_IN_FDREFS(dmabufs.begin(), pid, false);
        EXPECT_PID_IN_MAPREFS(dmabufs.begin(), pid, true);

        // Unmap the buffer and lose all references
        munmap(ptr, 4096);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}

TEST_F(DmaBufTester, TestSharedfd) {
    // Each time a shared buffer is received over a socket, the remote process
    // will take an extra reference on it.

    ASSERT_TRUE(is_valid());

    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        fd_sharer sharer{};
        ASSERT_TRUE(sharer.ok());
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);

        ASSERT_TRUE(sharer.sendfd(buf));
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 2UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 1));
        readAndCheckDmaBuffer(&dmabufs, sharer.pid(), "dmabuftester-4k", 1UL, 0UL, "ion", 2UL,
                              4096ULL, true, false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], sharer.pid(), 1));

        ASSERT_TRUE(sharer.sendfd(buf));
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 3UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 1));
        readAndCheckDmaBuffer(&dmabufs, sharer.pid(), "dmabuftester-4k", 1UL, 0UL, "ion", 3UL,
                              4096ULL, true, false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], sharer.pid(), 2));

        ASSERT_TRUE(sharer.kill());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}

TEST_F(DmaBufTester, DupFdTest) {
    // dup()ing an fd will make this process take an extra reference on the
    // shared buffer.

    ASSERT_TRUE(is_valid());

    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);

        unique_fd buf2{dup(buf)};
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 2UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 2));

        close(buf2.release());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 1));
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}

TEST_F(DmaBufTester, ForkTest) {
    // fork()ing a child will cause the child to automatically take a reference
    // on any existing shared buffers.
    ASSERT_TRUE(is_valid());

    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
        fd_sharer sharer{};
        ASSERT_TRUE(sharer.ok());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 2UL, 4096ULL, true,
                              false);
        readAndCheckDmaBuffer(&dmabufs, sharer.pid(), "dmabuftester-4k", 1UL, 0UL, "ion", 2UL,
                              4096ULL, true, false);
        ASSERT_TRUE(sharer.kill());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}

int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    ::android::base::InitLogging(argv, android::base::StderrLogger);
    return RUN_ALL_TESTS();
}