/*
 * Copyright (C) 2016-2017 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>

#include <log/log.h>
#include <cutils/atomic.h>

#include <ion/ion.h>
#include <sys/ioctl.h>

#include <hardware/hardware.h>

#if GRALLOC_USE_GRALLOC1_API == 1
#include <hardware/gralloc1.h>
#else
#include <hardware/gralloc.h>
#endif

#include "mali_gralloc_module.h"
#include "mali_gralloc_private_interface_types.h"
#include "mali_gralloc_buffer.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
#include "mali_gralloc_formats.h"
#include "mali_gralloc_usages.h"
#include "mali_gralloc_bufferdescriptor.h"
#include "ion_4.12.h"

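/*
 * Heap names used to look up heap IDs on the new (non-legacy) ION interface,
 * plus module-wide state: whether the kernel exposes the legacy ION ABI and
 * the cached system/CMA heap IDs.
 */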
#define ION_SYSTEM (char*)"ion_system_heap"
#define ION_CMA (char*)"linux,cma"
static bool gralloc_legacy_ion;
static int system_heap_id;
static int cma_heap_id;

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds);

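/*
 * Initialises the AFBC header region of a freshly allocated buffer. One
 * 16-byte header is written per 16x16 superblock; layout 0 is used for
 * RGB-like formats, layout 1 for YUV formats, and AFBC 1.2 tiled-header
 * buffers get zeroed layout-0 headers.
 */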
static void init_afbc(uint8_t *buf, uint64_t internal_format, int w, int h)
{
	uint32_t n_headers = (w * h) / 256;
	uint32_t body_offset = n_headers * 16;
	uint32_t headers[][4] = {
		{ body_offset, 0x1, 0x10000, 0x0 }, /* Layouts 0, 3, 4 */
		{ (body_offset + (1 << 28)), 0x80200040, 0x1004000, 0x20080 } /* Layouts 1, 5 */
	};
	uint32_t i, layout;

	/* For AFBC 1.2, the header buffer can be initialized to 0 for Layouts 0, 3, 4 */
	if (internal_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
	{
		memset(headers[0], 0, sizeof(uint32_t) * 4);
	}
	/* map format if necessary (also removes internal extension bits) */
	uint64_t base_format = internal_format & MALI_GRALLOC_INTFMT_FMT_MASK;

	switch (base_format)
	{
	case MALI_GRALLOC_FORMAT_INTERNAL_RGBA_8888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGBX_8888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGB_888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGB_565:
	case MALI_GRALLOC_FORMAT_INTERNAL_BGRA_8888:
		layout = 0;
		break;

	case MALI_GRALLOC_FORMAT_INTERNAL_YV12:
	case MALI_GRALLOC_FORMAT_INTERNAL_NV12:
	case MALI_GRALLOC_FORMAT_INTERNAL_NV21:
		layout = 1;
		break;

	default:
		layout = 0;
	}

	ALOGV("Writing AFBC header layout %d for format %" PRIu64, layout, base_format);

	for (i = 0; i < n_headers; i++)
	{
		memcpy(buf, headers[layout], sizeof(headers[layout]));
		buf += sizeof(headers[layout]);
	}
}

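/*
 * Looks up the numeric heap ID for the heap with the given name using the
 * new ION heap-query interface. Returns the heap ID, or -1 if the query
 * fails or no heap with that name exists.
 */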
static int find_heap_id(int ion_client, char *name)
{
	int i, ret, cnt, heap_id = -1;
	struct ion_heap_data *data;

	ret = ion_query_heap_cnt(ion_client, &cnt);

	if (ret)
	{
		AERR("ion count query failed with %s", strerror(errno));
		return -1;
	}

	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
	if (!data)
	{
		AERR("Error allocating data %s\n", strerror(errno));
		return -1;
	}

	ret = ion_query_get_heaps(ion_client, cnt, data);
	if (ret)
	{
		AERR("Error querying heaps from ion %s", strerror(errno));
	}
	else
	{
		for (i = 0; i < cnt; i++)
		{
			if (strcmp(data[i].name, name) == 0)
			{
				heap_id = data[i].heap_id;
				break;
			}
		}

		if (i == cnt)
		{
			AERR("No %s Heap Found amongst %d heaps\n", name, cnt);
			heap_id = -1;
		}
	}

	free(data);
	return heap_id;
}

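/*
 * Allocates 'size' bytes from the requested legacy ION heap and returns a
 * shareable dma-buf fd (or -1 on failure). Also reports, via *min_pgsz, the
 * minimum allocation granularity of the chosen heap, which is later stored
 * in the buffer handle.
 */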
static int alloc_from_ion_heap(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *min_pgsz)
{
	ion_user_handle_t ion_hnd = -1;
	int shared_fd, ret;

	if ((ion_fd < 0) || (size <= 0) || (heap_mask == 0) || (min_pgsz == NULL))
	{
		return -1;
	}

	/**
	 * step 1: ion_alloc new ion_hnd
	 * step 2: ion_share from ion_hnd and get shared_fd
	 * step 3: ion_free the given ion_hnd
	 * step 4: when we need to free this ion buffer, just close the shared_fd;
	 *         the kernel reference-counts the file struct, so it is safe to
	 *         be transferred between processes.
	 */
	ret = ion_alloc(ion_fd, size, 0, heap_mask, flags, &ion_hnd);

	if (ret < 0)
	{
#if defined(ION_HEAP_SECURE_MASK)

		if (heap_mask == ION_HEAP_SECURE_MASK)
		{
			return -1;
		}
		else
#endif
		{
			/* If everything else failed, try the system heap */
			flags = 0; /* Fallback option; flags are no longer valid */
			heap_mask = ION_HEAP_SYSTEM_MASK;
			ret = ion_alloc(ion_fd, size, 0, heap_mask, flags, &ion_hnd);
		}
	}

	ret = ion_share(ion_fd, ion_hnd, &shared_fd);

	if (ret != 0)
	{
		AERR("ion_share( %d ) failed", ion_fd);
		shared_fd = -1;
	}

	ret = ion_free(ion_fd, ion_hnd);

	if (0 != ret)
	{
		AERR("ion_free( %d ) failed", ion_fd);
		close(shared_fd);
		shared_fd = -1;
	}

	if (ret >= 0)
	{
		switch (heap_mask)
		{
		case ION_HEAP_SYSTEM_MASK:
			*min_pgsz = SZ_4K;
			break;

		case ION_HEAP_SYSTEM_CONTIG_MASK:
		case ION_HEAP_CARVEOUT_MASK:
#ifdef ION_HEAP_TYPE_DMA_MASK
		case ION_HEAP_TYPE_DMA_MASK:
#endif
			*min_pgsz = size;
			break;
#ifdef ION_HEAP_CHUNK_MASK

		/* NOTE: if you have this heap, make sure your ION chunk size is 2M */
		case ION_HEAP_CHUNK_MASK:
			*min_pgsz = SZ_2M;
			break;
#endif
#ifdef ION_HEAP_COMPOUND_PAGE_MASK

		case ION_HEAP_COMPOUND_PAGE_MASK:
			*min_pgsz = SZ_2M;
			break;
#endif
		/* If you have a customized heap, please set a suitable page size
		 * according to the customized ION implementation.
		 */
#ifdef ION_HEAP_CUSTOM_MASK

		case ION_HEAP_CUSTOM_MASK:
			*min_pgsz = SZ_4K;
			break;
#endif

		default:
			*min_pgsz = SZ_4K;
			break;
		}
	}

	return shared_fd;
}

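/*
 * Selects the ION heap mask appropriate for the given gralloc usage flags.
 * Returns 0 if the usage cannot be satisfied (e.g. protected memory requested
 * on a platform without a secure heap).
 */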
unsigned int pick_ion_heap(uint64_t usage)
{
	unsigned int heap_mask;

	if (usage & GRALLOC_USAGE_PROTECTED)
	{
#if defined(ION_HEAP_SECURE_MASK)
		heap_mask = ION_HEAP_SECURE_MASK;
#else
		AERR("Protected ION memory is not supported on this platform.");
		return 0;
#endif
	}

#if defined(ION_HEAP_TYPE_COMPOUND_PAGE_MASK) && GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
	else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
	{
		heap_mask = ION_HEAP_TYPE_COMPOUND_PAGE_MASK;
	}

#elif defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
	else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB)))
	{
		heap_mask = ION_HEAP_TYPE_DMA_MASK;
	}

#endif
	else
	{
		heap_mask = ION_HEAP_SYSTEM_MASK;
	}

	return heap_mask;
}

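/*
 * Derives the private handle heap flag and the ION allocation flags for a
 * given heap mask and usage. Cached allocations are only requested when the
 * buffer is read often by the CPU and does not come from the DMA heap.
 */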
void set_ion_flags(unsigned int heap_mask, uint64_t usage, unsigned int *priv_heap_flag, int *ion_flags)
{
#if !GRALLOC_USE_ION_DMA_HEAP
	GRALLOC_UNUSED(heap_mask);
#endif

	if (priv_heap_flag)
	{
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP

		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
		{
			*priv_heap_flag = private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP;
		}

#endif
	}

	if (ion_flags)
	{
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP

		if (heap_mask != ION_HEAP_TYPE_DMA_MASK)
		{
#endif

			if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
			{
				*ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
			}

#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
		}

#endif
	}
}

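/*
 * Determines whether a set of buffer descriptors can share a single ION
 * backing allocation, i.e. whether they all resolve to the same heap mask
 * and ION flags. A single descriptor never shares.
 */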
static bool check_buffers_sharable(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
	unsigned int shared_backend_heap_mask = 0;
	int shared_ion_flags = 0;
	uint64_t usage;
	uint32_t i;

	if (numDescriptors <= 1)
	{
		return false;
	}

	for (i = 0; i < numDescriptors; i++)
	{
		unsigned int heap_mask;
		int ion_flags = 0;
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
		heap_mask = pick_ion_heap(usage);

		if (0 == heap_mask)
		{
			return false;
		}

		set_ion_flags(heap_mask, usage, NULL, &ion_flags);

		if (0 != shared_backend_heap_mask)
		{
			if (shared_backend_heap_mask != heap_mask || shared_ion_flags != ion_flags)
			{
				return false;
			}
		}
		else
		{
			shared_backend_heap_mask = heap_mask;
			shared_ion_flags = ion_flags;
		}
	}

	return true;
}

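/*
 * Returns the index of the descriptor with the largest size; when buffers
 * share a backend, the shared allocation is sized for this descriptor.
 */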
static int get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
	uint32_t i, max_buffer_index = 0;
	size_t max_buffer_size = 0;

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

		if (max_buffer_size < bufDescriptor->size)
		{
			max_buffer_index = i;
			max_buffer_size = bufDescriptor->size;
		}
	}

	return max_buffer_index;
}

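/*
 * Allocates ION-backed buffers for all descriptors, opening the ION client on
 * first use and detecting whether the kernel exposes the legacy or the new
 * ION interface. Descriptors that can share a backend are allocated once and
 * the fd is dup()ed for the remaining handles. Non-protected buffers are
 * mapped into the process and, when enabled, their AFBC headers initialised.
 * Returns 0 on success, -1 on failure (already allocated buffers are freed).
 */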
int mali_gralloc_ion_allocate(mali_gralloc_module *m, const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle, bool *shared_backend)
{
	static int support_protected = 1; /* initially, assume we support protected memory */
	unsigned int heap_mask, priv_heap_flag = 0;
	unsigned char *cpu_ptr = NULL;
	uint64_t usage;
	uint32_t i, max_buffer_index = 0;
	int shared_fd, ret, ion_flags = 0;
	int min_pgsz = 0;

	if (m->ion_client < 0)
	{
		m->ion_client = ion_open();

		if (m->ion_client < 0)
		{
			AERR("ion_open failed with %s", strerror(errno));
			return -1;
		}

		gralloc_legacy_ion = ion_is_legacy(m->ion_client);
		if (!gralloc_legacy_ion)
		{
			system_heap_id = find_heap_id(m->ion_client, ION_SYSTEM);
			cma_heap_id = find_heap_id(m->ion_client, ION_CMA);
			if (system_heap_id < 0)
			{
				ion_close(m->ion_client);
				m->ion_client = -1;
				AERR("ion_open failed: no system heap found");
				return -1;
			}
			if (cma_heap_id < 0)
			{
				AERR("No cma heap found, falling back to system");
				cma_heap_id = system_heap_id;
			}
		}
	}

	*shared_backend = check_buffers_sharable(descriptors, numDescriptors);

	if (*shared_backend)
	{
		buffer_descriptor_t *max_bufDescriptor;

		max_buffer_index = get_max_buffer_descriptor_index(descriptors, numDescriptors);
		max_bufDescriptor = (buffer_descriptor_t *)(descriptors[max_buffer_index]);
		usage = max_bufDescriptor->consumer_usage | max_bufDescriptor->producer_usage;

		heap_mask = pick_ion_heap(usage);

		if (heap_mask == 0)
		{
			AERR("Failed to find an appropriate ion heap");
			return -1;
		}

		set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);
		if (gralloc_legacy_ion)
		{
			shared_fd = alloc_from_ion_heap(m->ion_client, max_bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);
		}
		else
		{
			int heap = 1 << system_heap_id;
			if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
			{
				heap = 1 << cma_heap_id;
			}

			ret = ion_alloc_fd(m->ion_client, max_bufDescriptor->size, 0, heap, 0, &(shared_fd));
			if (ret != 0)
			{
				AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
				return -1;
			}
			min_pgsz = SZ_4K;
		}

		if (shared_fd < 0)
		{
			AERR("ion_alloc failed from client ( %d )", m->ion_client);
			return -1;
		}

		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			int tmp_fd;

			if (i != max_buffer_index)
			{
				tmp_fd = dup(shared_fd);

				if (tmp_fd < 0)
				{
					/* need to free already allocated memory. */
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
			}
			else
			{
				tmp_fd = shared_fd;
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, tmp_fd, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
			    max_bufDescriptor->size, bufDescriptor->layer_count);

			if (NULL == hnd)
			{
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}
	else
	{
		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

			heap_mask = pick_ion_heap(usage);

			if (heap_mask == 0)
			{
				AERR("Failed to find an appropriate ion heap");
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);
			if (gralloc_legacy_ion)
			{
				shared_fd = alloc_from_ion_heap(m->ion_client, bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);
			}
			else
			{
				int heap = 1 << system_heap_id;
				if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
				{
					heap = 1 << cma_heap_id;
				}

				ret = ion_alloc_fd(m->ion_client, bufDescriptor->size, 0, heap, 0, &(shared_fd));
				if (ret != 0)
				{
					AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
				min_pgsz = SZ_4K;
			}

			if (shared_fd < 0)
			{
				AERR("ion_alloc failed from client ( %d )", m->ion_client);

				/* need to free already allocated memory. not just this one */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);

				return -1;
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, shared_fd, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
			    bufDescriptor->size, bufDescriptor->layer_count);

			if (NULL == hnd)
			{
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
		hnd->usage = usage;

		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
			cpu_ptr =
			    (unsigned char *)mmap(NULL, bufDescriptor->size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

			if (MAP_FAILED == cpu_ptr)
			{
				AERR("mmap failed from client ( %d ), fd ( %d )", m->ion_client, hnd->share_fd);
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

#if GRALLOC_INIT_AFBC == 1

			if ((bufDescriptor->internal_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK) && (!(*shared_backend)))
			{
				init_afbc(cpu_ptr, bufDescriptor->internal_format, bufDescriptor->width, bufDescriptor->height);
			}

#endif
			hnd->base = cpu_ptr;
		}
	}

	return 0;
}

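/*
 * Releases a single ION-backed buffer: unmaps the CPU mapping (if any),
 * closes the shared fd and clears the handle. Framebuffer handles are left
 * untouched here.
 */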
void mali_gralloc_ion_free(private_handle_t const *hnd)
{
	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		return;
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
		/* Buffer might be unregistered already, so we need to ensure we have a valid handle */
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);
		memset((void *)hnd, 0, sizeof(*hnd));
	}
}

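/*
 * Frees every non-NULL handle in pHandle; used to clean up after a partially
 * failed multi-buffer allocation.
 */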
static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds)
{
	uint32_t i = 0;

	for (i = 0; i < num_hnds; i++)
	{
		if (NULL != pHandle[i])
		{
			mali_gralloc_ion_free((private_handle_t *)(pHandle[i]));
		}
	}

	return;
}

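/*
 * Synchronises the CPU cache for a cached ION buffer via the legacy ION sync
 * ioctl. This is a no-op on the new ION interface and for buffers allocated
 * from the DMA heap.
 */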
void mali_gralloc_ion_sync(const mali_gralloc_module *m, private_handle_t *hnd)
{
	if (!gralloc_legacy_ion)
	{
		return;
	}

	if (m != NULL && hnd != NULL)
	{
		switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
		{
		case private_handle_t::PRIV_FLAGS_USES_ION:
			if (!(hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP))
			{
				ion_sync_fd(m->ion_client, hnd->share_fd);
			}

			break;
		}
	}
}

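/*
 * Maps an imported ION buffer into the calling process, opening the module's
 * ION client first if this process has not used it yet. On success hnd->base
 * points at the mapped data (offset applied); returns 0 on success or a
 * negative errno value on failure.
 */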
int mali_gralloc_ion_map(private_handle_t *hnd)
{
	int retval = -EINVAL;

	switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
	case private_handle_t::PRIV_FLAGS_USES_ION:
		unsigned char *mappedAddress;
		size_t size = hnd->size;
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
		}
		else
		{
			AERR("Could not get gralloc module for handle: %p", hnd);
			retval = -errno;
			break;
		}

		/* the test condition is m->ion_client <= 0 here, because:
		 * 1) the module structure is initialized to 0 if no initial value is applied
		 * 2) a second user process should get an ion fd greater than 0.
		 */
		if (m->ion_client <= 0)
		{
			/* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer */
			m->ion_client = ion_open();

			if (m->ion_client < 0)
			{
				AERR("Could not open ion device for handle: %p", hnd);
				retval = -errno;
				break;
			}
		}

		mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

		if (MAP_FAILED == mappedAddress)
		{
			AERR("mmap( share_fd:%d ) failed with %s", hnd->share_fd, strerror(errno));
			retval = -errno;
			break;
		}

		hnd->base = (void *)(uintptr_t(mappedAddress) + hnd->offset);
		retval = 0;
		break;
	}

	return retval;
}

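/*
 * Unmaps a buffer previously mapped with mali_gralloc_ion_map(). The shared
 * fd itself is not closed here.
 */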
void mali_gralloc_ion_unmap(private_handle_t *hnd)
{
	switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
	case private_handle_t::PRIV_FLAGS_USES_ION:
		void *base = (void *)hnd->base;
		size_t size = hnd->size;

		if (munmap(base, size) < 0)
		{
			AERR("Could not munmap base:%p size:%zd '%s'", base, size, strerror(errno));
		}

		break;
	}
}

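/*
 * hw_device_t close hook for the allocator device: closes the module's ION
 * client (if open) and deletes the device instance.
 */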
int mali_gralloc_ion_device_close(struct hw_device_t *device)
{
#if GRALLOC_USE_GRALLOC1_API == 1
	gralloc1_device_t *dev = reinterpret_cast<gralloc1_device_t *>(device);
#else
	alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
#endif

	if (dev)
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

		if (m->ion_client != -1)
		{
			if (0 != ion_close(m->ion_client))
			{
				AERR("Failed to close ion_client: %d err=%s", m->ion_client, strerror(errno));
			}

			m->ion_client = -1;
		}

		delete dev;
	}

	return 0;
}