1 /*
2  * Copyright (C) 2010 ARM Limited. All rights reserved.
3  *
4  * Copyright (C) 2008 The Android Open Source Project
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *      http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 
19 #include <cstdlib>
20 #include <string.h>
21 #include <errno.h>
22 #include <pthread.h>
23 #include <sys/types.h>
24 #include <sys/stat.h>
25 #include <fcntl.h>
26 
27 #include <cutils/log.h>
28 #include <cutils/atomic.h>
29 #include <hardware/hardware.h>
30 #include <hardware/gralloc.h>
31 
32 #include <sys/ioctl.h>
33 
34 #include "alloc_device.h"
35 #include "gralloc_priv.h"
36 #include "gralloc_helper.h"
37 #include "framebuffer_device.h"
38 
39 #if GRALLOC_ARM_UMP_MODULE
40 #include <ump/ump.h>
41 #include <ump/ump_ref_drv.h>
42 #endif
43 
44 #if GRALLOC_ARM_DMA_BUF_MODULE
45 #include <ion/ion.h>
46 #include "ion_4.12.h"
47 #include "dma-heap.h"
48 
/* Heap names used to look up heap ids via the modern (post-4.12) ION query API. */
#define ION_SYSTEM	(char*)"ion_system_heap"
#define ION_CMA		(char*)"linux,cma"

/* Device node names under /dev/dma_heap for the DMA-BUF heaps interface. */
#define DMABUF_SYSTEM	(char*)"system"
#define DMABUF_CMA	(char*)"linux,cma"
/* Which kernel allocator interface was detected at runtime (see initialize_interface()).
 * Static zero-initialization leaves this at INTERFACE_UNKNOWN until probed. */
static enum {
	INTERFACE_UNKNOWN,
	INTERFACE_ION_LEGACY,
	INTERFACE_ION_MODERN,
	INTERFACE_DMABUF_HEAPS
} interface_ver;

/* For INTERFACE_ION_MODERN these are ION heap ids; for INTERFACE_DMABUF_HEAPS
 * they are open fds of the corresponding /dev/dma_heap nodes. */
static int system_heap_id;
static int cma_heap_id;
63 #endif
64 
65 #if GRALLOC_SIMULATE_FAILURES
66 #include <cutils/properties.h>
67 
68 /* system property keys for controlling simulated UMP allocation failures */
69 #define PROP_MALI_TEST_GRALLOC_FAIL_FIRST     "mali.test.gralloc.fail_first"
70 #define PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL  "mali.test.gralloc.fail_interval"
71 
__ump_alloc_should_fail()72 static int __ump_alloc_should_fail()
73 {
74 
75 	static unsigned int call_count  = 0;
76 	unsigned int        first_fail  = 0;
77 	int                 fail_period = 0;
78 	int                 fail        = 0;
79 
80 	++call_count;
81 
82 	/* read the system properties that control failure simulation */
83 	{
84 		char prop_value[PROPERTY_VALUE_MAX];
85 
86 		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
87 		{
88 			sscanf(prop_value, "%11u", &first_fail);
89 		}
90 
91 		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
92 		{
93 			sscanf(prop_value, "%11u", &fail_period);
94 		}
95 	}
96 
97 	/* failure simulation is enabled by setting the first_fail property to non-zero */
98 	if (first_fail > 0)
99 	{
100 		LOGI("iteration %u (fail=%u, period=%u)\n", call_count, first_fail, fail_period);
101 
102 		fail = (call_count == first_fail) ||
103 		       (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);
104 
105 		if (fail)
106 		{
107 			AERR("failed ump_ref_drv_allocate on iteration #%d\n", call_count);
108 		}
109 	}
110 
111 	return fail;
112 }
113 #endif
114 
115 #ifdef FBIOGET_DMABUF
fb_get_framebuffer_dmabuf(private_module_t * m,private_handle_t * hnd)116 static int fb_get_framebuffer_dmabuf(private_module_t *m, private_handle_t *hnd)
117 {
118 	struct fb_dmabuf_export fb_dma_buf;
119 	int res;
120 	res = ioctl(m->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf);
121 
122 	if (res == 0)
123 	{
124 		hnd->share_fd = fb_dma_buf.fd;
125 		return 0;
126 	}
127 	else
128 	{
129 		AINF("FBIOGET_DMABUF ioctl failed(%d). See gralloc_priv.h and the integration manual for vendor framebuffer "
130 		     "integration",
131 		     res);
132 		return -1;
133 	}
134 }
135 #endif
136 
137 #if GRALLOC_ARM_DMA_BUF_MODULE
138 #define DEVPATH "/dev/dma_heap"
dma_heap_open(const char * name)139 int dma_heap_open(const char* name)
140 {
141 	int ret, fd;
142 	char buf[256];
143 
144 	ret = sprintf(buf, "%s/%s", DEVPATH, name);
145 	if (ret < 0) {
146 		AERR("sprintf failed!\n");
147 		return ret;
148 	}
149 
150 	fd = open(buf, O_RDWR);
151 	if (fd < 0)
152 		AERR("open %s failed!\n", buf);
153 	return fd;
154 }
155 
dma_heap_alloc(int fd,size_t len,unsigned int flags,int * dmabuf_fd)156 int dma_heap_alloc(int fd, size_t len, unsigned int flags, int *dmabuf_fd)
157 {
158 	struct dma_heap_allocation_data data = {
159 		.len = len,
160 		.fd_flags = O_RDWR | O_CLOEXEC,
161 		.heap_flags = flags,
162 	};
163 	int ret;
164 
165 	if (dmabuf_fd == NULL)
166 		return -EINVAL;
167 
168 	ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);
169 	if (ret < 0)
170 		return ret;
171 	*dmabuf_fd = (int)data.fd;
172 	return ret;
173 }
174 
alloc_ion_fd(int ion_fd,size_t size,unsigned int heap_mask,unsigned int flags,int * shared_fd)175 static int alloc_ion_fd(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *shared_fd)
176 {
177 	int heap;
178 
179 	if (interface_ver == INTERFACE_DMABUF_HEAPS) {
180 		int fd = system_heap_id;
181 		unsigned long flg = 0;
182 		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
183 			fd = cma_heap_id;
184 
185 		return dma_heap_alloc(fd, size, flg, shared_fd);
186 	}
187 
188 	if (interface_ver == INTERFACE_ION_MODERN) {
189 		heap = 1 << system_heap_id;
190 		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
191 			heap = 1 << cma_heap_id;
192 	} else {
193 		heap = heap_mask;
194 	}
195 	return ion_alloc_fd(ion_fd, size, 0, heap, flags, shared_fd);
196 }
197 #endif
198 
/*
 * Allocate a non-framebuffer gralloc buffer of 'size' bytes and return a new
 * private_handle_t through *pHandle.
 *
 * When GRALLOC_ARM_DMA_BUF_MODULE is enabled the ION/dma-buf path is used
 * (and returns before the UMP path is ever reached); otherwise the UMP path
 * applies. Returns 0 on success, -1 on failure.
 */
static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		void *cpu_ptr = MAP_FAILED;
		int shared_fd;
		int ret;
		unsigned int heap_mask;
		int lock_state = 0;
		int map_mask = 0;

		/* choose the ION heap from the usage bits: secure heap for protected
		 * content (if the platform provides one), contiguous (CMA) memory for
		 * framebuffer use, plain system memory otherwise */
		if (usage & GRALLOC_USAGE_PROTECTED) {
#if defined(ION_HEAP_SECURE_MASK)
			heap_mask = ION_HEAP_SECURE_MASK;
#else
			AERR("The platform does NOT support protected ION memory.");
			return -1;
#endif
		}
		else if (usage & GRALLOC_USAGE_HW_FB) {
			heap_mask = ION_HEAP_TYPE_DMA_MASK;
		}
		else {
			heap_mask = ION_HEAP_SYSTEM_MASK;
		}

		ret = alloc_ion_fd(m->ion_client, size, heap_mask, 0, &shared_fd);
		if (ret != 0) {
			AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
			return -1;
		}

		/* protected buffers must not be CPU-readable; everything else gets a
		 * read/write mapping */
		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
			map_mask = PROT_READ | PROT_WRITE;
		}
		else
		{
			map_mask = PROT_WRITE;
		}

		cpu_ptr = mmap(NULL, size, map_mask, MAP_SHARED, shared_fd, 0);

		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);

			close(shared_fd);
			return -1;
		}

		lock_state = private_handle_t::LOCK_STATE_MAPPED;

		/* NOTE(review): plain operator new throws on failure rather than
		 * returning NULL, so the else-branch below is effectively dead code;
		 * kept for parity with the upstream sources. */
		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, cpu_ptr, lock_state);

		if (NULL != hnd)
		{
			hnd->share_fd = shared_fd;
			*pHandle = hnd;
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		/* failure cleanup: release the dma-buf fd and the CPU mapping */
		close(shared_fd);

		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %lu", cpu_ptr, (unsigned long)size);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	MALI_IGNORE(dev);
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		ump_alloc_constraints constraints;

		size = round_up_to_page_size(size);

		/* request cached memory only when the client declares frequent SW reads */
		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints =  UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}

/* NOTE(review): this uses #ifdef while the definition of
 * __ump_alloc_should_fail() above is guarded with
 * "#if GRALLOC_SIMULATE_FAILURES" — if the macro is defined as 0 the call
 * below is still compiled but the function is not, breaking the link.
 * Confirm the build system only ever defines it as 1 or not at all. */
#ifdef GRALLOC_SIMULATE_FAILURES

		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			if (usage & GRALLOC_USAGE_PROTECTED)
			{
				AERR("gralloc_alloc_buffer() does not support to allocate protected UMP memory.");
			}
			else
			{
				ump_mem_handle = ump_ref_drv_allocate(size, constraints);

				if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
				{
					cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

					if (NULL != cpu_ptr)
					{
						ump_id = ump_secure_id_get(ump_mem_handle);

						if (UMP_INVALID_SECURE_ID != ump_id)
						{
							private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, cpu_ptr,
							        private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

							if (NULL != hnd)
							{
								*pHandle = hnd;
								return 0;
							}
							else
							{
								AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
							}
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
						}

						/* unwind in reverse order of acquisition */
						ump_mapped_pointer_release(ump_mem_handle);
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
					}

					ump_reference_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
				}
			}
		}

		return -1;
	}
#endif

}
365 
366 #ifndef DISABLE_FRAMEBUFFER_HAL
/*
 * Allocate one of the framebuffer's page-flip slots and wrap it in a
 * private_handle_t. Caller must hold m->lock (see gralloc_alloc_framebuffer()).
 *
 * Falls back to a regular (memcpy-on-post) buffer when only a single
 * framebuffer is available or when the dma-buf export of the framebuffer
 * fails. Returns 0 on success, negative on failure.
 */
static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

	// allocate the framebuffer
	if (m->framebuffer == NULL)
	{
		// initialize the framebuffer, the framebuffer is mapped once and forever.
		int err = init_frame_buffer_locked(m);

		if (err < 0)
		{
			return err;
		}
	}

	uint32_t bufferMask = m->bufferMask;
	const uint32_t numBuffers = m->numBuffers;
	// size of a single screen's worth of framebuffer memory
	const size_t bufferSize = m->finfo.line_length * m->info.yres;

	if (numBuffers == 1)
	{
		// If we have only one buffer, we never use page-flipping. Instead,
		// we return a regular buffer which will be memcpy'ed to the main
		// screen when post is called.
		int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
		AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
		return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
	}

	if (bufferMask >= ((1LU << numBuffers) - 1))
	{
		// We ran out of buffers, reset bufferMask.
		bufferMask = 0;
		m->bufferMask = 0;
	}

	void *vaddr = m->framebuffer->base;

	// find a free slot: each set bit in bufferMask marks a slot in use;
	// vaddr advances one bufferSize per occupied slot
	for (uint32_t i = 0 ; i < numBuffers ; i++)
	{
		if ((bufferMask & (1LU << i)) == 0)
		{
			m->bufferMask |= (1LU << i);
			break;
		}

		vaddr = (void *)((uintptr_t)vaddr + bufferSize);
	}

	// The entire framebuffer memory is already mapped, now create a buffer object for parts of this memory
	private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
	        0, m->framebuffer->fd, (uintptr_t)vaddr - (uintptr_t) m->framebuffer->base, m->framebuffer->fb_paddr);

#if GRALLOC_ARM_UMP_MODULE
	hnd->ump_id = m->framebuffer->ump_id;

	/* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
	if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
	{
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
		{
			AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
		}
	}

#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
	{
#ifdef FBIOGET_DMABUF
		/*
		 * Perform allocator specific actions. If these fail we fall back to a regular buffer
		 * which will be memcpy'ed to the main screen when fb_post is called.
		 */
		if (fb_get_framebuffer_dmabuf(m, hnd) == -1)
		{
			int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;

			AINF("Fallback to single buffering. Unable to map framebuffer memory to handle:%p", hnd);
			return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
		}
#endif
	}

	// correct numFds/numInts when there is no dmabuf fd
	if (hnd->share_fd < 0)
	{
		hnd->numFds--;
		hnd->numInts++;
	}
#endif

	*pHandle = hnd;

	return 0;
}
467 
gralloc_alloc_framebuffer(alloc_device_t * dev,size_t size,int usage,buffer_handle_t * pHandle)468 static int gralloc_alloc_framebuffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
469 {
470 	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
471 	pthread_mutex_lock(&m->lock);
472 	int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
473 	pthread_mutex_unlock(&m->lock);
474 	return err;
475 }
476 #endif /* DISABLE_FRAMEBUFFER_HAL */
477 
/*
 * gralloc "alloc" entry point: compute stride and size for the requested
 * w/h/format, allocate either a framebuffer slot or a regular buffer
 * depending on usage, and fill in the handle's metadata.
 *
 * Returns 0 on success; -EINVAL for bad arguments or unsupported formats;
 * otherwise propagates the allocator's error.
 */
static int alloc_device_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *pHandle, int *pStride)
{
	if (!pHandle || !pStride)
	{
		return -EINVAL;
	}

	size_t size;
	size_t stride;
	int bpp = 1;

	if (format == HAL_PIXEL_FORMAT_YCrCb_420_SP || format == HAL_PIXEL_FORMAT_YV12
	        /* HAL_PIXEL_FORMAT_YCbCr_420_SP, HAL_PIXEL_FORMAT_YCbCr_420_P, HAL_PIXEL_FORMAT_YCbCr_422_I are not defined in Android.
	         * To enable Mali DDK EGLImage support for those formats, firstly, you have to add them in Android system/core/include/system/graphics.h.
	         * Then, define SUPPORT_LEGACY_FORMAT in the same header file(Mali DDK will also check this definition).
	         */
#ifdef SUPPORT_LEGACY_FORMAT
	        || format == HAL_PIXEL_FORMAT_YCbCr_420_SP || format == HAL_PIXEL_FORMAT_YCbCr_420_P || format == HAL_PIXEL_FORMAT_YCbCr_422_I
#endif
	   )
	{
		/* YUV formats: size accounts for the luma plane plus the
		 * (half-width, aligned) chroma planes */
		switch (format)
		{
			case HAL_PIXEL_FORMAT_YCrCb_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YV12:
#ifdef SUPPORT_LEGACY_FORMAT
			case HAL_PIXEL_FORMAT_YCbCr_420_P:
#endif
				/*
				 * Since Utgard has limitation that "64-byte alignment is enforced on texture and mipmap addresses", here to make sure
				 * the v, u plane start addresses are 64-byte aligned.
				 */
				stride = GRALLOC_ALIGN(w, (h % 8 == 0) ? GRALLOC_ALIGN_BASE_16 :
										 ((h % 4 == 0) ? GRALLOC_ALIGN_BASE_64 : GRALLOC_ALIGN_BASE_128));
				size = GRALLOC_ALIGN(h, 2) * (stride + GRALLOC_ALIGN(stride / 2, 16));

				break;
#ifdef SUPPORT_LEGACY_FORMAT

			case HAL_PIXEL_FORMAT_YCbCr_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YCbCr_422_I:
				stride = GRALLOC_ALIGN(w, 16);
				size = h * stride * 2;

				break;
#endif

			default:
				return -EINVAL;
		}
	}
	else
	{

		/* RGB / BLOB formats: derive bytes-per-pixel, then a 64-byte-aligned
		 * byte stride */
		switch (format)
		{
			case HAL_PIXEL_FORMAT_RGBA_8888:
			case HAL_PIXEL_FORMAT_RGBX_8888:
			case HAL_PIXEL_FORMAT_BGRA_8888:
				bpp = 4;
				break;

			case HAL_PIXEL_FORMAT_RGB_888:
				bpp = 3;
				break;

			case HAL_PIXEL_FORMAT_RGB_565:
#if PLATFORM_SDK_VERSION < 19
			case HAL_PIXEL_FORMAT_RGBA_5551:
			case HAL_PIXEL_FORMAT_RGBA_4444:
#endif
				bpp = 2;
				break;

			case HAL_PIXEL_FORMAT_BLOB:
				/* BLOB buffers are one-dimensional byte arrays; bpp stays 1 */
				if (h != 1) {
					AERR("Height for HAL_PIXEL_FORMAT_BLOB must be 1. h=%d", h);
					return -EINVAL;
				}
				break;

			default:
				AERR("The format is not supported yet: format=%d\n",  format);
				return -EINVAL;
		}

		if (format == HAL_PIXEL_FORMAT_BLOB) {
			stride = 0; /* No 'rows', it's effectively a long one dimensional array */
			size = w;
		}else{
			size_t bpr = GRALLOC_ALIGN(w * bpp, 64);
			size = bpr * h;
			stride = bpr / bpp;
		}
	}

	int err;

#ifndef DISABLE_FRAMEBUFFER_HAL

	if (usage & GRALLOC_USAGE_HW_FB)
	{
		err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
	}
	else
#endif

	{
		err = gralloc_alloc_buffer(dev, size, usage, pHandle);
	}

	if (err < 0)
	{
		return err;
	}

	/* match the framebuffer format */
	if (usage & GRALLOC_USAGE_HW_FB)
	{
#ifdef GRALLOC_16_BITS
		format = HAL_PIXEL_FORMAT_RGB_565;
#else
		format = HAL_PIXEL_FORMAT_BGRA_8888;
#endif
	}

	/* the two private usage bits select the YUV color space / range */
	private_handle_t *hnd = (private_handle_t *)*pHandle;
	int               private_usage = usage & (GRALLOC_USAGE_PRIVATE_0 |
	                                  GRALLOC_USAGE_PRIVATE_1);

	switch (private_usage)
	{
		case 0:
			hnd->yuv_info = MALI_YUV_BT601_NARROW;
			break;

		case GRALLOC_USAGE_PRIVATE_1:
			hnd->yuv_info = MALI_YUV_BT601_WIDE;
			break;

		case GRALLOC_USAGE_PRIVATE_0:
			hnd->yuv_info = MALI_YUV_BT709_NARROW;
			break;

		case (GRALLOC_USAGE_PRIVATE_0 | GRALLOC_USAGE_PRIVATE_1):
			hnd->yuv_info = MALI_YUV_BT709_WIDE;
			break;
	}

	hnd->width = w;
	hnd->height = h;
	hnd->format = format;
	hnd->stride = stride;
	/* NOTE(review): byte_stride is recomputed from w*bpp here; for the YUV
	 * branch above bpp is still 1 and the pixel stride was aligned
	 * differently, so this value may not match the YUV plane layout —
	 * verify against consumers of byte_stride. */
	hnd->byte_stride = GRALLOC_ALIGN(w*bpp,64);
	*pStride = stride;
	return 0;
}
643 
/*
 * gralloc "free" entry point: release the resources backing a handle
 * according to its flags (framebuffer slot, UMP allocation, or ION/dma-buf
 * allocation), then destroy the handle itself.
 * Returns 0 on success, -EINVAL for an invalid handle.
 */
static int alloc_device_free(alloc_device_t __unused *dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* drop the extra UMP reference taken when the framebuffer handle was created */
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#else
		AERR("Can't free ump memory for handle:%p. Not supported.", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);

		/* NOTE(review): the handle is zeroed and then deleted below; this
		 * relies on private_handle_t being trivially destructible and on no
		 * other thread holding a reference — confirm before refactoring. */
		memset((void *)hnd, 0, sizeof(*hnd));
#else
		AERR("Can't free dma_buf memory for handle:0x%x. Not supported.", (unsigned int)hnd);
#endif

	}

	delete hnd;

	return 0;
}
704 
alloc_device_close(struct hw_device_t * device)705 static int alloc_device_close(struct hw_device_t *device)
706 {
707 	alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
708 
709 	if (dev)
710 	{
711 #if GRALLOC_ARM_DMA_BUF_MODULE
712 		private_module_t *m = reinterpret_cast<private_module_t *>(device);
713 
714 		if (0 != ion_close(m->ion_client))
715 		{
716 			AERR("Failed to close ion_client: %d", m->ion_client);
717 		}
718 
719 		close(m->ion_client);
720 #endif
721 		delete dev;
722 #if GRALLOC_ARM_UMP_MODULE
723 		ump_close(); // Our UMP memory refs will be released automatically here...
724 #endif
725 	}
726 
727 	return 0;
728 }
729 
730 #if GRALLOC_ARM_DMA_BUF_MODULE
find_heap_id(int ion_client,char * name)731 static int find_heap_id(int ion_client, char* name)
732 {
733 	int i, ret, cnt, heap_id = -1;
734 	struct ion_heap_data *data;
735 
736 	ret = ion_query_heap_cnt(ion_client, &cnt);
737 
738 	if (ret)
739 	{
740 		AERR("ion count query failed with %s", strerror(errno));
741 		return -1;
742 	}
743 
744 	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
745 	if (!data)
746 	{
747 		AERR("Error allocating data %s\n", strerror(errno));
748 		return -1;
749 	}
750 
751 	ret = ion_query_get_heaps(ion_client, cnt, data);
752 	if (ret)
753 	{
754 		AERR("Error querying heaps from ion %s", strerror(errno));
755 	}
756 	else
757 	{
758 		for (i = 0; i < cnt; i++) {
759 			struct ion_heap_data *dat = (struct ion_heap_data *)data;
760 			if (strcmp(dat[i].name, name) == 0) {
761 				heap_id = dat[i].heap_id;
762 				break;
763 			}
764 		}
765 
766 		if (i > cnt)
767 		{
768 			AERR("No System Heap Found amongst %d heaps\n", cnt);
769 			heap_id = -1;
770 		}
771 	}
772 
773 	free(data);
774 	return heap_id;
775 }
776 #endif
777 
#if GRALLOC_ARM_DMA_BUF_MODULE
/*
 * Probe which allocator interface the kernel provides, in order of
 * preference: DMA-BUF heaps, then modern ION, then legacy ION. Stores the
 * result in interface_ver and the heap ids/fds in the file-scope globals.
 * Idempotent: returns immediately once an interface has been selected.
 * Returns 0 on success, -1 if no usable interface is found.
 *
 * Guarded by GRALLOC_ARM_DMA_BUF_MODULE: this function references
 * interface_ver, dma_heap_open() and the ion_* API, all of which only exist
 * when the dma-buf module is enabled — leaving it unguarded broke the build
 * when the module was disabled.
 */
static int initialize_interface(private_module_t *m)
{
	int fd;

	if (interface_ver != INTERFACE_UNKNOWN)
		return 0;

	/* test for dma-heaps*/
	fd = dma_heap_open(DMABUF_SYSTEM);
	if (fd >= 0) {
		AINF("Using DMA-BUF Heaps.\n");
		interface_ver = INTERFACE_DMABUF_HEAPS;
		system_heap_id = fd;
		cma_heap_id = dma_heap_open(DMABUF_CMA);
		/* Open other dma heaps here */
		return 0;
	}

	/* test for modern vs legacy ION */
	m->ion_client = ion_open();
	if (m->ion_client < 0) {
		AERR("ion_open failed with %s", strerror(errno));
		return -1;
	}
	if (!ion_is_legacy(m->ion_client)) {
		system_heap_id = find_heap_id(m->ion_client, ION_SYSTEM);
		cma_heap_id = find_heap_id(m->ion_client, ION_CMA);
		if (system_heap_id < 0) {
			ion_close(m->ion_client);
			m->ion_client = -1;
			AERR( "ion_open failed: no system heap found" );
			return -1;
		}
		if (cma_heap_id < 0) {
			AERR("No cma heap found, falling back to system");
			cma_heap_id = system_heap_id;
		}
		AINF("Using ION Modern interface.\n");
		interface_ver = INTERFACE_ION_MODERN;
	} else {
		AINF("Using ION Legacy interface.\n");
		interface_ver = INTERFACE_ION_LEGACY;
	}
	return 0;
}
#endif
823 
alloc_device_open(hw_module_t const * module,const char * name,hw_device_t ** device)824 int alloc_device_open(hw_module_t const *module, const char *name, hw_device_t **device)
825 {
826 	MALI_IGNORE(name);
827 	alloc_device_t *dev;
828 
829 	dev = new alloc_device_t;
830 
831 	if (NULL == dev)
832 	{
833 		return -1;
834 	}
835 
836 #if GRALLOC_ARM_UMP_MODULE
837 	ump_result ump_res = ump_open();
838 
839 	if (UMP_OK != ump_res)
840 	{
841 		AERR("UMP open failed with %d", ump_res);
842 		delete dev;
843 		return -1;
844 	}
845 
846 #endif
847 
848 	/* initialize our state here */
849 	memset(dev, 0, sizeof(*dev));
850 
851 	/* initialize the procs */
852 	dev->common.tag = HARDWARE_DEVICE_TAG;
853 	dev->common.version = 0;
854 	dev->common.module = const_cast<hw_module_t *>(module);
855 	dev->common.close = alloc_device_close;
856 	dev->alloc = alloc_device_alloc;
857 	dev->free = alloc_device_free;
858 
859 #if GRALLOC_ARM_DMA_BUF_MODULE
860 	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
861 
862 	if (initialize_interface(m) < 0) {
863 		delete dev;
864 		return -1;
865 	}
866 #endif
867 
868 	*device = &dev->common;
869 
870 	return 0;
871 }
872