1 /*
2 * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
3
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above
10 * copyright notice, this list of conditions and the following
11 * disclaimer in the documentation and/or other materials provided
12 * with the distribution.
13 * * Neither the name of The Linux Foundation nor the names of its
14 * contributors may be used to endorse or promote products derived
15 * from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
27 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <unistd.h>
31
32 #include <algorithm>
33 #include <vector>
34
35 #include <log/log.h>
36
37 #include "gr_utils.h"
38 #include "gr_allocator.h"
39 #include "gr_adreno_info.h"
40 #include "gralloc_priv.h"
41
42 #include "qd_utils.h"
43 #include "qdMetaData.h"
44
45 #define ASTC_BLOCK_SIZE 16
46
47 #ifndef ION_FLAG_CP_PIXEL
48 #define ION_FLAG_CP_PIXEL 0
49 #endif
50
51 #ifndef ION_FLAG_ALLOW_NON_CONTIG
52 #define ION_FLAG_ALLOW_NON_CONTIG 0
53 #endif
54
55 #ifndef ION_FLAG_CP_CAMERA_PREVIEW
56 #define ION_FLAG_CP_CAMERA_PREVIEW 0
57 #endif
58
59 #ifdef MASTER_SIDE_CP
60 #define CP_HEAP_ID ION_SECURE_HEAP_ID
61 #define SD_HEAP_ID ION_SECURE_DISPLAY_HEAP_ID
62 #define ION_CP_FLAGS (ION_SECURE | ION_FLAG_CP_PIXEL)
63 #define ION_SD_FLAGS (ION_SECURE | ION_FLAG_CP_SEC_DISPLAY)
64 #define ION_SC_FLAGS (ION_SECURE | ION_FLAG_CP_CAMERA)
65 #define ION_SC_PREVIEW_FLAGS (ION_SECURE | ION_FLAG_CP_CAMERA_PREVIEW)
66 #else // SLAVE_SIDE_CP
67 #define CP_HEAP_ID ION_CP_MM_HEAP_ID
68 #define SD_HEAP_ID CP_HEAP_ID
69 #define ION_CP_FLAGS (ION_SECURE | ION_FLAG_ALLOW_NON_CONTIG)
70 #define ION_SD_FLAGS ION_SECURE
71 #define ION_SC_FLAGS ION_SECURE
72 #define ION_SC_PREVIEW_FLAGS ION_SECURE
73 #endif
74
75 using std::vector;
76 using std::shared_ptr;
77
78 namespace gralloc1 {
79
Allocator()80 Allocator::Allocator() : ion_allocator_(NULL), adreno_helper_(NULL) {
81 }
82
Init()83 bool Allocator::Init() {
84 ion_allocator_ = new IonAlloc();
85 if (!ion_allocator_->Init()) {
86 return false;
87 }
88
89 adreno_helper_ = new AdrenoMemInfo();
90 adreno_helper_->Init();
91
92 return true;
93 }
94
~Allocator()95 Allocator::~Allocator() {
96 if (ion_allocator_) {
97 delete ion_allocator_;
98 }
99
100 if (adreno_helper_) {
101 delete adreno_helper_;
102 }
103 }
104
AllocateMem(AllocData * alloc_data,gralloc1_producer_usage_t prod_usage,gralloc1_consumer_usage_t cons_usage)105 int Allocator::AllocateMem(AllocData *alloc_data, gralloc1_producer_usage_t prod_usage,
106 gralloc1_consumer_usage_t cons_usage) {
107 int ret;
108 alloc_data->uncached = UseUncached(prod_usage, cons_usage);
109
110 // After this point we should have the right heap set, there is no fallback
111 GetIonHeapInfo(prod_usage, cons_usage, &alloc_data->heap_id, &alloc_data->alloc_type,
112 &alloc_data->flags);
113
114 ret = ion_allocator_->AllocBuffer(alloc_data);
115 if (ret >= 0) {
116 alloc_data->alloc_type |= private_handle_t::PRIV_FLAGS_USES_ION;
117 } else {
118 ALOGE("%s: Failed to allocate buffer - heap: 0x%x flags: 0x%x", __FUNCTION__,
119 alloc_data->heap_id, alloc_data->flags);
120 }
121
122 return ret;
123 }
124
MapBuffer(void ** base,unsigned int size,unsigned int offset,int fd)125 int Allocator::MapBuffer(void **base, unsigned int size, unsigned int offset, int fd) {
126 if (ion_allocator_) {
127 return ion_allocator_->MapBuffer(base, size, offset, fd);
128 }
129
130 return -EINVAL;
131 }
132
ImportBuffer(int fd)133 int Allocator::ImportBuffer(int fd) {
134 if (ion_allocator_) {
135 return ion_allocator_->ImportBuffer(fd);
136 }
137 return -EINVAL;
138 }
139
FreeBuffer(void * base,unsigned int size,unsigned int offset,int fd,int handle)140 int Allocator::FreeBuffer(void *base, unsigned int size, unsigned int offset, int fd,
141 int handle) {
142 if (ion_allocator_) {
143 return ion_allocator_->FreeBuffer(base, size, offset, fd, handle);
144 }
145
146 return -EINVAL;
147 }
148
CleanBuffer(void * base,unsigned int size,unsigned int offset,int handle,int op)149 int Allocator::CleanBuffer(void *base, unsigned int size, unsigned int offset, int handle, int op) {
150 if (ion_allocator_) {
151 return ion_allocator_->CleanBuffer(base, size, offset, handle, op);
152 }
153
154 return -EINVAL;
155 }
156
CheckForBufferSharing(uint32_t num_descriptors,const vector<shared_ptr<BufferDescriptor>> & descriptors,ssize_t * max_index)157 bool Allocator::CheckForBufferSharing(uint32_t num_descriptors,
158 const vector<shared_ptr<BufferDescriptor>>& descriptors,
159 ssize_t *max_index) {
160 unsigned int cur_heap_id = 0, prev_heap_id = 0;
161 unsigned int cur_alloc_type = 0, prev_alloc_type = 0;
162 unsigned int cur_ion_flags = 0, prev_ion_flags = 0;
163 bool cur_uncached = false, prev_uncached = false;
164 unsigned int alignedw, alignedh;
165 unsigned int max_size = 0;
166
167 *max_index = -1;
168 for (uint32_t i = 0; i < num_descriptors; i++) {
169 // Check Cached vs non-cached and all the ION flags
170 cur_uncached = UseUncached(descriptors[i]->GetProducerUsage(),
171 descriptors[i]->GetConsumerUsage());
172 GetIonHeapInfo(descriptors[i]->GetProducerUsage(), descriptors[i]->GetConsumerUsage(),
173 &cur_heap_id, &cur_alloc_type, &cur_ion_flags);
174
175 if (i > 0 && (cur_heap_id != prev_heap_id || cur_alloc_type != prev_alloc_type ||
176 cur_ion_flags != prev_ion_flags)) {
177 return false;
178 }
179
180 // For same format type, find the descriptor with bigger size
181 GetAlignedWidthAndHeight(*descriptors[i], &alignedw, &alignedh);
182 unsigned int size = GetSize(*descriptors[i], alignedw, alignedh);
183 if (max_size < size) {
184 *max_index = INT(i);
185 max_size = size;
186 }
187
188 prev_heap_id = cur_heap_id;
189 prev_uncached = cur_uncached;
190 prev_ion_flags = cur_ion_flags;
191 prev_alloc_type = cur_alloc_type;
192 }
193
194 return true;
195 }
196
GetDataAlignment(int format,gralloc1_producer_usage_t prod_usage,gralloc1_consumer_usage_t cons_usage)197 uint32_t Allocator::GetDataAlignment(int format, gralloc1_producer_usage_t prod_usage,
198 gralloc1_consumer_usage_t cons_usage) {
199 uint32_t align = UINT(getpagesize());
200 if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED) {
201 align = SZ_8K;
202 }
203
204 if (prod_usage & GRALLOC1_PRODUCER_USAGE_PROTECTED) {
205 if ((prod_usage & GRALLOC1_PRODUCER_USAGE_CAMERA) ||
206 (cons_usage & GRALLOC1_CONSUMER_USAGE_PRIVATE_SECURE_DISPLAY)) {
207 // The alignment here reflects qsee mmu V7L/V8L requirement
208 align = SZ_2M;
209 } else {
210 align = SECURE_ALIGN;
211 }
212 }
213
214 return align;
215 }
216
217 // helper function
GetSize(const BufferDescriptor & descriptor,unsigned int alignedw,unsigned int alignedh)218 unsigned int Allocator::GetSize(const BufferDescriptor &descriptor, unsigned int alignedw,
219 unsigned int alignedh) {
220 unsigned int size = 0;
221 int format = descriptor.GetFormat();
222 int width = descriptor.GetWidth();
223 int height = descriptor.GetHeight();
224 int layer_count = descriptor.GetLayerCount();
225 gralloc1_producer_usage_t prod_usage = descriptor.GetProducerUsage();
226 gralloc1_consumer_usage_t cons_usage = descriptor.GetConsumerUsage();
227
228 if (IsUBwcEnabled(format, prod_usage, cons_usage)) {
229 size = GetUBwcSize(width, height, format, alignedw, alignedh);
230 } else if (IsUncompressedRGBFormat(format)) {
231 uint32_t bpp = GetBppForUncompressedRGB(format);
232 size = alignedw * alignedh * bpp;
233 } else if (IsCompressedRGBFormat(format)) {
234 size = alignedw * alignedh * ASTC_BLOCK_SIZE;
235 } else {
236 switch (format) {
237 case HAL_PIXEL_FORMAT_RAW16:
238 size = alignedw * alignedh * 2;
239 break;
240 case HAL_PIXEL_FORMAT_RAW10:
241 case HAL_PIXEL_FORMAT_RAW12:
242 size = ALIGN(alignedw * alignedh, SIZE_4K);
243 break;
244 case HAL_PIXEL_FORMAT_Y8:
245 case HAL_PIXEL_FORMAT_RAW8:
246 size = alignedw * alignedh * 1;
247 break;
248
249 // adreno formats
250 case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO: // NV21
251 size = ALIGN(alignedw * alignedh, SIZE_4K);
252 size += (unsigned int)ALIGN(2 * ALIGN(width / 2, 32) * ALIGN(height / 2, 32), SIZE_4K);
253 break;
254 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: // NV12
255 // The chroma plane is subsampled,
256 // but the pitch in bytes is unchanged
257 // The GPU needs 4K alignment, but the video decoder needs 8K
258 size = ALIGN(alignedw * alignedh, SIZE_8K);
259 size += ALIGN(alignedw * (unsigned int)ALIGN(height / 2, 32), SIZE_8K);
260 break;
261 case HAL_PIXEL_FORMAT_YV12:
262 if ((format == HAL_PIXEL_FORMAT_YV12) && ((width & 1) || (height & 1))) {
263 ALOGE("w or h is odd for the YV12 format");
264 return 0;
265 }
266 size = alignedw * alignedh + (ALIGN(alignedw / 2, 16) * (alignedh / 2)) * 2;
267 size = ALIGN(size, (unsigned int)SIZE_4K);
268 break;
269 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
270 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
271 size = ALIGN((alignedw * alignedh) + (alignedw * alignedh) / 2 + 1, SIZE_4K);
272 break;
273 case HAL_PIXEL_FORMAT_YCbCr_420_P010:
274 size = ALIGN((alignedw * alignedh * 2) + (alignedw * alignedh) + 1, SIZE_4K);
275 break;
276 case HAL_PIXEL_FORMAT_YCbCr_422_SP:
277 case HAL_PIXEL_FORMAT_YCrCb_422_SP:
278 case HAL_PIXEL_FORMAT_YCbCr_422_I:
279 case HAL_PIXEL_FORMAT_YCrCb_422_I:
280 if (width & 1) {
281 ALOGE("width is odd for the YUV422_SP format");
282 return 0;
283 }
284 size = ALIGN(alignedw * alignedh * 2, SIZE_4K);
285 break;
286 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
287 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
288 size = VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
289 break;
290 case HAL_PIXEL_FORMAT_YCrCb_420_SP_VENUS:
291 size = VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
292 break;
293 case HAL_PIXEL_FORMAT_BLOB:
294 case HAL_PIXEL_FORMAT_RAW_OPAQUE:
295 if (height != 1) {
296 ALOGE("%s: Buffers with HAL_PIXEL_FORMAT_BLOB must have height 1 ", __FUNCTION__);
297 return 0;
298 }
299 size = (unsigned int)width;
300 break;
301 case HAL_PIXEL_FORMAT_NV21_ZSL:
302 size = ALIGN((alignedw * alignedh) + (alignedw * alignedh) / 2, SIZE_4K);
303 break;
304 default:
305 ALOGE("%s: Unrecognized pixel format: 0x%x", __FUNCTION__, format);
306 return 0;
307 }
308 }
309 uint32_t align = GetDataAlignment(format, prod_usage, cons_usage);
310 size = ALIGN(size, align) * layer_count;
311 return size;
312 }
313
GetBufferSizeAndDimensions(int width,int height,int format,unsigned int * size,unsigned int * alignedw,unsigned int * alignedh)314 void Allocator::GetBufferSizeAndDimensions(int width, int height, int format, unsigned int *size,
315 unsigned int *alignedw, unsigned int *alignedh) {
316 BufferDescriptor descriptor = BufferDescriptor(width, height, format);
317 GetAlignedWidthAndHeight(descriptor, alignedw, alignedh);
318
319 *size = GetSize(descriptor, *alignedw, *alignedh);
320 }
321
GetBufferSizeAndDimensions(const BufferDescriptor & descriptor,unsigned int * size,unsigned int * alignedw,unsigned int * alignedh)322 void Allocator::GetBufferSizeAndDimensions(const BufferDescriptor &descriptor, unsigned int *size,
323 unsigned int *alignedw, unsigned int *alignedh) {
324 GetAlignedWidthAndHeight(descriptor, alignedw, alignedh);
325
326 *size = GetSize(descriptor, *alignedw, *alignedh);
327 }
328
GetYuvUbwcSPPlaneInfo(uint64_t base,uint32_t width,uint32_t height,int color_format,struct android_ycbcr * ycbcr)329 void Allocator::GetYuvUbwcSPPlaneInfo(uint64_t base, uint32_t width, uint32_t height,
330 int color_format, struct android_ycbcr *ycbcr) {
331 // UBWC buffer has these 4 planes in the following sequence:
332 // Y_Meta_Plane, Y_Plane, UV_Meta_Plane, UV_Plane
333 unsigned int y_meta_stride, y_meta_height, y_meta_size;
334 unsigned int y_stride, y_height, y_size;
335 unsigned int c_meta_stride, c_meta_height, c_meta_size;
336 unsigned int alignment = 4096;
337
338 y_meta_stride = VENUS_Y_META_STRIDE(color_format, INT(width));
339 y_meta_height = VENUS_Y_META_SCANLINES(color_format, INT(height));
340 y_meta_size = ALIGN((y_meta_stride * y_meta_height), alignment);
341
342 y_stride = VENUS_Y_STRIDE(color_format, INT(width));
343 y_height = VENUS_Y_SCANLINES(color_format, INT(height));
344 y_size = ALIGN((y_stride * y_height), alignment);
345
346 c_meta_stride = VENUS_UV_META_STRIDE(color_format, INT(width));
347 c_meta_height = VENUS_UV_META_SCANLINES(color_format, INT(height));
348 c_meta_size = ALIGN((c_meta_stride * c_meta_height), alignment);
349
350 ycbcr->y = reinterpret_cast<void *>(base + y_meta_size);
351 ycbcr->cb = reinterpret_cast<void *>(base + y_meta_size + y_size + c_meta_size);
352 ycbcr->cr = reinterpret_cast<void *>(base + y_meta_size + y_size + c_meta_size + 1);
353 ycbcr->ystride = y_stride;
354 ycbcr->cstride = VENUS_UV_STRIDE(color_format, INT(width));
355 }
356
GetYuvSPPlaneInfo(uint64_t base,uint32_t width,uint32_t height,uint32_t bpp,struct android_ycbcr * ycbcr)357 void Allocator::GetYuvSPPlaneInfo(uint64_t base, uint32_t width, uint32_t height, uint32_t bpp,
358 struct android_ycbcr *ycbcr) {
359 unsigned int ystride, cstride;
360
361 ystride = cstride = UINT(width) * bpp;
362 ycbcr->y = reinterpret_cast<void *>(base);
363 ycbcr->cb = reinterpret_cast<void *>(base + ystride * UINT(height));
364 ycbcr->cr = reinterpret_cast<void *>(base + ystride * UINT(height) + 1);
365 ycbcr->ystride = ystride;
366 ycbcr->cstride = cstride;
367 ycbcr->chroma_step = 2 * bpp;
368 }
369
// Fills |ycbcr| with plane addresses, strides and chroma step for the YUV
// buffer behind |hnd|. Honors per-buffer metadata: a linear-format override
// (for UBWC buffers rendered linear) and an updated geometry. Returns 0 on
// success, -EINVAL for formats without a YUV plane layout.
int Allocator::GetYUVPlaneInfo(const private_handle_t *hnd, struct android_ycbcr *ycbcr) {
  int err = 0;
  uint32_t width = UINT(hnd->width);
  uint32_t height = UINT(hnd->height);
  int format = hnd->format;
  gralloc1_producer_usage_t prod_usage = hnd->GetProducerUsage();
  gralloc1_consumer_usage_t cons_usage = hnd->GetConsumerUsage();
  unsigned int ystride, cstride;

  memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));

  // Check if UBWC buffer has been rendered in linear format.
  // If so, the linear format from metadata supersedes the handle's format.
  int linear_format = 0;
  if (getMetaData(const_cast<private_handle_t*>(hnd), GET_LINEAR_FORMAT, &linear_format) == 0) {
    format = linear_format;
  }

  // Check metadata if the geometry has been updated.
  // If so, recompute the aligned width/height from the slice dimensions.
  BufferDim_t buffer_dim = {};
  if (getMetaData(const_cast<private_handle_t*>(hnd), GET_BUFFER_GEOMETRY, &buffer_dim) == 0) {
    // NOTE(review): |usage| is computed but not passed into the descriptor
    // below — presumably the UBWC flag should influence alignment; confirm.
    int usage = 0;

    if (hnd->flags & private_handle_t::PRIV_FLAGS_UBWC_ALIGNED) {
      usage = GRALLOC1_PRODUCER_USAGE_PRIVATE_ALLOC_UBWC;
    }

    BufferDescriptor descriptor =
        BufferDescriptor(buffer_dim.sliceWidth, buffer_dim.sliceHeight, format,
                         prod_usage, cons_usage);
    GetAlignedWidthAndHeight(descriptor, &width, &height);
  }

  // Get the chroma offsets from the handle width/height. We take advantage
  // of the fact the width _is_ the stride
  switch (format) {
    // Semiplanar
    case HAL_PIXEL_FORMAT_YCbCr_420_SP:
    case HAL_PIXEL_FORMAT_YCbCr_422_SP:
    case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
    case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
      // Same as YCbCr_420_SP_VENUS
      GetYuvSPPlaneInfo(hnd->base, width, height, 1, ycbcr);
      break;

    case HAL_PIXEL_FORMAT_YCbCr_420_P010:
      // 10-bit packed in 16-bit samples: 2 bytes per component.
      GetYuvSPPlaneInfo(hnd->base, width, height, 2, ycbcr);
      break;

    case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC:
      GetYuvUbwcSPPlaneInfo(hnd->base, width, height, COLOR_FMT_NV12_UBWC, ycbcr);
      ycbcr->chroma_step = 2;
      break;

    case HAL_PIXEL_FORMAT_YCbCr_420_TP10_UBWC:
      GetYuvUbwcSPPlaneInfo(hnd->base, width, height, COLOR_FMT_NV12_BPP10_UBWC, ycbcr);
      ycbcr->chroma_step = 3;
      break;

    case HAL_PIXEL_FORMAT_YCrCb_420_SP:
    case HAL_PIXEL_FORMAT_YCrCb_422_SP:
    case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:
    case HAL_PIXEL_FORMAT_YCrCb_420_SP_VENUS:
    case HAL_PIXEL_FORMAT_NV21_ZSL:
    case HAL_PIXEL_FORMAT_RAW16:
    case HAL_PIXEL_FORMAT_RAW10:
    case HAL_PIXEL_FORMAT_RAW8:
      // Cr-first variants: compute as Cb-first, then swap the chroma pointers.
      GetYuvSPPlaneInfo(hnd->base, width, height, 1, ycbcr);
      std::swap(ycbcr->cb, ycbcr->cr);
      break;

    // Planar
    case HAL_PIXEL_FORMAT_YV12:
      // Three planes: Y, then Cr, then Cb; chroma pitch aligned to 16.
      ystride = width;
      cstride = ALIGN(width / 2, 16);
      ycbcr->y = reinterpret_cast<void *>(hnd->base);
      ycbcr->cr = reinterpret_cast<void *>(hnd->base + ystride * height);
      ycbcr->cb = reinterpret_cast<void *>(hnd->base + ystride * height + cstride * height / 2);
      ycbcr->ystride = ystride;
      ycbcr->cstride = cstride;
      ycbcr->chroma_step = 1;
      break;

    // Unsupported formats
    case HAL_PIXEL_FORMAT_YCbCr_422_I:
    case HAL_PIXEL_FORMAT_YCrCb_422_I:
    case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
    default:
      ALOGD("%s: Invalid format passed: 0x%x", __FUNCTION__, format);
      err = -EINVAL;
  }

  return err;
}
463
// Resolves HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED / YCbCr_420_888 into a
// concrete gralloc format using the usage bits. The branch order below is a
// priority list — UBWC request wins, then encoder, then camera consumer,
// then camera producer, then composer. Any other format is returned as-is.
int Allocator::GetImplDefinedFormat(gralloc1_producer_usage_t prod_usage,
                                    gralloc1_consumer_usage_t cons_usage, int format) {
  int gr_format = format;

  // If input format is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED then based on
  // the usage bits, gralloc assigns a format.
  if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED ||
      format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
    if (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_ALLOC_UBWC) {
      gr_format = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC;
    } else if (cons_usage & GRALLOC1_CONSUMER_USAGE_VIDEO_ENCODER) {
      gr_format = HAL_PIXEL_FORMAT_NV12_ENCODEABLE;  // NV12
    } else if (cons_usage & GRALLOC1_CONSUMER_USAGE_CAMERA) {
      if (prod_usage & GRALLOC1_PRODUCER_USAGE_CAMERA) {
        // Assumed ZSL if both producer and consumer camera flags set
        gr_format = HAL_PIXEL_FORMAT_NV21_ZSL;  // NV21
      } else {
        gr_format = HAL_PIXEL_FORMAT_YCrCb_420_SP;  // NV21
      }
    } else if (prod_usage & GRALLOC1_PRODUCER_USAGE_CAMERA) {
      if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
        gr_format = HAL_PIXEL_FORMAT_NV21_ZSL;  // NV21
      } else {
        gr_format = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS;  // NV12 preview
      }
    } else if (cons_usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER) {
      // XXX: If we still haven't set a format, default to RGBA8888
      gr_format = HAL_PIXEL_FORMAT_RGBA_8888;
    } else if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
      // If no other usage flags are detected, default the
      // flexible YUV format to NV21_ZSL
      gr_format = HAL_PIXEL_FORMAT_NV21_ZSL;
    }
  }

  return gr_format;
}
501
502 // Explicitly defined UBWC formats
IsUBwcFormat(int format)503 bool Allocator::IsUBwcFormat(int format) {
504 switch (format) {
505 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC:
506 case HAL_PIXEL_FORMAT_YCbCr_420_TP10_UBWC:
507 return true;
508 default:
509 return false;
510 }
511 }
512
IsUBwcSupported(int format)513 bool Allocator::IsUBwcSupported(int format) {
514 // Existing HAL formats with UBWC support
515 switch (format) {
516 case HAL_PIXEL_FORMAT_BGR_565:
517 case HAL_PIXEL_FORMAT_RGBA_8888:
518 case HAL_PIXEL_FORMAT_RGBX_8888:
519 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
520 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
521 case HAL_PIXEL_FORMAT_RGBA_1010102:
522 case HAL_PIXEL_FORMAT_RGBX_1010102:
523 return true;
524 default:
525 break;
526 }
527
528 return false;
529 }
530
531 /* The default policy is to return cached buffers unless the client explicity
532 * sets the PRIVATE_UNCACHED flag or indicates that the buffer will be rarely
533 * read or written in software. */
534 // TODO(user) : As of now relying only on producer usage
UseUncached(gralloc1_producer_usage_t prod_usage,gralloc1_consumer_usage_t cons_usage)535 bool Allocator::UseUncached(gralloc1_producer_usage_t prod_usage,
536 gralloc1_consumer_usage_t cons_usage) {
537 if ((prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_UNCACHED) ||
538 (prod_usage & GRALLOC1_PRODUCER_USAGE_PROTECTED)) {
539 return true;
540 }
541
542 // CPU read rarely
543 if ((prod_usage & GRALLOC1_PRODUCER_USAGE_CPU_READ_OFTEN)
544 == GRALLOC1_PRODUCER_USAGE_CPU_READ) {
545 return true;
546 }
547
548 // CPU write rarely
549 if ((prod_usage & GRALLOC1_PRODUCER_USAGE_CPU_WRITE_OFTEN)
550 == GRALLOC1_PRODUCER_USAGE_CPU_WRITE) {
551 return true;
552 }
553
554 if ((prod_usage & GRALLOC1_PRODUCER_USAGE_SENSOR_DIRECT_DATA) ||
555 (cons_usage & GRALLOC1_CONSUMER_USAGE_GPU_DATA_BUFFER)) {
556 return true;
557 }
558
559 return false;
560 }
561
// Maps producer/consumer usage bits to an ION heap mask (*ion_heap_id),
// private_handle_t allocation-type flags (*alloc_type) and ION allocation
// flags (*ion_flags). Protected buffers are routed to the secure heaps;
// everything else may OR in camera/ADSP heaps and falls back to the system
// heap if nothing was selected.
void Allocator::GetIonHeapInfo(gralloc1_producer_usage_t prod_usage,
                               gralloc1_consumer_usage_t cons_usage, unsigned int *ion_heap_id,
                               unsigned int *alloc_type, unsigned int *ion_flags) {
  unsigned int heap_id = 0;
  unsigned int type = 0;
  uint32_t flags = 0;
  if (prod_usage & GRALLOC1_PRODUCER_USAGE_PROTECTED) {
    if (cons_usage & GRALLOC1_CONSUMER_USAGE_PRIVATE_SECURE_DISPLAY) {
      heap_id = ION_HEAP(SD_HEAP_ID);
      /*
       * There is currently no flag in ION for Secure Display
       * VM. Please add it to the define once available.
       */
      flags |= UINT(ION_SD_FLAGS);
    } else if (prod_usage & GRALLOC1_PRODUCER_USAGE_CAMERA) {
      // Secure camera: preview buffers (also consumed by HWC) take a
      // distinct flag set from capture buffers.
      heap_id = ION_HEAP(SD_HEAP_ID);
      if (cons_usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER) {
        flags |= UINT(ION_SC_PREVIEW_FLAGS);
      } else {
        flags |= UINT(ION_SC_FLAGS);
      }
    } else {
      // Generic protected content (e.g. DRM video).
      heap_id = ION_HEAP(CP_HEAP_ID);
      flags |= UINT(ION_CP_FLAGS);
    }
  } else if (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_MM_HEAP) {
    // MM Heap is exclusively a secure heap.
    // If it is used for non secure cases, fallback to IOMMU heap
    ALOGW("MM_HEAP cannot be used as an insecure heap. Using system heap instead!!");
    heap_id |= ION_HEAP(ION_SYSTEM_HEAP_ID);
  }

  if (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_CAMERA_HEAP) {
    heap_id |= ION_HEAP(ION_CAMERA_HEAP_ID);
  }

  if (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_ADSP_HEAP ||
      prod_usage & GRALLOC1_PRODUCER_USAGE_SENSOR_DIRECT_DATA) {
    heap_id |= ION_HEAP(ION_ADSP_HEAP_ID);
  }

  // Tag secure allocations on the handle so other layers can detect them.
  if (flags & UINT(ION_SECURE)) {
    type |= private_handle_t::PRIV_FLAGS_SECURE_BUFFER;
  }

  // if no ion heap flags are set, default to system heap
  if (!heap_id) {
    heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
  }

  *alloc_type = type;
  *ion_flags = flags;
  *ion_heap_id = heap_id;

  return;
}
618
IsUBwcEnabled(int format,gralloc1_producer_usage_t prod_usage,gralloc1_consumer_usage_t cons_usage)619 bool Allocator::IsUBwcEnabled(int format, gralloc1_producer_usage_t prod_usage,
620 gralloc1_consumer_usage_t cons_usage) {
621 // Allow UBWC, if client is using an explicitly defined UBWC pixel format.
622 if (IsUBwcFormat(format)) {
623 return true;
624 }
625
626 if ((prod_usage & GRALLOC1_PRODUCER_USAGE_SENSOR_DIRECT_DATA) ||
627 (cons_usage & GRALLOC1_CONSUMER_USAGE_GPU_DATA_BUFFER)) {
628 return false;
629 }
630
631 // Allow UBWC, if an OpenGL client sets UBWC usage flag and GPU plus MDP
632 // support the format. OR if a non-OpenGL client like Rotator, sets UBWC
633 // usage flag and MDP supports the format.
634 if (IsUBwcSupported(format)) {
635 bool enable = (prod_usage & GRALLOC1_PRODUCER_USAGE_PRIVATE_ALLOC_UBWC) |
636 (cons_usage & GRALLOC1_CONSUMER_USAGE_CLIENT_TARGET);
637 // Query GPU for UBWC only if buffer is intended to be used by GPU.
638 if (enable && ((cons_usage & GRALLOC1_CONSUMER_USAGE_GPU_TEXTURE) ||
639 (prod_usage & GRALLOC1_PRODUCER_USAGE_GPU_RENDER_TARGET))) {
640 enable = adreno_helper_->IsUBWCSupportedByGPU(format);
641 }
642
643 // Allow UBWC, only if CPU usage flags are not set
644 if (enable && !(CpuCanAccess(prod_usage, cons_usage))) {
645 return true;
646 }
647 }
648
649 return false;
650 }
651
GetYuvUBwcWidthAndHeight(int width,int height,int format,unsigned int * aligned_w,unsigned int * aligned_h)652 void Allocator::GetYuvUBwcWidthAndHeight(int width, int height, int format, unsigned int *aligned_w,
653 unsigned int *aligned_h) {
654 switch (format) {
655 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
656 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
657 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC:
658 *aligned_w = VENUS_Y_STRIDE(COLOR_FMT_NV12_UBWC, width);
659 *aligned_h = VENUS_Y_SCANLINES(COLOR_FMT_NV12_UBWC, height);
660 break;
661 case HAL_PIXEL_FORMAT_YCbCr_420_TP10_UBWC:
662 // The macro returns the stride which is 4/3 times the width, hence * 3/4
663 *aligned_w = (VENUS_Y_STRIDE(COLOR_FMT_NV12_BPP10_UBWC, width) * 3) / 4;
664 *aligned_h = VENUS_Y_SCANLINES(COLOR_FMT_NV12_BPP10_UBWC, height);
665 break;
666 default:
667 ALOGE("%s: Unsupported pixel format: 0x%x", __FUNCTION__, format);
668 *aligned_w = 0;
669 *aligned_h = 0;
670 break;
671 }
672 }
673
GetRgbUBwcBlockSize(uint32_t bpp,int * block_width,int * block_height)674 void Allocator::GetRgbUBwcBlockSize(uint32_t bpp, int *block_width, int *block_height) {
675 *block_width = 0;
676 *block_height = 0;
677
678 switch (bpp) {
679 case 2:
680 case 4:
681 *block_width = 16;
682 *block_height = 4;
683 break;
684 case 8:
685 *block_width = 8;
686 *block_height = 4;
687 break;
688 case 16:
689 *block_width = 4;
690 *block_height = 4;
691 break;
692 default:
693 ALOGE("%s: Unsupported bpp: %d", __FUNCTION__, bpp);
694 break;
695 }
696 }
697
GetRgbUBwcMetaBufferSize(int width,int height,uint32_t bpp)698 unsigned int Allocator::GetRgbUBwcMetaBufferSize(int width, int height, uint32_t bpp) {
699 unsigned int size = 0;
700 int meta_width, meta_height;
701 int block_width, block_height;
702
703 GetRgbUBwcBlockSize(bpp, &block_width, &block_height);
704 if (!block_width || !block_height) {
705 ALOGE("%s: Unsupported bpp: %d", __FUNCTION__, bpp);
706 return size;
707 }
708
709 // Align meta buffer height to 16 blocks
710 meta_height = ALIGN(((height + block_height - 1) / block_height), 16);
711
712 // Align meta buffer width to 64 blocks
713 meta_width = ALIGN(((width + block_width - 1) / block_width), 64);
714
715 // Align meta buffer size to 4K
716 size = (unsigned int)ALIGN((meta_width * meta_height), 4096);
717
718 return size;
719 }
720
GetUBwcSize(int width,int height,int format,unsigned int alignedw,unsigned int alignedh)721 unsigned int Allocator::GetUBwcSize(int width, int height, int format, unsigned int alignedw,
722 unsigned int alignedh) {
723 unsigned int size = 0;
724 uint32_t bpp = 0;
725 switch (format) {
726 case HAL_PIXEL_FORMAT_BGR_565:
727 case HAL_PIXEL_FORMAT_RGBA_8888:
728 case HAL_PIXEL_FORMAT_RGBX_8888:
729 case HAL_PIXEL_FORMAT_RGBA_1010102:
730 case HAL_PIXEL_FORMAT_RGBX_1010102:
731 bpp = GetBppForUncompressedRGB(format);
732 size = alignedw * alignedh * bpp;
733 size += GetRgbUBwcMetaBufferSize(width, height, bpp);
734 break;
735 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
736 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
737 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC:
738 size = VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
739 break;
740 case HAL_PIXEL_FORMAT_YCbCr_420_TP10_UBWC:
741 size = VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
742 break;
743 default:
744 ALOGE("%s: Unsupported pixel format: 0x%x", __FUNCTION__, format);
745 break;
746 }
747
748 return size;
749 }
750
GetRgbDataAddress(private_handle_t * hnd,void ** rgb_data)751 int Allocator::GetRgbDataAddress(private_handle_t *hnd, void **rgb_data) {
752 int err = 0;
753
754 // This api is for RGB* formats
755 if (!gralloc1::IsUncompressedRGBFormat(hnd->format)) {
756 return -EINVAL;
757 }
758
759 // linear buffer, nothing to do further
760 if (!(hnd->flags & private_handle_t::PRIV_FLAGS_UBWC_ALIGNED)) {
761 *rgb_data = reinterpret_cast<void *>(hnd->base);
762 return err;
763 }
764
765 unsigned int meta_size = 0;
766 uint32_t bpp = GetBppForUncompressedRGB(hnd->format);
767 switch (hnd->format) {
768 case HAL_PIXEL_FORMAT_BGR_565:
769 case HAL_PIXEL_FORMAT_RGBA_8888:
770 case HAL_PIXEL_FORMAT_RGBX_8888:
771 case HAL_PIXEL_FORMAT_RGBA_1010102:
772 case HAL_PIXEL_FORMAT_RGBX_1010102:
773 meta_size = GetRgbUBwcMetaBufferSize(hnd->width, hnd->height, bpp);
774 break;
775 default:
776 ALOGE("%s:Unsupported RGB format: 0x%x", __FUNCTION__, hnd->format);
777 err = -EINVAL;
778 break;
779 }
780 *rgb_data = reinterpret_cast<void *>(hnd->base + meta_size);
781
782 return err;
783 }
784
// Computes the hardware-aligned width/height for a buffer described by
// |descriptor|. RGB surfaces are aligned by the Adreno helper, UBWC YUV by
// the Venus macros, and the remaining YUV/RAW formats by the per-format
// rules in the switch below.
void Allocator::GetAlignedWidthAndHeight(const BufferDescriptor &descriptor, unsigned int *alignedw,
                                         unsigned int *alignedh) {
  int width = descriptor.GetWidth();
  int height = descriptor.GetHeight();
  int format = descriptor.GetFormat();
  gralloc1_producer_usage_t prod_usage = descriptor.GetProducerUsage();
  gralloc1_consumer_usage_t cons_usage = descriptor.GetConsumerUsage();

  // Currently surface padding is only computed for RGB* surfaces.
  bool ubwc_enabled = IsUBwcEnabled(format, prod_usage, cons_usage);
  int tile = ubwc_enabled;

  if (IsUncompressedRGBFormat(format)) {
    adreno_helper_->AlignUnCompressedRGB(width, height, format, tile, alignedw, alignedh);
    return;
  }

  if (ubwc_enabled) {
    GetYuvUBwcWidthAndHeight(width, height, format, alignedw, alignedh);
    return;
  }

  if (IsCompressedRGBFormat(format)) {
    adreno_helper_->AlignCompressedRGB(width, height, format, alignedw, alignedh);
    return;
  }

  int aligned_w = width;
  int aligned_h = height;
  unsigned int alignment = 32;

  // Below should be only YUV family
  switch (format) {
    case HAL_PIXEL_FORMAT_YCrCb_420_SP:
    case HAL_PIXEL_FORMAT_YCbCr_420_SP:
      // Width alignment comes from the GPU for generic semi-planar.
      alignment = adreno_helper_->GetGpuPixelAlignment();
      aligned_w = ALIGN(width, alignment);
      break;
    case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:
      aligned_w = ALIGN(width, alignment);
      break;
    case HAL_PIXEL_FORMAT_RAW16:
      aligned_w = ALIGN(width, 16);
      break;
    case HAL_PIXEL_FORMAT_RAW12:
      // 12 bits per pixel packed: width is scaled to bytes before aligning.
      aligned_w = ALIGN(width * 12 / 8, 8);
      break;
    case HAL_PIXEL_FORMAT_RAW10:
      {
        const unsigned int gpu_alignment =
            adreno_helper_->GetGpuPixelAlignment();
        // gpu_alignment can return 1. Make sure it's at least 8.
        const unsigned int raw10_alignment = std::max(gpu_alignment, 8u);
        aligned_w = ALIGN(width * 10 / 8, raw10_alignment);
      }
      break;
    case HAL_PIXEL_FORMAT_RAW8:
      aligned_w = ALIGN(width, 8);
      break;
    case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
      aligned_w = ALIGN(width, 128);
      break;
    case HAL_PIXEL_FORMAT_YV12:
    case HAL_PIXEL_FORMAT_YCbCr_422_SP:
    case HAL_PIXEL_FORMAT_YCrCb_422_SP:
    case HAL_PIXEL_FORMAT_YCbCr_422_I:
    case HAL_PIXEL_FORMAT_YCrCb_422_I:
    case HAL_PIXEL_FORMAT_YCbCr_420_P010:
      aligned_w = ALIGN(width, 16);
      break;
    case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
    case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
      // Venus codec alignment for NV12.
      aligned_w = INT(VENUS_Y_STRIDE(COLOR_FMT_NV12, width));
      aligned_h = INT(VENUS_Y_SCANLINES(COLOR_FMT_NV12, height));
      break;
    case HAL_PIXEL_FORMAT_YCrCb_420_SP_VENUS:
      aligned_w = INT(VENUS_Y_STRIDE(COLOR_FMT_NV21, width));
      aligned_h = INT(VENUS_Y_SCANLINES(COLOR_FMT_NV21, height));
      break;
    case HAL_PIXEL_FORMAT_BLOB:
    case HAL_PIXEL_FORMAT_RAW_OPAQUE:
      // 1-D buffers: no alignment applied.
      break;
    case HAL_PIXEL_FORMAT_NV21_ZSL:
    case HAL_PIXEL_FORMAT_Y8:
      aligned_w = ALIGN(width, 64);
      aligned_h = ALIGN(height, 64);
      break;
    default:
      break;
  }

  *alignedw = (unsigned int)aligned_w;
  *alignedh = (unsigned int)aligned_h;
}
879
880 } // namespace gralloc1
881