/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>

#define LOG_TAG "BufferQueueProducer"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#if DEBUG_ONLY_CODE
#define VALIDATE_CONSISTENCY() do { mCore->validateConsistencyLocked(); } while (0)
#else
#define VALIDATE_CONSISTENCY()
#endif

#define EGL_EGLEXT_PROTOTYPES

#include <binder/IPCThreadState.h>
#include <gui/BufferItem.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <gui/GLConsumer.h>
#include <gui/IConsumerListener.h>
#include <gui/IProducerListener.h>
#include <private/gui/BufferQueueThreadState.h>

#include <utils/Log.h>
#include <utils/Trace.h>

#include <system/window.h>

namespace android {

static constexpr uint32_t BQ_LAYER_COUNT = 1;

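// Illustrative producer-side call sequence (a sketch, not a complete
// contract): connect() -> dequeueBuffer() -> requestBuffer() when the
// BUFFER_NEEDS_REALLOCATION flag is set -> fill the buffer ->
// queueBuffer() (or cancelBuffer() to return the slot) -> disconnect().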
BufferQueueProducer::BufferQueueProducer(const sp<BufferQueueCore>& core,
        bool consumerIsSurfaceFlinger) :
    mCore(core),
    mSlots(core->mSlots),
    mConsumerName(),
    mStickyTransform(0),
    mConsumerIsSurfaceFlinger(consumerIsSurfaceFlinger),
    mLastQueueBufferFence(Fence::NO_FENCE),
    mLastQueuedTransform(0),
    mCallbackMutex(),
    mNextCallbackTicket(0),
    mCurrentCallbackTicket(0),
    mCallbackCondition(),
    mDequeueTimeout(-1),
    mDequeueWaitingForAllocation(false) {}

BufferQueueProducer::~BufferQueueProducer() {}

status_t BufferQueueProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
    ATRACE_CALL();
    BQ_LOGV("requestBuffer: slot %d", slot);
    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("requestBuffer: BufferQueue has been abandoned");
        return NO_INIT;
    }

    if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("requestBuffer: BufferQueue has no connected producer");
        return NO_INIT;
    }

    if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
        BQ_LOGE("requestBuffer: slot index %d out of range [0, %d)",
                slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
        return BAD_VALUE;
    } else if (!mSlots[slot].mBufferState.isDequeued()) {
        BQ_LOGE("requestBuffer: slot %d is not owned by the producer "
                "(state = %s)", slot, mSlots[slot].mBufferState.string());
        return BAD_VALUE;
    }

    mSlots[slot].mRequestBufferCalled = true;
    *buf = mSlots[slot].mGraphicBuffer;
    return NO_ERROR;
}

status_t BufferQueueProducer::setMaxDequeuedBufferCount(
        int maxDequeuedBuffers) {
    ATRACE_CALL();
    BQ_LOGV("setMaxDequeuedBufferCount: maxDequeuedBuffers = %d",
            maxDequeuedBuffers);

    sp<IConsumerListener> listener;
    { // Autolock scope
        std::unique_lock<std::mutex> lock(mCore->mMutex);
        mCore->waitWhileAllocatingLocked(lock);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("setMaxDequeuedBufferCount: BufferQueue has been "
                    "abandoned");
            return NO_INIT;
        }

        if (maxDequeuedBuffers == mCore->mMaxDequeuedBufferCount) {
            return NO_ERROR;
        }

        // The new maxDequeuedBuffer count should not be violated by the number
        // of currently dequeued buffers
        int dequeuedCount = 0;
        for (int s : mCore->mActiveBuffers) {
            if (mSlots[s].mBufferState.isDequeued()) {
                dequeuedCount++;
            }
        }
        if (dequeuedCount > maxDequeuedBuffers) {
            BQ_LOGE("setMaxDequeuedBufferCount: the requested maxDequeuedBuffer "
                    "count (%d) is smaller than the current dequeued buffer "
                    "count (%d)", maxDequeuedBuffers, dequeuedCount);
            return BAD_VALUE;
        }

        int bufferCount = mCore->getMinUndequeuedBufferCountLocked();
        bufferCount += maxDequeuedBuffers;

        if (bufferCount > BufferQueueDefs::NUM_BUFFER_SLOTS) {
            BQ_LOGE("setMaxDequeuedBufferCount: bufferCount %d too large "
                    "(max %d)", bufferCount, BufferQueueDefs::NUM_BUFFER_SLOTS);
            return BAD_VALUE;
        }

        const int minBufferSlots = mCore->getMinMaxBufferCountLocked();
        if (bufferCount < minBufferSlots) {
            BQ_LOGE("setMaxDequeuedBufferCount: requested buffer count %d is "
                    "less than minimum %d", bufferCount, minBufferSlots);
            return BAD_VALUE;
        }

        if (bufferCount > mCore->mMaxBufferCount) {
            BQ_LOGE("setMaxDequeuedBufferCount: %d dequeued buffers would "
                    "exceed the maxBufferCount (%d) (maxAcquired %d async %d "
                    "mDequeueBufferCannotBlock %d)", maxDequeuedBuffers,
                    mCore->mMaxBufferCount, mCore->mMaxAcquiredBufferCount,
                    mCore->mAsyncMode, mCore->mDequeueBufferCannotBlock);
            return BAD_VALUE;
        }

        int delta = maxDequeuedBuffers - mCore->mMaxDequeuedBufferCount;
        if (!mCore->adjustAvailableSlotsLocked(delta)) {
            return BAD_VALUE;
        }
        mCore->mMaxDequeuedBufferCount = maxDequeuedBuffers;
        VALIDATE_CONSISTENCY();
        if (delta < 0) {
            listener = mCore->mConsumerListener;
        }
        mCore->mDequeueCondition.notify_all();
    } // Autolock scope

    // Call back without lock held
    if (listener != nullptr) {
        listener->onBuffersReleased();
    }

    return NO_ERROR;
}

status_t BufferQueueProducer::setAsyncMode(bool async) {
    ATRACE_CALL();
    BQ_LOGV("setAsyncMode: async = %d", async);

    sp<IConsumerListener> listener;
    { // Autolock scope
        std::unique_lock<std::mutex> lock(mCore->mMutex);
        mCore->waitWhileAllocatingLocked(lock);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("setAsyncMode: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (async == mCore->mAsyncMode) {
            return NO_ERROR;
        }

        if ((mCore->mMaxAcquiredBufferCount + mCore->mMaxDequeuedBufferCount +
                (async || mCore->mDequeueBufferCannotBlock ? 1 : 0)) >
                mCore->mMaxBufferCount) {
            BQ_LOGE("setAsyncMode(%d): this call would cause the "
                    "maxBufferCount (%d) to be exceeded (maxAcquired %d "
                    "maxDequeued %d mDequeueBufferCannotBlock %d)", async,
                    mCore->mMaxBufferCount, mCore->mMaxAcquiredBufferCount,
                    mCore->mMaxDequeuedBufferCount,
                    mCore->mDequeueBufferCannotBlock);
            return BAD_VALUE;
        }

        int delta = mCore->getMaxBufferCountLocked(async,
                mCore->mDequeueBufferCannotBlock, mCore->mMaxBufferCount)
                - mCore->getMaxBufferCountLocked();

        if (!mCore->adjustAvailableSlotsLocked(delta)) {
            BQ_LOGE("setAsyncMode: BufferQueue failed to adjust the number of "
                    "available slots. Delta = %d", delta);
            return BAD_VALUE;
        }
        mCore->mAsyncMode = async;
        VALIDATE_CONSISTENCY();
        mCore->mDequeueCondition.notify_all();
        if (delta < 0) {
            listener = mCore->mConsumerListener;
        }
    } // Autolock scope

    // Call back without lock held
    if (listener != nullptr) {
        listener->onBuffersReleased();
    }
    return NO_ERROR;
}

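// Note: mCore->mFreeBuffers tracks free slots that still have a GraphicBuffer
// attached, while mCore->mFreeSlots tracks free slots with no buffer attached.
// The two helpers below pop from those pools respectively.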
int BufferQueueProducer::getFreeBufferLocked() const {
    if (mCore->mFreeBuffers.empty()) {
        return BufferQueueCore::INVALID_BUFFER_SLOT;
    }
    int slot = mCore->mFreeBuffers.front();
    mCore->mFreeBuffers.pop_front();
    return slot;
}

int BufferQueueProducer::getFreeSlotLocked() const {
    if (mCore->mFreeSlots.empty()) {
        return BufferQueueCore::INVALID_BUFFER_SLOT;
    }
    int slot = *(mCore->mFreeSlots.begin());
    mCore->mFreeSlots.erase(slot);
    return slot;
}

status_t BufferQueueProducer::waitForFreeSlotThenRelock(FreeSlotCaller caller,
        std::unique_lock<std::mutex>& lock, int* found) const {
    auto callerString = (caller == FreeSlotCaller::Dequeue) ?
            "dequeueBuffer" : "attachBuffer";
    bool tryAgain = true;
    while (tryAgain) {
        if (mCore->mIsAbandoned) {
            BQ_LOGE("%s: BufferQueue has been abandoned", callerString);
            return NO_INIT;
        }

        int dequeuedCount = 0;
        int acquiredCount = 0;
        for (int s : mCore->mActiveBuffers) {
            if (mSlots[s].mBufferState.isDequeued()) {
                ++dequeuedCount;
            }
            if (mSlots[s].mBufferState.isAcquired()) {
                ++acquiredCount;
            }
        }

        // Producers are not allowed to dequeue more than
        // mMaxDequeuedBufferCount buffers.
        // This check is only done if a buffer has already been queued
        if (mCore->mBufferHasBeenQueued &&
                dequeuedCount >= mCore->mMaxDequeuedBufferCount) {
            // Suppress error logs when the timeout is non-negative.
            if (mDequeueTimeout < 0) {
                BQ_LOGE("%s: attempting to exceed the max dequeued buffer "
                        "count (%d)", callerString,
                        mCore->mMaxDequeuedBufferCount);
            }
            return INVALID_OPERATION;
        }

        *found = BufferQueueCore::INVALID_BUFFER_SLOT;

        // If we disconnect and reconnect quickly, we can be in a state where
        // our slots are empty but we have many buffers in the queue. This can
        // cause us to run out of memory if we outrun the consumer. Wait here if
        // it looks like we have too many buffers queued up.
        const int maxBufferCount = mCore->getMaxBufferCountLocked();
        bool tooManyBuffers = mCore->mQueue.size()
                > static_cast<size_t>(maxBufferCount);
        if (tooManyBuffers) {
            BQ_LOGV("%s: queue size is %zu, waiting", callerString,
                    mCore->mQueue.size());
        } else {
            // If in shared buffer mode and a shared buffer exists, always
            // return it.
            if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot !=
                    BufferQueueCore::INVALID_BUFFER_SLOT) {
                *found = mCore->mSharedBufferSlot;
            } else {
                if (caller == FreeSlotCaller::Dequeue) {
                    // If we're calling this from dequeue, prefer free buffers
                    int slot = getFreeBufferLocked();
                    if (slot != BufferQueueCore::INVALID_BUFFER_SLOT) {
                        *found = slot;
                    } else if (mCore->mAllowAllocation) {
                        *found = getFreeSlotLocked();
                    }
                } else {
                    // If we're calling this from attach, prefer free slots
                    int slot = getFreeSlotLocked();
                    if (slot != BufferQueueCore::INVALID_BUFFER_SLOT) {
                        *found = slot;
                    } else {
                        *found = getFreeBufferLocked();
                    }
                }
            }
        }

        // If no buffer is found, or if the queue has too many buffers
        // outstanding, wait for a buffer to be acquired or released, or for the
        // max buffer count to change.
        tryAgain = (*found == BufferQueueCore::INVALID_BUFFER_SLOT) ||
                tooManyBuffers;
        if (tryAgain) {
            // Return an error if we're in non-blocking mode (producer and
            // consumer are controlled by the application).
            // However, the consumer is allowed to briefly acquire an extra
            // buffer (which could cause us to have to wait here), which is
            // okay, since it is only used to implement an atomic acquire +
            // release (e.g., in GLConsumer::updateTexImage())
            if ((mCore->mDequeueBufferCannotBlock || mCore->mAsyncMode) &&
                    (acquiredCount <= mCore->mMaxAcquiredBufferCount)) {
                return WOULD_BLOCK;
            }
            if (mDequeueTimeout >= 0) {
                std::cv_status result = mCore->mDequeueCondition.wait_for(lock,
                        std::chrono::nanoseconds(mDequeueTimeout));
                if (result == std::cv_status::timeout) {
                    return TIMED_OUT;
                }
            } else {
                mCore->mDequeueCondition.wait(lock);
            }
        }
    } // while (tryAgain)

    return NO_ERROR;
}

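// dequeueBuffer hands a free slot back to the producer. When the returned
// flags include BUFFER_NEEDS_REALLOCATION, the caller is expected to call
// requestBuffer() to fetch the newly allocated GraphicBuffer for that slot
// before using it.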
status_t BufferQueueProducer::dequeueBuffer(int* outSlot, sp<android::Fence>* outFence,
                                            uint32_t width, uint32_t height, PixelFormat format,
                                            uint64_t usage, uint64_t* outBufferAge,
                                            FrameEventHistoryDelta* outTimestamps) {
    ATRACE_CALL();
    { // Autolock scope
        std::lock_guard<std::mutex> lock(mCore->mMutex);
        mConsumerName = mCore->mConsumerName;

        if (mCore->mIsAbandoned) {
            BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
            BQ_LOGE("dequeueBuffer: BufferQueue has no connected producer");
            return NO_INIT;
        }
    } // Autolock scope

    BQ_LOGV("dequeueBuffer: w=%u h=%u format=%#x, usage=%#" PRIx64, width, height, format, usage);

    if ((width && !height) || (!width && height)) {
        BQ_LOGE("dequeueBuffer: invalid size: w=%u h=%u", width, height);
        return BAD_VALUE;
    }

    status_t returnFlags = NO_ERROR;
    EGLDisplay eglDisplay = EGL_NO_DISPLAY;
    EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;
    bool attachedByConsumer = false;

    { // Autolock scope
        std::unique_lock<std::mutex> lock(mCore->mMutex);

        // If we don't have a free buffer, but we are currently allocating, we wait until allocation
        // is finished such that we don't allocate in parallel.
        if (mCore->mFreeBuffers.empty() && mCore->mIsAllocating) {
            mDequeueWaitingForAllocation = true;
            mCore->waitWhileAllocatingLocked(lock);
            mDequeueWaitingForAllocation = false;
            mDequeueWaitingForAllocationCondition.notify_all();
        }

        if (format == 0) {
            format = mCore->mDefaultBufferFormat;
        }

        // Enable the usage bits the consumer requested
        usage |= mCore->mConsumerUsageBits;

        const bool useDefaultSize = !width && !height;
        if (useDefaultSize) {
            width = mCore->mDefaultWidth;
            height = mCore->mDefaultHeight;
        }

        int found = BufferItem::INVALID_BUFFER_SLOT;
        while (found == BufferItem::INVALID_BUFFER_SLOT) {
            status_t status = waitForFreeSlotThenRelock(FreeSlotCaller::Dequeue, lock, &found);
            if (status != NO_ERROR) {
                return status;
            }

            // This should not happen
            if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
                BQ_LOGE("dequeueBuffer: no available buffer slots");
                return -EBUSY;
            }

            const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);

            // If we are not allowed to allocate new buffers,
            // waitForFreeSlotThenRelock must have returned a slot containing a
            // buffer. If this buffer would require reallocation to meet the
            // requested attributes, we free it and attempt to get another one.
            if (!mCore->mAllowAllocation) {
                if (buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) {
                    if (mCore->mSharedBufferSlot == found) {
                        BQ_LOGE("dequeueBuffer: cannot re-allocate a shared buffer");
                        return BAD_VALUE;
                    }
                    mCore->mFreeSlots.insert(found);
                    mCore->clearBufferSlotLocked(found);
                    found = BufferItem::INVALID_BUFFER_SLOT;
                    continue;
                }
            }
        }

        const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
        if (mCore->mSharedBufferSlot == found &&
                buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) {
            BQ_LOGE("dequeueBuffer: cannot re-allocate a shared buffer");

            return BAD_VALUE;
        }

        if (mCore->mSharedBufferSlot != found) {
            mCore->mActiveBuffers.insert(found);
        }
        *outSlot = found;
        ATRACE_BUFFER_INDEX(found);

        attachedByConsumer = mSlots[found].mNeedsReallocation;
        mSlots[found].mNeedsReallocation = false;

        mSlots[found].mBufferState.dequeue();

        if ((buffer == nullptr) ||
                buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage))
        {
            mSlots[found].mAcquireCalled = false;
            mSlots[found].mGraphicBuffer = nullptr;
            mSlots[found].mRequestBufferCalled = false;
            mSlots[found].mEglDisplay = EGL_NO_DISPLAY;
            mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
            mSlots[found].mFence = Fence::NO_FENCE;
            mCore->mBufferAge = 0;
            mCore->mIsAllocating = true;

            returnFlags |= BUFFER_NEEDS_REALLOCATION;
        } else {
            // We add 1 because that will be the frame number when this buffer
            // is queued
            mCore->mBufferAge = mCore->mFrameCounter + 1 - mSlots[found].mFrameNumber;
        }

        BQ_LOGV("dequeueBuffer: setting buffer age to %" PRIu64,
                mCore->mBufferAge);

        if (CC_UNLIKELY(mSlots[found].mFence == nullptr)) {
            BQ_LOGE("dequeueBuffer: about to return a NULL fence - "
                    "slot=%d w=%d h=%d format=%u",
                    found, buffer->width, buffer->height, buffer->format);
        }

        eglDisplay = mSlots[found].mEglDisplay;
        eglFence = mSlots[found].mEglFence;
        // Don't return a fence in shared buffer mode, except for the first
        // frame.
        *outFence = (mCore->mSharedBufferMode &&
                mCore->mSharedBufferSlot == found) ?
                Fence::NO_FENCE : mSlots[found].mFence;
        mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
        mSlots[found].mFence = Fence::NO_FENCE;

        // If shared buffer mode has just been enabled, cache the slot of the
        // first buffer that is dequeued and mark it as the shared buffer.
        if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
                BufferQueueCore::INVALID_BUFFER_SLOT) {
            mCore->mSharedBufferSlot = found;
            mSlots[found].mBufferState.mShared = true;
        }
    } // Autolock scope

    if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
        BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
        sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
                width, height, format, BQ_LAYER_COUNT, usage,
                {mConsumerName.string(), mConsumerName.size()});

        status_t error = graphicBuffer->initCheck();

        { // Autolock scope
            std::lock_guard<std::mutex> lock(mCore->mMutex);

            if (error == NO_ERROR && !mCore->mIsAbandoned) {
                graphicBuffer->setGenerationNumber(mCore->mGenerationNumber);
                mSlots[*outSlot].mGraphicBuffer = graphicBuffer;
            }

            mCore->mIsAllocating = false;
            mCore->mIsAllocatingCondition.notify_all();

            if (error != NO_ERROR) {
                mCore->mFreeSlots.insert(*outSlot);
                mCore->clearBufferSlotLocked(*outSlot);
                BQ_LOGE("dequeueBuffer: createGraphicBuffer failed");
                return error;
            }

            if (mCore->mIsAbandoned) {
                mCore->mFreeSlots.insert(*outSlot);
                mCore->clearBufferSlotLocked(*outSlot);
                BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
                return NO_INIT;
            }

            VALIDATE_CONSISTENCY();
        } // Autolock scope
    }

    if (attachedByConsumer) {
        returnFlags |= BUFFER_NEEDS_REALLOCATION;
    }

    if (eglFence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(eglDisplay, eglFence, 0,
                1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            BQ_LOGE("dequeueBuffer: error %#x waiting for fence",
                    eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            BQ_LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(eglDisplay, eglFence);
    }

    BQ_LOGV("dequeueBuffer: returning slot=%d/%" PRIu64 " buf=%p flags=%#x",
            *outSlot,
            mSlots[*outSlot].mFrameNumber,
            mSlots[*outSlot].mGraphicBuffer->handle, returnFlags);

    if (outBufferAge) {
        *outBufferAge = mCore->mBufferAge;
    }
    addAndGetFrameTimestamps(nullptr, outTimestamps);

    return returnFlags;
}

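// detachBuffer removes a dequeued (and requested) buffer from the queue's
// bookkeeping: the producer keeps its reference to the GraphicBuffer, while
// the slot itself is cleared and returned to the free-slot pool.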
status_t BufferQueueProducer::detachBuffer(int slot) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(slot);
    BQ_LOGV("detachBuffer: slot %d", slot);

    sp<IConsumerListener> listener;
    {
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("detachBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
            BQ_LOGE("detachBuffer: BufferQueue has no connected producer");
            return NO_INIT;
        }

        if (mCore->mSharedBufferMode || mCore->mSharedBufferSlot == slot) {
            BQ_LOGE("detachBuffer: cannot detach a buffer in shared buffer mode");
            return BAD_VALUE;
        }

        if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
            BQ_LOGE("detachBuffer: slot index %d out of range [0, %d)",
                    slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
            return BAD_VALUE;
        } else if (!mSlots[slot].mBufferState.isDequeued()) {
            BQ_LOGE("detachBuffer: slot %d is not owned by the producer "
                    "(state = %s)", slot, mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        } else if (!mSlots[slot].mRequestBufferCalled) {
            BQ_LOGE("detachBuffer: buffer in slot %d has not been requested",
                    slot);
            return BAD_VALUE;
        }

        mSlots[slot].mBufferState.detachProducer();
        mCore->mActiveBuffers.erase(slot);
        mCore->mFreeSlots.insert(slot);
        mCore->clearBufferSlotLocked(slot);
        mCore->mDequeueCondition.notify_all();
        VALIDATE_CONSISTENCY();
        listener = mCore->mConsumerListener;
    }

    if (listener != nullptr) {
        listener->onBuffersReleased();
    }

    return NO_ERROR;
}

status_t BufferQueueProducer::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
        sp<Fence>* outFence) {
    ATRACE_CALL();

    if (outBuffer == nullptr) {
        BQ_LOGE("detachNextBuffer: outBuffer must not be NULL");
        return BAD_VALUE;
    } else if (outFence == nullptr) {
        BQ_LOGE("detachNextBuffer: outFence must not be NULL");
        return BAD_VALUE;
    }

    sp<IConsumerListener> listener;
    {
        std::unique_lock<std::mutex> lock(mCore->mMutex);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("detachNextBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
            BQ_LOGE("detachNextBuffer: BufferQueue has no connected producer");
            return NO_INIT;
        }

        if (mCore->mSharedBufferMode) {
            BQ_LOGE("detachNextBuffer: cannot detach a buffer in shared buffer "
                    "mode");
            return BAD_VALUE;
        }

        mCore->waitWhileAllocatingLocked(lock);

        if (mCore->mFreeBuffers.empty()) {
            return NO_MEMORY;
        }

        int found = mCore->mFreeBuffers.front();
        mCore->mFreeBuffers.remove(found);
        mCore->mFreeSlots.insert(found);

        BQ_LOGV("detachNextBuffer detached slot %d", found);

        *outBuffer = mSlots[found].mGraphicBuffer;
        *outFence = mSlots[found].mFence;
        mCore->clearBufferSlotLocked(found);
        VALIDATE_CONSISTENCY();
        listener = mCore->mConsumerListener;
    }

    if (listener != nullptr) {
        listener->onBuffersReleased();
    }

    return NO_ERROR;
}

status_t BufferQueueProducer::attachBuffer(int* outSlot,
        const sp<android::GraphicBuffer>& buffer) {
    ATRACE_CALL();

    if (outSlot == nullptr) {
        BQ_LOGE("attachBuffer: outSlot must not be NULL");
        return BAD_VALUE;
    } else if (buffer == nullptr) {
        BQ_LOGE("attachBuffer: cannot attach NULL buffer");
        return BAD_VALUE;
    }

    std::unique_lock<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("attachBuffer: BufferQueue has been abandoned");
        return NO_INIT;
    }

    if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("attachBuffer: BufferQueue has no connected producer");
        return NO_INIT;
    }

    if (mCore->mSharedBufferMode) {
        BQ_LOGE("attachBuffer: cannot attach a buffer in shared buffer mode");
        return BAD_VALUE;
    }

    if (buffer->getGenerationNumber() != mCore->mGenerationNumber) {
        BQ_LOGE("attachBuffer: generation number mismatch [buffer %u] "
                "[queue %u]", buffer->getGenerationNumber(),
                mCore->mGenerationNumber);
        return BAD_VALUE;
    }

    mCore->waitWhileAllocatingLocked(lock);

    status_t returnFlags = NO_ERROR;
    int found;
    status_t status = waitForFreeSlotThenRelock(FreeSlotCaller::Attach, lock, &found);
    if (status != NO_ERROR) {
        return status;
    }

    // This should not happen
    if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
        BQ_LOGE("attachBuffer: no available buffer slots");
        return -EBUSY;
    }

    *outSlot = found;
    ATRACE_BUFFER_INDEX(*outSlot);
    BQ_LOGV("attachBuffer: returning slot %d flags=%#x",
            *outSlot, returnFlags);

    mSlots[*outSlot].mGraphicBuffer = buffer;
    mSlots[*outSlot].mBufferState.attachProducer();
    mSlots[*outSlot].mEglFence = EGL_NO_SYNC_KHR;
    mSlots[*outSlot].mFence = Fence::NO_FENCE;
    mSlots[*outSlot].mRequestBufferCalled = true;
    mSlots[*outSlot].mAcquireCalled = false;
    mSlots[*outSlot].mNeedsReallocation = false;
    mCore->mActiveBuffers.insert(found);
    VALIDATE_CONSISTENCY();

    return returnFlags;
}

status_t BufferQueueProducer::queueBuffer(int slot,
        const QueueBufferInput &input, QueueBufferOutput *output) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(slot);

    int64_t requestedPresentTimestamp;
    bool isAutoTimestamp;
    android_dataspace dataSpace;
    Rect crop(Rect::EMPTY_RECT);
    int scalingMode;
    uint32_t transform;
    uint32_t stickyTransform;
    sp<Fence> acquireFence;
    bool getFrameTimestamps = false;
    input.deflate(&requestedPresentTimestamp, &isAutoTimestamp, &dataSpace,
            &crop, &scalingMode, &transform, &acquireFence, &stickyTransform,
            &getFrameTimestamps);
    const Region& surfaceDamage = input.getSurfaceDamage();
    const HdrMetadata& hdrMetadata = input.getHdrMetadata();

    if (acquireFence == nullptr) {
        BQ_LOGE("queueBuffer: fence is NULL");
        return BAD_VALUE;
    }

    auto acquireFenceTime = std::make_shared<FenceTime>(acquireFence);

    switch (scalingMode) {
        case NATIVE_WINDOW_SCALING_MODE_FREEZE:
        case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
        case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
        case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
            break;
        default:
            BQ_LOGE("queueBuffer: unknown scaling mode %d", scalingMode);
            return BAD_VALUE;
    }

    sp<IConsumerListener> frameAvailableListener;
    sp<IConsumerListener> frameReplacedListener;
    int callbackTicket = 0;
    uint64_t currentFrameNumber = 0;
    BufferItem item;
    { // Autolock scope
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("queueBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
            BQ_LOGE("queueBuffer: BufferQueue has no connected producer");
            return NO_INIT;
        }

        if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
            BQ_LOGE("queueBuffer: slot index %d out of range [0, %d)",
                    slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
            return BAD_VALUE;
        } else if (!mSlots[slot].mBufferState.isDequeued()) {
            BQ_LOGE("queueBuffer: slot %d is not owned by the producer "
                    "(state = %s)", slot, mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        } else if (!mSlots[slot].mRequestBufferCalled) {
            BQ_LOGE("queueBuffer: slot %d was queued without requesting "
                    "a buffer", slot);
            return BAD_VALUE;
        }

        // If shared buffer mode has just been enabled, cache the slot of the
        // first buffer that is queued and mark it as the shared buffer.
        if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
                BufferQueueCore::INVALID_BUFFER_SLOT) {
            mCore->mSharedBufferSlot = slot;
            mSlots[slot].mBufferState.mShared = true;
        }

        BQ_LOGV("queueBuffer: slot=%d/%" PRIu64 " time=%" PRIu64 " dataSpace=%d"
                " validHdrMetadataTypes=0x%x crop=[%d,%d,%d,%d] transform=%#x scale=%s",
                slot, mCore->mFrameCounter + 1, requestedPresentTimestamp, dataSpace,
                hdrMetadata.validTypes, crop.left, crop.top, crop.right, crop.bottom,
                transform,
                BufferItem::scalingModeName(static_cast<uint32_t>(scalingMode)));

        const sp<GraphicBuffer>& graphicBuffer(mSlots[slot].mGraphicBuffer);
        Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
        Rect croppedRect(Rect::EMPTY_RECT);
        crop.intersect(bufferRect, &croppedRect);
        if (croppedRect != crop) {
            BQ_LOGE("queueBuffer: crop rect is not contained within the "
                    "buffer in slot %d", slot);
            return BAD_VALUE;
        }

        // Override UNKNOWN dataspace with consumer default
        if (dataSpace == HAL_DATASPACE_UNKNOWN) {
            dataSpace = mCore->mDefaultBufferDataSpace;
        }

        mSlots[slot].mFence = acquireFence;
        mSlots[slot].mBufferState.queue();

        // Increment the frame counter and store a local version of it
        // for use outside the lock on mCore->mMutex.
        ++mCore->mFrameCounter;
        currentFrameNumber = mCore->mFrameCounter;
        mSlots[slot].mFrameNumber = currentFrameNumber;

        item.mAcquireCalled = mSlots[slot].mAcquireCalled;
        item.mGraphicBuffer = mSlots[slot].mGraphicBuffer;
        item.mCrop = crop;
        item.mTransform = transform &
                ~static_cast<uint32_t>(NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY);
        item.mTransformToDisplayInverse =
                (transform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) != 0;
        item.mScalingMode = static_cast<uint32_t>(scalingMode);
        item.mTimestamp = requestedPresentTimestamp;
        item.mIsAutoTimestamp = isAutoTimestamp;
        item.mDataSpace = dataSpace;
        item.mHdrMetadata = hdrMetadata;
        item.mFrameNumber = currentFrameNumber;
        item.mSlot = slot;
        item.mFence = acquireFence;
        item.mFenceTime = acquireFenceTime;
        item.mIsDroppable = mCore->mAsyncMode ||
                (mConsumerIsSurfaceFlinger && mCore->mQueueBufferCanDrop) ||
                (mCore->mLegacyBufferDrop && mCore->mQueueBufferCanDrop) ||
                (mCore->mSharedBufferMode && mCore->mSharedBufferSlot == slot);
        item.mSurfaceDamage = surfaceDamage;
        item.mQueuedBuffer = true;
        item.mAutoRefresh = mCore->mSharedBufferMode && mCore->mAutoRefresh;
        item.mApi = mCore->mConnectedApi;

        mStickyTransform = stickyTransform;

        // Cache the shared buffer data so that the BufferItem can be recreated.
        if (mCore->mSharedBufferMode) {
            mCore->mSharedBufferCache.crop = crop;
            mCore->mSharedBufferCache.transform = transform;
            mCore->mSharedBufferCache.scalingMode = static_cast<uint32_t>(
                    scalingMode);
            mCore->mSharedBufferCache.dataspace = dataSpace;
        }

        output->bufferReplaced = false;
        if (mCore->mQueue.empty()) {
            // When the queue is empty, we can ignore mDequeueBufferCannotBlock
            // and simply queue this buffer
            mCore->mQueue.push_back(item);
            frameAvailableListener = mCore->mConsumerListener;
        } else {
            // When the queue is not empty, we need to look at the last buffer
            // in the queue to see if we need to replace it
            const BufferItem& last = mCore->mQueue.itemAt(
                    mCore->mQueue.size() - 1);
            if (last.mIsDroppable) {
                if (!last.mIsStale) {
                    mSlots[last.mSlot].mBufferState.freeQueued();

                    // After leaving shared buffer mode, the shared buffer will
                    // still be around. Mark it as no longer shared if this
                    // operation causes it to be free.
                    if (!mCore->mSharedBufferMode &&
                            mSlots[last.mSlot].mBufferState.isFree()) {
                        mSlots[last.mSlot].mBufferState.mShared = false;
                    }
                    // Don't put the shared buffer on the free list.
                    if (!mSlots[last.mSlot].mBufferState.isShared()) {
                        mCore->mActiveBuffers.erase(last.mSlot);
                        mCore->mFreeBuffers.push_back(last.mSlot);
                        output->bufferReplaced = true;
                    }
                }

                // Make sure to merge the damage rect from the frame we're about
                // to drop into the new frame's damage rect.
                if (last.mSurfaceDamage.bounds() == Rect::INVALID_RECT ||
                        item.mSurfaceDamage.bounds() == Rect::INVALID_RECT) {
                    item.mSurfaceDamage = Region::INVALID_REGION;
                } else {
                    item.mSurfaceDamage |= last.mSurfaceDamage;
                }

                // Overwrite the droppable buffer with the incoming one
                mCore->mQueue.editItemAt(mCore->mQueue.size() - 1) = item;
                frameReplacedListener = mCore->mConsumerListener;
            } else {
                mCore->mQueue.push_back(item);
                frameAvailableListener = mCore->mConsumerListener;
            }
        }

        mCore->mBufferHasBeenQueued = true;
        mCore->mDequeueCondition.notify_all();
        mCore->mLastQueuedSlot = slot;

        output->width = mCore->mDefaultWidth;
        output->height = mCore->mDefaultHeight;
        output->transformHint = mCore->mTransformHint;
        output->numPendingBuffers = static_cast<uint32_t>(mCore->mQueue.size());
        output->nextFrameNumber = mCore->mFrameCounter + 1;

        ATRACE_INT(mCore->mConsumerName.string(),
                static_cast<int32_t>(mCore->mQueue.size()));
        mCore->mOccupancyTracker.registerOccupancyChange(mCore->mQueue.size());

        // Take a ticket for the callback functions
        callbackTicket = mNextCallbackTicket++;

        VALIDATE_CONSISTENCY();
    } // Autolock scope

    // It is okay not to clear the GraphicBuffer when the consumer is SurfaceFlinger because
    // it is guaranteed that the BufferQueue is inside SurfaceFlinger's process and
    // there will be no Binder call
    if (!mConsumerIsSurfaceFlinger) {
        item.mGraphicBuffer.clear();
    }

    // Update and get FrameEventHistory.
    nsecs_t postedTime = systemTime(SYSTEM_TIME_MONOTONIC);
    NewFrameEventsEntry newFrameEventsEntry = {
        currentFrameNumber,
        postedTime,
        requestedPresentTimestamp,
        std::move(acquireFenceTime)
    };
    addAndGetFrameTimestamps(&newFrameEventsEntry,
            getFrameTimestamps ? &output->frameTimestamps : nullptr);

    // Call back without the main BufferQueue lock held, but with the callback
    // lock held so we can ensure that callbacks occur in order

    int connectedApi;
    sp<Fence> lastQueuedFence;

    { // scope for the lock
        std::unique_lock<std::mutex> lock(mCallbackMutex);
        while (callbackTicket != mCurrentCallbackTicket) {
            mCallbackCondition.wait(lock);
        }

        if (frameAvailableListener != nullptr) {
            frameAvailableListener->onFrameAvailable(item);
        } else if (frameReplacedListener != nullptr) {
            frameReplacedListener->onFrameReplaced(item);
        }

        connectedApi = mCore->mConnectedApi;
        lastQueuedFence = std::move(mLastQueueBufferFence);

        mLastQueueBufferFence = std::move(acquireFence);
        mLastQueuedCrop = item.mCrop;
        mLastQueuedTransform = item.mTransform;

        ++mCurrentCallbackTicket;
        mCallbackCondition.notify_all();
    }

    // Wait without lock held
    if (connectedApi == NATIVE_WINDOW_API_EGL) {
        // Waiting here allows for two full buffers to be queued but not a
        // third. In the event that frames take varying time, this makes a
        // small trade-off in favor of latency rather than throughput.
        lastQueuedFence->waitForever("Throttling EGL Production");
    }

    return NO_ERROR;
}

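// cancelBuffer returns a dequeued slot to the BufferQueue without queueing a
// frame; the provided fence is stored in the slot so later users of the
// buffer can wait for any pending producer work.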
status_t BufferQueueProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
    ATRACE_CALL();
    BQ_LOGV("cancelBuffer: slot %d", slot);
    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("cancelBuffer: BufferQueue has been abandoned");
        return NO_INIT;
    }

    if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("cancelBuffer: BufferQueue has no connected producer");
        return NO_INIT;
    }

    if (mCore->mSharedBufferMode) {
        BQ_LOGE("cancelBuffer: cannot cancel a buffer in shared buffer mode");
        return BAD_VALUE;
    }

    if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
        BQ_LOGE("cancelBuffer: slot index %d out of range [0, %d)",
                slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
        return BAD_VALUE;
    } else if (!mSlots[slot].mBufferState.isDequeued()) {
        BQ_LOGE("cancelBuffer: slot %d is not owned by the producer "
                "(state = %s)", slot, mSlots[slot].mBufferState.string());
        return BAD_VALUE;
    } else if (fence == nullptr) {
        BQ_LOGE("cancelBuffer: fence is NULL");
        return BAD_VALUE;
    }

    mSlots[slot].mBufferState.cancel();

    // After leaving shared buffer mode, the shared buffer will still be around.
    // Mark it as no longer shared if this operation causes it to be free.
    if (!mCore->mSharedBufferMode && mSlots[slot].mBufferState.isFree()) {
        mSlots[slot].mBufferState.mShared = false;
    }

    // Don't put the shared buffer on the free list.
    if (!mSlots[slot].mBufferState.isShared()) {
        mCore->mActiveBuffers.erase(slot);
        mCore->mFreeBuffers.push_back(slot);
    }

    mSlots[slot].mFence = fence;
    mCore->mDequeueCondition.notify_all();
    VALIDATE_CONSISTENCY();

    return NO_ERROR;
}

int BufferQueueProducer::query(int what, int *outValue) {
    ATRACE_CALL();
    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (outValue == nullptr) {
        BQ_LOGE("query: outValue was NULL");
        return BAD_VALUE;
    }

    if (mCore->mIsAbandoned) {
        BQ_LOGE("query: BufferQueue has been abandoned");
        return NO_INIT;
    }

    int value;
    switch (what) {
        case NATIVE_WINDOW_WIDTH:
            value = static_cast<int32_t>(mCore->mDefaultWidth);
            break;
        case NATIVE_WINDOW_HEIGHT:
            value = static_cast<int32_t>(mCore->mDefaultHeight);
            break;
        case NATIVE_WINDOW_FORMAT:
            value = static_cast<int32_t>(mCore->mDefaultBufferFormat);
            break;
        case NATIVE_WINDOW_LAYER_COUNT:
            // All BufferQueue buffers have a single layer.
            value = BQ_LAYER_COUNT;
            break;
        case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
            value = mCore->getMinUndequeuedBufferCountLocked();
            break;
        case NATIVE_WINDOW_STICKY_TRANSFORM:
            value = static_cast<int32_t>(mStickyTransform);
            break;
        case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND:
            value = (mCore->mQueue.size() > 1);
            break;
        case NATIVE_WINDOW_CONSUMER_USAGE_BITS:
            // deprecated; higher 32 bits are truncated
            value = static_cast<int32_t>(mCore->mConsumerUsageBits);
            break;
        case NATIVE_WINDOW_DEFAULT_DATASPACE:
            value = static_cast<int32_t>(mCore->mDefaultBufferDataSpace);
            break;
        case NATIVE_WINDOW_BUFFER_AGE:
            if (mCore->mBufferAge > INT32_MAX) {
                value = 0;
            } else {
                value = static_cast<int32_t>(mCore->mBufferAge);
            }
            break;
        case NATIVE_WINDOW_CONSUMER_IS_PROTECTED:
            value = static_cast<int32_t>(mCore->mConsumerIsProtected);
            break;
        case NATIVE_WINDOW_MAX_BUFFER_COUNT:
            value = static_cast<int32_t>(mCore->mMaxBufferCount);
            break;
        default:
            return BAD_VALUE;
    }

    BQ_LOGV("query: %d? %d", what, value);
    *outValue = value;
    return NO_ERROR;
}

status_t BufferQueueProducer::connect(const sp<IProducerListener>& listener,
        int api, bool producerControlledByApp, QueueBufferOutput *output) {
    ATRACE_CALL();
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mConsumerName = mCore->mConsumerName;
    BQ_LOGV("connect: api=%d producerControlledByApp=%s", api,
            producerControlledByApp ? "true" : "false");

    if (mCore->mIsAbandoned) {
        BQ_LOGE("connect: BufferQueue has been abandoned");
        return NO_INIT;
    }

    if (mCore->mConsumerListener == nullptr) {
        BQ_LOGE("connect: BufferQueue has no consumer");
        return NO_INIT;
    }

    if (output == nullptr) {
        BQ_LOGE("connect: output was NULL");
        return BAD_VALUE;
    }

    if (mCore->mConnectedApi != BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("connect: already connected (cur=%d req=%d)",
                mCore->mConnectedApi, api);
        return BAD_VALUE;
    }

    int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode,
            mDequeueTimeout < 0 ?
            mCore->mConsumerControlledByApp && producerControlledByApp : false,
            mCore->mMaxBufferCount) -
            mCore->getMaxBufferCountLocked();
    if (!mCore->adjustAvailableSlotsLocked(delta)) {
        BQ_LOGE("connect: BufferQueue failed to adjust the number of available "
                "slots. Delta = %d", delta);
        return BAD_VALUE;
    }

    int status = NO_ERROR;
    switch (api) {
        case NATIVE_WINDOW_API_EGL:
        case NATIVE_WINDOW_API_CPU:
        case NATIVE_WINDOW_API_MEDIA:
        case NATIVE_WINDOW_API_CAMERA:
            mCore->mConnectedApi = api;

            output->width = mCore->mDefaultWidth;
            output->height = mCore->mDefaultHeight;
            output->transformHint = mCore->mTransformHint;
            output->numPendingBuffers =
                    static_cast<uint32_t>(mCore->mQueue.size());
            output->nextFrameNumber = mCore->mFrameCounter + 1;
            output->bufferReplaced = false;

            if (listener != nullptr) {
                // Set up a death notification so that we can disconnect
                // automatically if the remote producer dies
                if (IInterface::asBinder(listener)->remoteBinder() != nullptr) {
                    status = IInterface::asBinder(listener)->linkToDeath(
                            static_cast<IBinder::DeathRecipient*>(this));
                    if (status != NO_ERROR) {
                        BQ_LOGE("connect: linkToDeath failed: %s (%d)",
                                strerror(-status), status);
                    }
                    mCore->mLinkedToDeath = listener;
                }
                mCore->mConnectedProducerListener = listener;
                mCore->mBufferReleasedCbEnabled = listener->needsReleaseNotify();
            }
            break;
        default:
            BQ_LOGE("connect: unknown API %d", api);
            status = BAD_VALUE;
            break;
    }
    mCore->mConnectedPid = BufferQueueThreadState::getCallingPid();
    mCore->mBufferHasBeenQueued = false;
    mCore->mDequeueBufferCannotBlock = false;
    mCore->mQueueBufferCanDrop = false;
    mCore->mLegacyBufferDrop = true;
    if (mCore->mConsumerControlledByApp && producerControlledByApp) {
        mCore->mDequeueBufferCannotBlock = mDequeueTimeout < 0;
        mCore->mQueueBufferCanDrop = mDequeueTimeout <= 0;
    }

    mCore->mAllowAllocation = true;
    VALIDATE_CONSISTENCY();
    return status;
}

status_t BufferQueueProducer::disconnect(int api, DisconnectMode mode) {
    ATRACE_CALL();
    BQ_LOGV("disconnect: api %d", api);

    int status = NO_ERROR;
    sp<IConsumerListener> listener;
    { // Autolock scope
        std::unique_lock<std::mutex> lock(mCore->mMutex);

        if (mode == DisconnectMode::AllLocal) {
            if (BufferQueueThreadState::getCallingPid() != mCore->mConnectedPid) {
                return NO_ERROR;
            }
            api = BufferQueueCore::CURRENTLY_CONNECTED_API;
        }

        mCore->waitWhileAllocatingLocked(lock);

        if (mCore->mIsAbandoned) {
            // It's not really an error to disconnect after the surface has
            // been abandoned; it should just be a no-op.
            return NO_ERROR;
        }

        if (api == BufferQueueCore::CURRENTLY_CONNECTED_API) {
            if (mCore->mConnectedApi == NATIVE_WINDOW_API_MEDIA) {
                ALOGD("About to force-disconnect API_MEDIA, mode=%d", mode);
            }
            api = mCore->mConnectedApi;
            // If we're asked to disconnect the currently connected api but
            // nobody is connected, it's not really an error.
            if (api == BufferQueueCore::NO_CONNECTED_API) {
                return NO_ERROR;
            }
        }

        switch (api) {
            case NATIVE_WINDOW_API_EGL:
            case NATIVE_WINDOW_API_CPU:
            case NATIVE_WINDOW_API_MEDIA:
            case NATIVE_WINDOW_API_CAMERA:
                if (mCore->mConnectedApi == api) {
                    mCore->freeAllBuffersLocked();

                    // Remove our death notification callback if we have one
                    if (mCore->mLinkedToDeath != nullptr) {
                        sp<IBinder> token =
                                IInterface::asBinder(mCore->mLinkedToDeath);
                        // This can fail if we're here because of the death
                        // notification, but we just ignore it
                        token->unlinkToDeath(
                                static_cast<IBinder::DeathRecipient*>(this));
                    }
                    mCore->mSharedBufferSlot =
                            BufferQueueCore::INVALID_BUFFER_SLOT;
                    mCore->mLinkedToDeath = nullptr;
                    mCore->mConnectedProducerListener = nullptr;
                    mCore->mConnectedApi = BufferQueueCore::NO_CONNECTED_API;
                    mCore->mConnectedPid = -1;
                    mCore->mSidebandStream.clear();
                    mCore->mDequeueCondition.notify_all();
                    listener = mCore->mConsumerListener;
                } else if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
                    BQ_LOGE("disconnect: not connected (req=%d)", api);
                    status = NO_INIT;
                } else {
                    BQ_LOGE("disconnect: still connected to another API "
                            "(cur=%d req=%d)", mCore->mConnectedApi, api);
                    status = BAD_VALUE;
                }
                break;
            default:
                BQ_LOGE("disconnect: unknown API %d", api);
                status = BAD_VALUE;
                break;
        }
    } // Autolock scope

    // Call back without lock held
    if (listener != nullptr) {
        listener->onBuffersReleased();
        listener->onDisconnect();
    }

    return status;
}

status_t BufferQueueProducer::setSidebandStream(const sp<NativeHandle>& stream) {
    sp<IConsumerListener> listener;
    { // Autolock scope
        std::lock_guard<std::mutex> _l(mCore->mMutex);
        mCore->mSidebandStream = stream;
        listener = mCore->mConsumerListener;
    } // Autolock scope

    if (listener != nullptr) {
        listener->onSidebandStreamChanged();
    }
    return NO_ERROR;
}

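// allocateBuffers pre-allocates buffers ahead of dequeueBuffer. Each loop
// iteration allocates at most one buffer with the lock dropped, then
// re-checks the requested size/format/usage under the lock and retries if
// they changed in the meantime.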
void BufferQueueProducer::allocateBuffers(uint32_t width, uint32_t height,
        PixelFormat format, uint64_t usage) {
    ATRACE_CALL();
    while (true) {
        size_t newBufferCount = 0;
        uint32_t allocWidth = 0;
        uint32_t allocHeight = 0;
        PixelFormat allocFormat = PIXEL_FORMAT_UNKNOWN;
        uint64_t allocUsage = 0;
        std::string allocName;
        { // Autolock scope
            std::unique_lock<std::mutex> lock(mCore->mMutex);
            mCore->waitWhileAllocatingLocked(lock);

            if (!mCore->mAllowAllocation) {
                BQ_LOGE("allocateBuffers: allocation is not allowed for this "
                        "BufferQueue");
                return;
            }

            // Only allocate one buffer at a time to reduce risks of overlapping an allocation from
            // both allocateBuffers and dequeueBuffer.
            newBufferCount = mCore->mFreeSlots.empty() ? 0 : 1;
            if (newBufferCount == 0) {
                return;
            }

            allocWidth = width > 0 ? width : mCore->mDefaultWidth;
            allocHeight = height > 0 ? height : mCore->mDefaultHeight;
            allocFormat = format != 0 ? format : mCore->mDefaultBufferFormat;
            allocUsage = usage | mCore->mConsumerUsageBits;
            allocName.assign(mCore->mConsumerName.string(), mCore->mConsumerName.size());

            mCore->mIsAllocating = true;
        } // Autolock scope

        Vector<sp<GraphicBuffer>> buffers;
        for (size_t i = 0; i < newBufferCount; ++i) {
            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
                    allocWidth, allocHeight, allocFormat, BQ_LAYER_COUNT,
                    allocUsage, allocName);

            status_t result = graphicBuffer->initCheck();

            if (result != NO_ERROR) {
                BQ_LOGE("allocateBuffers: failed to allocate buffer (%u x %u, format"
                        " %u, usage %#" PRIx64 ")", width, height, format, usage);
                std::lock_guard<std::mutex> lock(mCore->mMutex);
                mCore->mIsAllocating = false;
                mCore->mIsAllocatingCondition.notify_all();
                return;
            }
            buffers.push_back(graphicBuffer);
        }

        { // Autolock scope
            std::unique_lock<std::mutex> lock(mCore->mMutex);
            uint32_t checkWidth = width > 0 ? width : mCore->mDefaultWidth;
            uint32_t checkHeight = height > 0 ? height : mCore->mDefaultHeight;
            PixelFormat checkFormat = format != 0 ?
                    format : mCore->mDefaultBufferFormat;
            uint64_t checkUsage = usage | mCore->mConsumerUsageBits;
            if (checkWidth != allocWidth || checkHeight != allocHeight ||
                    checkFormat != allocFormat || checkUsage != allocUsage) {
                // Something changed while we released the lock. Retry.
                BQ_LOGV("allocateBuffers: size/format/usage changed while allocating. Retrying.");
                mCore->mIsAllocating = false;
                mCore->mIsAllocatingCondition.notify_all();
                continue;
            }

            for (size_t i = 0; i < newBufferCount; ++i) {
                if (mCore->mFreeSlots.empty()) {
                    BQ_LOGV("allocateBuffers: a slot was occupied while "
                            "allocating. Dropping allocated buffer.");
                    continue;
                }
                auto slot = mCore->mFreeSlots.begin();
                mCore->clearBufferSlotLocked(*slot); // Clean up the slot first
                mSlots[*slot].mGraphicBuffer = buffers[i];
                mSlots[*slot].mFence = Fence::NO_FENCE;

                // freeBufferLocked puts this slot on the free slots list. Since
                // we then attached a buffer, move the slot to the free buffer list.
                mCore->mFreeBuffers.push_front(*slot);

                BQ_LOGV("allocateBuffers: allocated a new buffer in slot %d",
                        *slot);

                // Make sure the erase is done after all uses of the slot
                // iterator since it will be invalid after this point.
                mCore->mFreeSlots.erase(slot);
            }

            mCore->mIsAllocating = false;
            mCore->mIsAllocatingCondition.notify_all();
            VALIDATE_CONSISTENCY();

            // If dequeue is waiting to allocate a buffer, release the lock until it's not
            // waiting anymore so it can use the buffer we just allocated.
            while (mDequeueWaitingForAllocation) {
                mDequeueWaitingForAllocationCondition.wait(lock);
            }
        } // Autolock scope
    }
}

status_t BufferQueueProducer::allowAllocation(bool allow) {
    ATRACE_CALL();
    BQ_LOGV("allowAllocation: %s", allow ? "true" : "false");

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mAllowAllocation = allow;
    return NO_ERROR;
}

status_t BufferQueueProducer::setGenerationNumber(uint32_t generationNumber) {
    ATRACE_CALL();
    BQ_LOGV("setGenerationNumber: %u", generationNumber);

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mGenerationNumber = generationNumber;
    return NO_ERROR;
}

String8 BufferQueueProducer::getConsumerName() const {
    ATRACE_CALL();
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    BQ_LOGV("getConsumerName: %s", mConsumerName.string());
    return mConsumerName;
}

status_t BufferQueueProducer::setSharedBufferMode(bool sharedBufferMode) {
    ATRACE_CALL();
    BQ_LOGV("setSharedBufferMode: %d", sharedBufferMode);

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    if (!sharedBufferMode) {
        mCore->mSharedBufferSlot = BufferQueueCore::INVALID_BUFFER_SLOT;
    }
    mCore->mSharedBufferMode = sharedBufferMode;
    return NO_ERROR;
}

status_t BufferQueueProducer::setAutoRefresh(bool autoRefresh) {
    ATRACE_CALL();
    BQ_LOGV("setAutoRefresh: %d", autoRefresh);

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    mCore->mAutoRefresh = autoRefresh;
    return NO_ERROR;
}

status_t BufferQueueProducer::setDequeueTimeout(nsecs_t timeout) {
    ATRACE_CALL();
    BQ_LOGV("setDequeueTimeout: %" PRId64, timeout);

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    bool dequeueBufferCannotBlock =
            timeout >= 0 ? false : mCore->mDequeueBufferCannotBlock;
    int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode, dequeueBufferCannotBlock,
            mCore->mMaxBufferCount) - mCore->getMaxBufferCountLocked();
    if (!mCore->adjustAvailableSlotsLocked(delta)) {
        BQ_LOGE("setDequeueTimeout: BufferQueue failed to adjust the number of "
                "available slots. Delta = %d", delta);
        return BAD_VALUE;
    }

    mDequeueTimeout = timeout;
    mCore->mDequeueBufferCannotBlock = dequeueBufferCannotBlock;
    if (timeout > 0) {
        mCore->mQueueBufferCanDrop = false;
    }

    VALIDATE_CONSISTENCY();
    return NO_ERROR;
}

status_t BufferQueueProducer::setLegacyBufferDrop(bool drop) {
    ATRACE_CALL();
    BQ_LOGV("setLegacyBufferDrop: drop = %d", drop);

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mLegacyBufferDrop = drop;
    return NO_ERROR;
}

status_t BufferQueueProducer::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
        sp<Fence>* outFence, float outTransformMatrix[16]) {
    ATRACE_CALL();
    BQ_LOGV("getLastQueuedBuffer");

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    if (mCore->mLastQueuedSlot == BufferItem::INVALID_BUFFER_SLOT) {
        *outBuffer = nullptr;
        *outFence = Fence::NO_FENCE;
        return NO_ERROR;
    }

    *outBuffer = mSlots[mCore->mLastQueuedSlot].mGraphicBuffer;
    *outFence = mLastQueueBufferFence;

    // Currently only SurfaceFlinger internally ever changes
    // GLConsumer's filtering mode, so we just use 'true' here as
    // this is slightly specialized for the current client of this API,
    // which does want filtering.
    GLConsumer::computeTransformMatrix(outTransformMatrix,
            mSlots[mCore->mLastQueuedSlot].mGraphicBuffer, mLastQueuedCrop,
            mLastQueuedTransform, true /* filter */);

    return NO_ERROR;
}

void BufferQueueProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
    addAndGetFrameTimestamps(nullptr, outDelta);
}

void BufferQueueProducer::addAndGetFrameTimestamps(
        const NewFrameEventsEntry* newTimestamps,
        FrameEventHistoryDelta* outDelta) {
    if (newTimestamps == nullptr && outDelta == nullptr) {
        return;
    }

    ATRACE_CALL();
    BQ_LOGV("addAndGetFrameTimestamps");
    sp<IConsumerListener> listener;
    {
        std::lock_guard<std::mutex> lock(mCore->mMutex);
        listener = mCore->mConsumerListener;
    }
    if (listener != nullptr) {
        listener->addAndGetFrameTimestamps(newTimestamps, outDelta);
    }
}

void BufferQueueProducer::binderDied(const wp<android::IBinder>& /* who */) {
    // If we're here, it means that a producer we were connected to died.
    // We're guaranteed that we are still connected to it because we remove
    // this callback upon disconnect. It's therefore safe to read mConnectedApi
    // without synchronization here.
    int api = mCore->mConnectedApi;
    disconnect(api);
}

status_t BufferQueueProducer::getUniqueId(uint64_t* outId) const {
    BQ_LOGV("getUniqueId");

    *outId = mCore->mUniqueId;
    return NO_ERROR;
}

status_t BufferQueueProducer::getConsumerUsage(uint64_t* outUsage) const {
    BQ_LOGV("getConsumerUsage");

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    *outUsage = mCore->mConsumerUsageBits;
    return NO_ERROR;
}

} // namespace android