/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ReliableSurface.h"

#include <private/android/AHardwareBufferHelpers.h>

namespace android::uirenderer::renderthread {

// TODO: Re-enable after addressing more of the TODOs
// With this disabled we won't have a good up-front signal that the surface is no longer valid;
// however, we can at least handle that reactively post-draw. There's just not a good mechanism
// to propagate this error back to the caller.
constexpr bool DISABLE_BUFFER_PREFETCH = true;

// TODO: Make Surface less protected
// This exists because perform() is varargs and ANativeWindow has no va_list perform, so
// wrapping/chaining it is hard. Telling the compiler to ignore protected is easy, so we do
// that instead.
struct SurfaceExposer : Surface {
    // Make warnings happy
    SurfaceExposer() = delete;

    using Surface::cancelBuffer;
    using Surface::dequeueBuffer;
    using Surface::lockBuffer_DEPRECATED;
    using Surface::perform;
    using Surface::queueBuffer;
    using Surface::setBufferCount;
    using Surface::setSwapInterval;
};

#define callProtected(surface, func, ...) ((*surface).*&SurfaceExposer::func)(__VA_ARGS__)
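// For example, callProtected(mSurface, dequeueBuffer, &buffer, &fenceFd) expands to
// ((*mSurface).*&SurfaceExposer::dequeueBuffer)(&buffer, &fenceFd), invoking the protected
// Surface::dequeueBuffer through the pointer-to-member exposed by SurfaceExposer.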
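// Wrap the given Surface and install our ANativeWindow hooks so that callers using this object
// as an ANativeWindow are routed through ReliableSurface's error-tolerant wrappers below.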
ReliableSurface::ReliableSurface(sp<Surface>&& surface) : mSurface(std::move(surface)) {
    LOG_ALWAYS_FATAL_IF(!mSurface, "Error, unable to wrap a nullptr");

    ANativeWindow::setSwapInterval = hook_setSwapInterval;
    ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
    ANativeWindow::cancelBuffer = hook_cancelBuffer;
    ANativeWindow::queueBuffer = hook_queueBuffer;
    ANativeWindow::query = hook_query;
    ANativeWindow::perform = hook_perform;

    ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
    ANativeWindow::cancelBuffer_DEPRECATED = hook_cancelBuffer_DEPRECATED;
    ANativeWindow::lockBuffer_DEPRECATED = hook_lockBuffer_DEPRECATED;
    ANativeWindow::queueBuffer_DEPRECATED = hook_queueBuffer_DEPRECATED;
}

ReliableSurface::~ReliableSurface() {
    clearReservedBuffer();
}

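// Track the usage and pixel format requested via perform() so that, if we ever need to switch
// to the scratch fallback buffer, it can be allocated with matching parameters.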
void ReliableSurface::perform(int operation, va_list args) {
    std::lock_guard _lock{mMutex};

    switch (operation) {
        case NATIVE_WINDOW_SET_USAGE:
            mUsage = va_arg(args, uint32_t);
            break;
        case NATIVE_WINDOW_SET_USAGE64:
            mUsage = va_arg(args, uint64_t);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
            /* width */ va_arg(args, uint32_t);
            /* height */ va_arg(args, uint32_t);
            mFormat = va_arg(args, PixelFormat);
            break;
        case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
            mFormat = va_arg(args, PixelFormat);
            break;
    }
}

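// Try to dequeue (reserve) a buffer ahead of the next draw so that an invalid surface can be
// detected up front. With DISABLE_BUFFER_PREFETCH set, this returns early and no prefetch occurs.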
int ReliableSurface::reserveNext() {
    {
        std::lock_guard _lock{mMutex};
        if (mReservedBuffer) {
            ALOGW("reserveNext called but there was already a buffer reserved?");
            return OK;
        }
        if (mInErrorState) {
            return UNKNOWN_ERROR;
        }
        if (mHasDequeuedBuffer) {
            return OK;
        }
        if constexpr (DISABLE_BUFFER_PREFETCH) {
            return OK;
        }
    }

    // TODO: Update this to better handle when requested dimensions have changed
    // Currently the driver does this via query + perform but that's after we've already
    // reserved a buffer. Should we do that logic instead? Or should we drop
    // the backing Surface to the ground and go full manual on the IGraphicBufferProducer instead?

    int fenceFd = -1;
    ANativeWindowBuffer* buffer = nullptr;
    int result = callProtected(mSurface, dequeueBuffer, &buffer, &fenceFd);

    {
        std::lock_guard _lock{mMutex};
        LOG_ALWAYS_FATAL_IF(mReservedBuffer, "race condition in reserveNext");
        mReservedBuffer = buffer;
        mReservedFenceFd.reset(fenceFd);
    }

    return result;
}

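// Return any reserved-but-unused buffer to the wrapped Surface (via cancelBuffer) and reset the
// reservation state.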
void ReliableSurface::clearReservedBuffer() {
    ANativeWindowBuffer* buffer = nullptr;
    int releaseFd = -1;
    {
        std::lock_guard _lock{mMutex};
        if (mReservedBuffer) {
            ALOGW("Reserved buffer %p was never used", mReservedBuffer);
            buffer = mReservedBuffer;
            releaseFd = mReservedFenceFd.release();
        }
        mReservedBuffer = nullptr;
        mReservedFenceFd.reset();
        mHasDequeuedBuffer = false;
    }
    if (buffer) {
        callProtected(mSurface, cancelBuffer, buffer, releaseFd);
    }
}

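// Fallback (scratch) buffers were never dequeued from the wrapped Surface, so only close the
// fence instead of forwarding the cancel.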
int ReliableSurface::cancelBuffer(ANativeWindowBuffer* buffer, int fenceFd) {
    clearReservedBuffer();
    if (isFallbackBuffer(buffer)) {
        if (fenceFd > 0) {
            close(fenceFd);
        }
        return OK;
    }
    int result = callProtected(mSurface, cancelBuffer, buffer, fenceFd);
    return result;
}

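// Hand out the reserved buffer if one exists; otherwise dequeue from the wrapped Surface and,
// on failure, switch to the 1x1 scratch fallback buffer so rendering can continue and the error
// can be handled reactively after the frame.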
int ReliableSurface::dequeueBuffer(ANativeWindowBuffer** buffer, int* fenceFd) {
    {
        std::lock_guard _lock{mMutex};
        if (mReservedBuffer) {
            *buffer = mReservedBuffer;
            *fenceFd = mReservedFenceFd.release();
            mReservedBuffer = nullptr;
            return OK;
        }
    }

    int result = callProtected(mSurface, dequeueBuffer, buffer, fenceFd);
    if (result != OK) {
        ALOGW("dequeueBuffer failed, error = %d; switching to fallback", result);
        *buffer = acquireFallbackBuffer();
        *fenceFd = -1;
        return *buffer ? OK : INVALID_OPERATION;
    } else {
        std::lock_guard _lock{mMutex};
        mHasDequeuedBuffer = true;
    }
    return OK;
}

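// Queued fallback buffers are silently dropped (only their fence is closed); real buffers are
// forwarded to the wrapped Surface.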
int ReliableSurface::queueBuffer(ANativeWindowBuffer* buffer, int fenceFd) {
    clearReservedBuffer();

    if (isFallbackBuffer(buffer)) {
        if (fenceFd > 0) {
            close(fenceFd);
        }
        return OK;
    }

    int result = callProtected(mSurface, queueBuffer, buffer, fenceFd);
    return result;
}

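// A buffer is a fallback buffer if it aliases the scratch AHardwareBuffer handed out by
// acquireFallbackBuffer().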
bool ReliableSurface::isFallbackBuffer(const ANativeWindowBuffer* windowBuffer) const {
    if (!mScratchBuffer || !windowBuffer) {
        return false;
    }
    ANativeWindowBuffer* scratchBuffer =
            AHardwareBuffer_to_ANativeWindowBuffer(mScratchBuffer.get());
    return windowBuffer == scratchBuffer;
}

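// Enter the error state and lazily allocate a 1x1 AHardwareBuffer matching the last-requested
// usage and format, to stand in for buffers we can no longer dequeue.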
ANativeWindowBuffer* ReliableSurface::acquireFallbackBuffer() {
    std::lock_guard _lock{mMutex};
    mInErrorState = true;

    if (mScratchBuffer) {
        return AHardwareBuffer_to_ANativeWindowBuffer(mScratchBuffer.get());
    }

    AHardwareBuffer_Desc desc;
    desc.usage = mUsage;
    desc.format = mFormat;
    desc.width = 1;
    desc.height = 1;
    desc.layers = 1;
    desc.rfu0 = 0;
    desc.rfu1 = 0;
    AHardwareBuffer* newBuffer = nullptr;
    int err = AHardwareBuffer_allocate(&desc, &newBuffer);
    if (err) {
        // Allocation failed, that sucks
        ALOGW("Failed to allocate scratch buffer, error=%d", err);
        return nullptr;
    }
    mScratchBuffer.reset(newBuffer);
    return AHardwareBuffer_to_ANativeWindowBuffer(newBuffer);
}

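// ANativeWindow hook trampolines: recover the owning ReliableSurface (or its wrapped Surface)
// from the window pointer and forward the call.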
Surface* ReliableSurface::getWrapped(const ANativeWindow* window) {
    return getSelf(window)->mSurface.get();
}

int ReliableSurface::hook_setSwapInterval(ANativeWindow* window, int interval) {
    return callProtected(getWrapped(window), setSwapInterval, interval);
}

int ReliableSurface::hook_dequeueBuffer(ANativeWindow* window, ANativeWindowBuffer** buffer,
                                        int* fenceFd) {
    return getSelf(window)->dequeueBuffer(buffer, fenceFd);
}

int ReliableSurface::hook_cancelBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
                                       int fenceFd) {
    return getSelf(window)->cancelBuffer(buffer, fenceFd);
}

int ReliableSurface::hook_queueBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
                                      int fenceFd) {
    return getSelf(window)->queueBuffer(buffer, fenceFd);
}

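// Emulate the deprecated synchronous dequeue: wait on the acquire fence before handing the
// buffer back to the caller.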
int ReliableSurface::hook_dequeueBuffer_DEPRECATED(ANativeWindow* window,
                                                   ANativeWindowBuffer** buffer) {
    ANativeWindowBuffer* buf;
    int fenceFd = -1;
    int result = window->dequeueBuffer(window, &buf, &fenceFd);
    if (result != OK) {
        return result;
    }
    sp<Fence> fence(new Fence(fenceFd));
    int waitResult = fence->waitForever("dequeueBuffer_DEPRECATED");
    if (waitResult != OK) {
        ALOGE("dequeueBuffer_DEPRECATED: Fence::wait returned an error: %d", waitResult);
        window->cancelBuffer(window, buf, -1);
        return waitResult;
    }
    *buffer = buf;
    return result;
}

int ReliableSurface::hook_cancelBuffer_DEPRECATED(ANativeWindow* window,
                                                  ANativeWindowBuffer* buffer) {
    return window->cancelBuffer(window, buffer, -1);
}

int ReliableSurface::hook_lockBuffer_DEPRECATED(ANativeWindow* window,
                                                ANativeWindowBuffer* buffer) {
    // This method is a no-op in Surface as well
    return OK;
}

int ReliableSurface::hook_queueBuffer_DEPRECATED(ANativeWindow* window,
                                                 ANativeWindowBuffer* buffer) {
    return window->queueBuffer(window, buffer, -1);
}

int ReliableSurface::hook_query(const ANativeWindow* window, int what, int* value) {
    return getWrapped(window)->query(what, value);
}

int ReliableSurface::hook_perform(ANativeWindow* window, int operation, ...) {
    // Drop the reserved buffer if there is one since this (probably) mutated buffer dimensions
    // TODO: Filter to things that only affect the reserved buffer
    // TODO: Can we mutate the reserved buffer in some cases?
    getSelf(window)->clearReservedBuffer();
    va_list args;
    va_start(args, operation);
    int result = callProtected(getWrapped(window), perform, operation, args);
    va_end(args);

    va_start(args, operation);
    getSelf(window)->perform(operation, args);
    va_end(args);

    return result;
}

}  // namespace android::uirenderer::renderthread