/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "HostConnection.h"

#include "cutils/properties.h"

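// When the GL encoders are not built (GOLDFISH_NO_GL), provide minimal no-op
// stand-ins so HostConnection still compiles and links without them.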
#ifdef GOLDFISH_NO_GL
struct gl_client_context_t {
    int placeholder;
};
class GLEncoder : public gl_client_context_t {
public:
    GLEncoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl_client_context_t *()) { }
};
struct gl2_client_context_t {
    int placeholder;
};
class GL2Encoder : public gl2_client_context_t {
public:
    GL2Encoder(IOStream*, ChecksumCalculator*) { }
    void setContextAccessor(gl2_client_context_t *()) { }
    void setNoHostError(bool) { }
    void setDrawCallFlushInterval(uint32_t) { }
    void setHasAsyncUnmapBuffer(int) { }
};
#else
#include "GLEncoder.h"
#include "GL2Encoder.h"
#endif

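// With GOLDFISH_VULKAN, the real Vulkan encoder and address-space streams are
// used; otherwise stub types are provided and the address-space stream
// factories abort, since that configuration is unsupported in such builds.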
#ifdef GOLDFISH_VULKAN
#include "VkEncoder.h"
#include "AddressSpaceStream.h"
#else
namespace goldfish_vk {
struct VkEncoder {
    VkEncoder(IOStream*) { }
    int placeholder;
};
} // namespace goldfish_vk
class QemuPipeStream;
typedef QemuPipeStream AddressSpaceStream;
AddressSpaceStream* createAddressSpaceStream(size_t bufSize) {
    ALOGE("%s: FATAL: Trying to create ASG stream in unsupported build\n", __func__);
    abort();
}
AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t bufSize) {
    ALOGE("%s: FATAL: Trying to create virtgpu ASG stream in unsupported build\n", __func__);
    abort();
}
#endif

using goldfish_vk::VkEncoder;

#include "ProcessPipe.h"
#include "QemuPipeStream.h"
#include "TcpStream.h"
#include "ThreadInfo.h"
#include <gralloc_cb_bp.h>
#include <unistd.h>

#ifdef VIRTIO_GPU

#include "VirtioGpuStream.h"
#include "VirtioGpuPipeStream.h"

#include <cros_gralloc_handle.h>
#include <drm/virtgpu_drm.h>
#include <xf86drm.h>

#endif

#undef LOG_TAG
#define LOG_TAG "HostConnection"
#if PLATFORM_SDK_VERSION < 26
#include <cutils/log.h>
#else
#include <log/log.h>
#endif

#define STREAM_BUFFER_SIZE (4*1024*1024)
#define STREAM_PORT_NUM 22468

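// Selects the transport to the host renderer. ro.kernel.qemu.gltransport is
// consulted first, then ro.boot.hardware.gltransport; if neither is set, or
// the value is unrecognized, the classic QEMU pipe is used.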
static HostConnectionType getConnectionTypeFromProperty() {
#ifdef __Fuchsia__
    return HOST_CONNECTION_ADDRESS_SPACE;
#else
    char transportValue[PROPERTY_VALUE_MAX] = "";
    property_get("ro.kernel.qemu.gltransport", transportValue, "");

    bool isValid = transportValue[0] != '\0';

    if (!isValid) {
        property_get("ro.boot.hardware.gltransport", transportValue, "");
        isValid = transportValue[0] != '\0';
    }

    if (!isValid) return HOST_CONNECTION_QEMU_PIPE;

    if (!strcmp("tcp", transportValue)) return HOST_CONNECTION_TCP;
    if (!strcmp("pipe", transportValue)) return HOST_CONNECTION_QEMU_PIPE;
    if (!strcmp("virtio-gpu", transportValue)) return HOST_CONNECTION_VIRTIO_GPU;
    if (!strcmp("asg", transportValue)) return HOST_CONNECTION_ADDRESS_SPACE;
    if (!strcmp("virtio-gpu-pipe", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_PIPE;
    if (!strcmp("virtio-gpu-asg", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;

    return HOST_CONNECTION_QEMU_PIPE;
#endif
}

static uint32_t getDrawCallFlushIntervalFromProperty() {
    char flushValue[PROPERTY_VALUE_MAX] = "";
    property_get("ro.kernel.qemu.gltransport.drawFlushInterval", flushValue, "");

    bool isValid = flushValue[0] != '\0';
    if (!isValid) return 800;

    long interval = strtol(flushValue, 0, 10);

    if (!interval) return 800;

    return (uint32_t)interval;
}

static GrallocType getGrallocTypeFromProperty() {
    char prop[PROPERTY_VALUE_MAX] = "";
    property_get("ro.hardware.gralloc", prop, "");

    bool isValid = prop[0] != '\0';

    if (!isValid) return GRALLOC_TYPE_RANCHU;

    if (!strcmp("ranchu", prop)) return GRALLOC_TYPE_RANCHU;
    if (!strcmp("minigbm", prop)) return GRALLOC_TYPE_MINIGBM;
    return GRALLOC_TYPE_RANCHU;
}

class GoldfishGralloc : public Gralloc
{
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc,
        int width, int height, uint32_t glformat) {
        return rcEnc->rcCreateColorBuffer(
            rcEnc, width, height, glformat);
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle)
    {
        return cb_handle_t::from(handle)->hostHandle;
    }

    virtual int getFormat(native_handle_t const* handle)
    {
        return cb_handle_t::from(handle)->format;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle)
    {
        return static_cast<size_t>(cb_handle_t::from(handle)->allocatedSize());
    }
};

static inline uint32_t align_up(uint32_t n, uint32_t a) {
    return ((n + a - 1) / a) * a;
}

#ifdef VIRTIO_GPU

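// Gralloc helper for minigbm buffers: color buffers are created directly with
// DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, and host handles/sizes are looked up by
// importing the buffer's prime fd and querying DRM_IOCTL_VIRTGPU_RESOURCE_INFO
// on the rendernode fd.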
class MinigbmGralloc : public Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext*,
        int width, int height, uint32_t glformat) {

        // Only supported format for pbuffers in gfxstream
        // should be RGBA8
        const uint32_t kGlRGB = 0x1907;
        const uint32_t kGlRGBA = 0x1908;
        const uint32_t kVirglFormatRGBA = 67; // VIRGL_FORMAT_R8G8B8A8_UNORM;
        uint32_t virtgpu_format = 0;
        uint32_t bpp = 0;
        switch (glformat) {
            case kGlRGB:
                ALOGD("Note: egl wanted GL_RGB, still using RGBA");
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
            case kGlRGBA:
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
            default:
                ALOGD("Note: egl wanted 0x%x, still using RGBA", glformat);
                virtgpu_format = kVirglFormatRGBA;
                bpp = 4;
                break;
        }
        const uint32_t kPipeTexture2D = 2; // PIPE_TEXTURE_2D
        const uint32_t kBindRenderTarget = 1 << 1; // VIRGL_BIND_RENDER_TARGET
        struct drm_virtgpu_resource_create res_create;
        memset(&res_create, 0, sizeof(res_create));
        res_create.target = kPipeTexture2D;
        res_create.format = virtgpu_format;
        res_create.bind = kBindRenderTarget;
        res_create.width = width;
        res_create.height = height;
        res_create.depth = 1;
        res_create.array_size = 1;
        res_create.last_level = 0;
        res_create.nr_samples = 0;
        res_create.stride = bpp * width;
        res_create.size = align_up(bpp * width * height, PAGE_SIZE);

        int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s (%d)\n", __func__,
                  strerror(errno), errno);
            abort();
        }

        return res_create.res_handle;
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle) {
        struct drm_virtgpu_resource_info info;
        if (!getResInfo(handle, &info)) {
            ALOGE("%s: failed to get resource info\n", __func__);
            return 0;
        }

        return info.res_handle;
    }

    virtual int getFormat(native_handle_t const* handle) {
        return ((cros_gralloc_handle *)handle)->droid_format;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle) {
        struct drm_virtgpu_resource_info info;
        if (!getResInfo(handle, &info)) {
            ALOGE("%s: failed to get resource info\n", __func__);
            return 0;
        }

        return info.size;
    }

    void setFd(int fd) { m_fd = fd; }

private:

    bool getResInfo(native_handle_t const* handle,
                    struct drm_virtgpu_resource_info* info) {
        memset(info, 0x0, sizeof(*info));
        if (m_fd < 0) {
            ALOGE("%s: Error, rendernode fd missing\n", __func__);
            return false;
        }

        struct drm_gem_close gem_close;
        memset(&gem_close, 0x0, sizeof(gem_close));

        cros_gralloc_handle const* cros_handle =
            reinterpret_cast<cros_gralloc_handle const*>(handle);

        uint32_t prime_handle;
        int ret = drmPrimeFDToHandle(m_fd, cros_handle->fds[0], &prime_handle);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_PRIME_FD_TO_HANDLE failed: %s (errno %d)\n",
                  __func__, strerror(errno), errno);
            return false;
        }

        info->bo_handle = prime_handle;
        gem_close.handle = prime_handle;

        ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, info);
        if (ret) {
            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed: %s (errno %d)\n",
                  __func__, strerror(errno), errno);
            drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
            return false;
        }

        drmIoctl(m_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
        return true;
    }

    int m_fd = -1;
};

#else

class MinigbmGralloc : public Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext*,
        int width, int height, uint32_t glformat) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual uint32_t getHostHandle(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual int getFormat(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    virtual size_t getAllocatedSize(native_handle_t const* handle) {
        ALOGE("%s: Error: using minigbm without -DVIRTIO_GPU\n", __func__);
        return 0;
    }

    void setFd(int fd) { m_fd = fd; }

private:

    int m_fd = -1;
};

#endif

class GoldfishProcessPipe : public ProcessPipe
{
public:
    bool processPipeInit(HostConnectionType connType, renderControl_encoder_context_t *rcEnc)
    {
        return ::processPipeInit(connType, rcEnc);
    }
};

static GoldfishGralloc m_goldfishGralloc;
static GoldfishProcessPipe m_goldfishProcessPipe;

HostConnection::HostConnection() :
    m_checksumHelper(),
    m_glExtensions(),
    m_grallocOnly(true),
    m_noHostError(false),
    m_rendernodeFd(-1),
    m_rendernodeFdOwned(false) { }

HostConnection::~HostConnection()
{
    // round-trip to ensure that queued commands have been processed
    // before process pipe closure is detected.
    if (m_rcEnc) {
        (void)m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
    }
    if (m_grallocType == GRALLOC_TYPE_MINIGBM) {
        delete m_grallocHelper;
    }

    if (m_rendernodeFdOwned) {
        close(m_rendernodeFd);
    }
}

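// HostConnection::connect() picks the transport from the system property,
// creates and connects the corresponding stream, wires up the gralloc and
// process-pipe helpers for that transport, and finally sends a zeroed
// 'clientFlags' word so the host knows a new client has attached.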
// static
std::unique_ptr<HostConnection> HostConnection::connect() {
    const enum HostConnectionType connType = getConnectionTypeFromProperty();
    // const enum HostConnectionType connType = HOST_CONNECTION_VIRTIO_GPU;

    // Use "new" to access a non-public constructor.
    auto con = std::unique_ptr<HostConnection>(new HostConnection);
    switch (connType) {
        case HOST_CONNECTION_ADDRESS_SPACE: {
            auto stream = std::unique_ptr<AddressSpaceStream>(
                createAddressSpaceStream(STREAM_BUFFER_SIZE));
            if (!stream) {
                ALOGE("Failed to create AddressSpaceStream for host connection!!!\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_ADDRESS_SPACE;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = std::move(stream);
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_QEMU_PIPE: {
            auto stream = std::make_unique<QemuPipeStream>(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create QemuPipeStream for host connection!!!\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (QemuPipeStream)!!!\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_QEMU_PIPE;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = std::move(stream);
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
        case HOST_CONNECTION_TCP: {
#ifdef __Fuchsia__
            ALOGE("Fuchsia doesn't support HOST_CONNECTION_TCP!!!\n");
            return nullptr;
            break;
#else
            auto stream = std::make_unique<TcpStream>(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create TcpStream for host connection!!!\n");
                return nullptr;
            }

            if (stream->connect("10.0.2.2", STREAM_PORT_NUM) < 0) {
                ALOGE("Failed to connect to host (TcpStream)!!!\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_TCP;
            con->m_grallocType = GRALLOC_TYPE_RANCHU;
            con->m_stream = std::move(stream);
            con->m_grallocHelper = &m_goldfishGralloc;
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
#endif
        }
#ifdef VIRTIO_GPU
        case HOST_CONNECTION_VIRTIO_GPU: {
            auto stream = std::make_unique<VirtioGpuStream>(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create VirtioGpu for host connection!!!\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (VirtioGpu)!!!\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU;
            con->m_grallocType = GRALLOC_TYPE_MINIGBM;
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_processPipe = stream->getProcessPipe();
            con->m_stream = std::move(stream);
            con->m_rendernodeFdOwned = false;
            con->m_rendernodeFd = rendernodeFd;
            MinigbmGralloc* m = new MinigbmGralloc;
            m->setFd(rendernodeFd);
            con->m_grallocHelper = m;
            break;
        }
        case HOST_CONNECTION_VIRTIO_GPU_PIPE: {
            auto stream =
                std::make_unique<VirtioGpuPipeStream>(STREAM_BUFFER_SIZE);
            if (!stream) {
                ALOGE("Failed to create VirtioGpu for host connection!!!\n");
                return nullptr;
            }
            if (stream->connect() < 0) {
                ALOGE("Failed to connect to host (VirtioGpu)!!!\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_PIPE;
            con->m_grallocType = getGrallocTypeFromProperty();
            con->m_rendernodeFdOwned = false;
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_stream = std::move(stream);
            con->m_rendernodeFd = rendernodeFd;
            switch (con->m_grallocType) {
                case GRALLOC_TYPE_RANCHU:
                    con->m_grallocHelper = &m_goldfishGralloc;
                    break;
                case GRALLOC_TYPE_MINIGBM: {
                    MinigbmGralloc* m = new MinigbmGralloc;
                    m->setFd(rendernodeFd);
                    con->m_grallocHelper = m;
                    break;
                }
                default:
                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
                    abort();
            }
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
#if !defined(HOST_BUILD) && !defined(__Fuchsia__)
        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
            auto stream = std::unique_ptr<AddressSpaceStream>(
                createVirtioGpuAddressSpaceStream(STREAM_BUFFER_SIZE));
            if (!stream) {
                ALOGE("Failed to create virtgpu AddressSpaceStream for host connection!!!\n");
                return nullptr;
            }
            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
            con->m_grallocType = getGrallocTypeFromProperty();
            con->m_rendernodeFdOwned = false;
            auto rendernodeFd = stream->getRendernodeFd();
            con->m_stream = std::move(stream);
            con->m_rendernodeFd = rendernodeFd;
            switch (con->m_grallocType) {
                case GRALLOC_TYPE_RANCHU:
                    con->m_grallocHelper = &m_goldfishGralloc;
                    break;
                case GRALLOC_TYPE_MINIGBM: {
                    MinigbmGralloc* m = new MinigbmGralloc;
                    m->setFd(rendernodeFd);
                    con->m_grallocHelper = m;
                    break;
                }
                default:
                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
                    abort();
            }
            con->m_processPipe = &m_goldfishProcessPipe;
            break;
        }
#endif // !HOST_BUILD && !__Fuchsia__
#else
        default:
            break;
#endif
    }

    // send zero 'clientFlags' to the host.
    unsigned int *pClientFlags =
        (unsigned int *)con->m_stream->allocBuffer(sizeof(unsigned int));
    *pClientFlags = 0;
    con->m_stream->commitBuffer(sizeof(unsigned int));

    ALOGD("HostConnection::get() New Host Connection established %p, tid %d\n",
          con.get(), getCurrentThreadId());

    // ALOGD("Address space echo latency check done\n");
    return con;
}

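// Typical call site (illustrative sketch only, not code from this file): an
// EGL/GLES entry point grabs the per-thread connection and the matching
// encoder before emitting commands, e.g.
//
//   HostConnection* hostCon = HostConnection::get();
//   if (!hostCon) return;
//   GL2Encoder* ctx = hostCon->gl2Encoder();
//   ctx->glClear(ctx, GL_COLOR_BUFFER_BIT);
//
// The exact shape of the encoder entry points depends on the generated
// encoder headers, so treat the snippet above as a sketch.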
HostConnection *HostConnection::get() {
    return getWithThreadInfo(getEGLThreadInfo());
}

HostConnection *HostConnection::getWithThreadInfo(EGLThreadInfo* tinfo) {
    // Get thread info
    if (!tinfo) {
        return NULL;
    }

    if (tinfo->hostConn == NULL) {
        tinfo->hostConn = HostConnection::createUnique();
    }

    return tinfo->hostConn.get();
}

void HostConnection::exit() {
    EGLThreadInfo *tinfo = getEGLThreadInfo();
    if (!tinfo) {
        return;
    }

    tinfo->hostConn.reset();
}

// static
std::unique_ptr<HostConnection> HostConnection::createUnique() {
    ALOGD("%s: call\n", __func__);
    return connect();
}

GLEncoder *HostConnection::glEncoder()
{
    if (!m_glEnc) {
        m_glEnc = std::make_unique<GLEncoder>(m_stream.get(), checksumHelper());
        DBG("HostConnection::glEncoder new encoder %p, tid %d",
            m_glEnc, getCurrentThreadId());
        m_glEnc->setContextAccessor(s_getGLContext);
    }
    return m_glEnc.get();
}

GL2Encoder *HostConnection::gl2Encoder()
{
    if (!m_gl2Enc) {
        m_gl2Enc =
            std::make_unique<GL2Encoder>(m_stream.get(), checksumHelper());
        DBG("HostConnection::gl2Encoder new encoder %p, tid %d",
            m_gl2Enc, getCurrentThreadId());
        m_gl2Enc->setContextAccessor(s_getGL2Context);
        m_gl2Enc->setNoHostError(m_noHostError);
        m_gl2Enc->setDrawCallFlushInterval(
            getDrawCallFlushIntervalFromProperty());
        m_gl2Enc->setHasAsyncUnmapBuffer(m_rcEnc->hasAsyncUnmapBuffer());
    }
    return m_gl2Enc.get();
}

VkEncoder *HostConnection::vkEncoder()
{
    if (!m_vkEnc) {
        m_vkEnc = std::make_unique<VkEncoder>(m_stream.get());
    }
    return m_vkEnc.get();
}

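// Lazily builds the renderControl encoder. The first call also performs the
// one-time handshake with the host: the GL extension string is queried and
// each recognized token enables the corresponding feature or capability on
// the encoder, after which the per-process pipe is initialized.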
ExtendedRCEncoderContext *HostConnection::rcEncoder()
{
    if (!m_rcEnc) {
        m_rcEnc = std::make_unique<ExtendedRCEncoderContext>(m_stream.get(),
                                                             checksumHelper());

        ExtendedRCEncoderContext* rcEnc = m_rcEnc.get();
        setChecksumHelper(rcEnc);
        queryAndSetSyncImpl(rcEnc);
        queryAndSetDmaImpl(rcEnc);
        queryAndSetGLESMaxVersion(rcEnc);
        queryAndSetNoErrorState(rcEnc);
        queryAndSetHostCompositionImpl(rcEnc);
        queryAndSetDirectMemSupport(rcEnc);
        queryAndSetVulkanSupport(rcEnc);
        queryAndSetDeferredVulkanCommandsSupport(rcEnc);
        queryAndSetVulkanNullOptionalStringsSupport(rcEnc);
        queryAndSetVulkanCreateResourcesWithRequirementsSupport(rcEnc);
        queryAndSetVulkanIgnoredHandles(rcEnc);
        queryAndSetYUVCache(rcEnc);
        queryAndSetAsyncUnmapBuffer(rcEnc);
        queryAndSetVirtioGpuNext(rcEnc);
        queryHasSharedSlotsHostMemoryAllocator(rcEnc);
        queryAndSetVulkanFreeMemorySync(rcEnc);
        queryAndSetVirtioGpuNativeSync(rcEnc);
        queryAndSetVulkanShaderFloat16Int8Support(rcEnc);
        queryAndSetVulkanAsyncQueueSubmitSupport(rcEnc);
        if (m_processPipe) {
            m_processPipe->processPipeInit(m_connectionType, rcEnc);
        }
    }
    return m_rcEnc.get();
}

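// Returns the stream's rendernode fd if one was recorded, otherwise opens a
// secondary rendernode via VirtioGpuPipeStream::openRendernode() and marks it
// owned so ~HostConnection() closes it.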
int HostConnection::getOrCreateRendernodeFd() {
    if (m_rendernodeFd >= 0) return m_rendernodeFd;
#ifdef __Fuchsia__
    return -1;
#else
#ifdef VIRTIO_GPU
    m_rendernodeFd = VirtioGpuPipeStream::openRendernode();
    if (m_rendernodeFd < 0) {
        ALOGE("%s: failed to create secondary "
              "rendernode for host connection. "
              "error: %s (%d)\n", __FUNCTION__,
              strerror(errno), errno);
        return -1;
    }

    // Remember to close it on exit
    m_rendernodeFdOwned = true;
    return m_rendernodeFd;
#else
    return -1;
#endif
#endif
}

gl_client_context_t *HostConnection::s_getGLContext()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_glEnc.get();
    }
    return NULL;
}

gl2_client_context_t *HostConnection::s_getGL2Context()
{
    EGLThreadInfo *ti = getEGLThreadInfo();
    if (ti->hostConn) {
        return ti->hostConn->m_gl2Enc.get();
    }
    return NULL;
}

const std::string& HostConnection::queryGLExtensions(ExtendedRCEncoderContext *rcEnc) {
    if (!m_glExtensions.empty()) {
        return m_glExtensions;
    }

    // Extension strings are usually quite long, preallocate enough here.
    std::string extensions_buffer(1023, '\0');

    // rcGetGLString() returns the required size including the 0-terminator, so
    // account for it when passing/using the sizes.
    int extensionSize = rcEnc->rcGetGLString(rcEnc, GL_EXTENSIONS,
                                             &extensions_buffer[0],
                                             extensions_buffer.size() + 1);
    if (extensionSize < 0) {
        extensions_buffer.resize(-extensionSize);
        extensionSize = rcEnc->rcGetGLString(rcEnc, GL_EXTENSIONS,
                                             &extensions_buffer[0],
                                             -extensionSize + 1);
    }

    if (extensionSize > 0) {
        extensions_buffer.resize(extensionSize - 1);
        m_glExtensions.swap(extensions_buffer);
    }

    return m_glExtensions;
}

void HostConnection::queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    ALOGD("HostComposition ext %s", glExtensions.c_str());
    // make sure V2 is checked first before V1, as the host may declare supporting both
    if (glExtensions.find(kHostCompositionV2) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V2);
    }
    else if (glExtensions.find(kHostCompositionV1) != std::string::npos) {
        rcEnc->setHostComposition(HOST_COMPOSITION_V1);
    }
    else {
        rcEnc->setHostComposition(HOST_COMPOSITION_NONE);
    }
}

void HostConnection::setChecksumHelper(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    // check the host supported version
    uint32_t checksumVersion = 0;
    const char* checksumPrefix = ChecksumCalculator::getMaxVersionStrPrefix();
    const char* glProtocolStr = strstr(glExtensions.c_str(), checksumPrefix);
    if (glProtocolStr) {
        uint32_t maxVersion = ChecksumCalculator::getMaxVersion();
        sscanf(glProtocolStr+strlen(checksumPrefix), "%d", &checksumVersion);
        if (maxVersion < checksumVersion) {
            checksumVersion = maxVersion;
        }
        // The ordering of the following two commands matters!
        // Must tell the host first before setting it in the guest
        rcEnc->rcSelectChecksumHelper(rcEnc, checksumVersion, 0);
        m_checksumHelper.setVersion(checksumVersion);
    }
}

void HostConnection::queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
#if PLATFORM_SDK_VERSION <= 16 || (!defined(__i386__) && !defined(__x86_64__))
    rcEnc->setSyncImpl(SYNC_IMPL_NONE);
#else
    if (glExtensions.find(kRCNativeSyncV4) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V4);
    } else if (glExtensions.find(kRCNativeSyncV3) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V3);
    } else if (glExtensions.find(kRCNativeSyncV2) != std::string::npos) {
        rcEnc->setSyncImpl(SYNC_IMPL_NATIVE_SYNC_V2);
    } else {
        rcEnc->setSyncImpl(SYNC_IMPL_NONE);
    }
#endif
}

void HostConnection::queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
#if PLATFORM_SDK_VERSION <= 16 || (!defined(__i386__) && !defined(__x86_64__))
    rcEnc->setDmaImpl(DMA_IMPL_NONE);
#else
    if (glExtensions.find(kDmaExtStr_v1) != std::string::npos) {
        rcEnc->setDmaImpl(DMA_IMPL_v1);
    } else {
        rcEnc->setDmaImpl(DMA_IMPL_NONE);
    }
#endif
}

void HostConnection::queryAndSetGLESMaxVersion(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kGLESMaxVersion_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    } else if (glExtensions.find(kGLESMaxVersion_3_0) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_0);
    } else if (glExtensions.find(kGLESMaxVersion_3_1) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_1);
    } else if (glExtensions.find(kGLESMaxVersion_3_2) != std::string::npos) {
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_3_2);
    } else {
        ALOGW("Unrecognized GLES max version string in extensions: %s",
              glExtensions.c_str());
        rcEnc->setGLESMaxVersion(GLES_MAX_VERSION_2);
    }
}

void HostConnection::queryAndSetNoErrorState(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kGLESNoHostError) != std::string::npos) {
        m_noHostError = true;
    }
}

void HostConnection::queryAndSetDirectMemSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kGLDirectMem) != std::string::npos) {
        rcEnc->featureInfo()->hasDirectMem = true;
    }
}

void HostConnection::queryAndSetVulkanSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkan) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkan = true;
    }
}

void HostConnection::queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kDeferredVulkanCommands) != std::string::npos) {
        rcEnc->featureInfo()->hasDeferredVulkanCommands = true;
    }
}

void HostConnection::queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanNullOptionalStrings) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanNullOptionalStrings = true;
    }
}

void HostConnection::queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanCreateResourcesWithRequirements) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanCreateResourcesWithRequirements = true;
    }
}

void HostConnection::queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanIgnoredHandles) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanIgnoredHandles = true;
    }
}

void HostConnection::queryAndSetYUVCache(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kYUVCache) != std::string::npos) {
        rcEnc->featureInfo()->hasYUVCache = true;
    }
}

void HostConnection::queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kAsyncUnmapBuffer) != std::string::npos) {
        rcEnc->featureInfo()->hasAsyncUnmapBuffer = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNext(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVirtioGpuNext) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNext = true;
    }
}

void HostConnection::queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kHasSharedSlotsHostMemoryAllocator) != std::string::npos) {
        rcEnc->featureInfo()->hasSharedSlotsHostMemoryAllocator = true;
    }
}

void HostConnection::queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc) {
    const std::string& glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanFreeMemorySync) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanFreeMemorySync = true;
    }
}

void HostConnection::queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVirtioGpuNativeSync) != std::string::npos) {
        rcEnc->featureInfo()->hasVirtioGpuNativeSync = true;
    }
}

void HostConnection::queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanShaderFloat16Int8) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanShaderFloat16Int8 = true;
    }
}

void HostConnection::queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext* rcEnc) {
    std::string glExtensions = queryGLExtensions(rcEnc);
    if (glExtensions.find(kVulkanAsyncQueueSubmit) != std::string::npos) {
        rcEnc->featureInfo()->hasVulkanAsyncQueueSubmit = true;
    }
}