// Copyright (C) 2019 The Android Open Source Project
// Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <linux/types.h>
#include <linux/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdlib>
#include <cstring>  // ::memset, strerror
#include <errno.h>
#include <memory>

#ifdef VIRTIO_GPU
#include <drm/virtgpu_drm.h>
#include <xf86drm.h>
#endif

#include <log/log.h>

#include "goldfish_address_space.h"
#include "virtio_gpu_next.h"

namespace {

struct goldfish_address_space_allocate_block {
    __u64 size;
    __u64 offset;
    __u64 phys_addr;
};

struct goldfish_address_space_claim_shared {
    __u64 offset;
    __u64 size;
};

#define GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC		'G'
#define GOLDFISH_ADDRESS_SPACE_IOCTL_OP(OP, T)		_IOWR(GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC, OP, T)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(10, struct goldfish_address_space_allocate_block)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(11, __u64)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_PING		GOLDFISH_ADDRESS_SPACE_IOCTL_OP(12, struct address_space_ping)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(13, struct goldfish_address_space_claim_shared)
#define GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(14, __u64)
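
// Illustrative expansion of the op encoding above (not a definition used by
// this file): all five requests share the 'G' ioctl namespace with op codes
// 10..14, e.g.
//
//   GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK
//       == _IOWR('G', 10, struct goldfish_address_space_allocate_block)
//
// i.e. a read/write ioctl whose payload is the allocate-block struct.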

const char GOLDFISH_ADDRESS_SPACE_DEVICE_NAME[] = "/dev/goldfish_address_space";

const int HOST_MEMORY_ALLOCATOR_COMMAND_ALLOCATE_ID = 1;
const int HOST_MEMORY_ALLOCATOR_COMMAND_UNALLOCATE_ID = 2;

int create_address_space_fd()
{
    return ::open(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME, O_RDWR);
}

long ioctl_allocate(int fd, struct goldfish_address_space_allocate_block *request)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK, request);
}

long ioctl_deallocate(int fd, uint64_t offset)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK, &offset);
}

long ioctl_ping(int fd, struct address_space_ping *request)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_PING, request);
}

long set_address_space_subdevice_type(int fd, uint64_t type)
{
    struct address_space_ping request;
    ::memset(&request, 0, sizeof(request));
    request.version = sizeof(request);
    request.metadata = type;

    long ret = ioctl_ping(fd, &request);
    if (ret) {
        return ret;
    }

    return request.metadata;
}
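
// Note: the PING ioctl is bidirectional. The guest hands the subdevice type
// to the host in request.metadata, and the host writes its reply back into
// the same field, which is why the handshake result is returned via
// request.metadata above.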

long ioctl_claim_shared(int fd, struct goldfish_address_space_claim_shared *request)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED, request);
}

long ioctl_unclaim_shared(int fd, uint64_t offset)
{
    return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED, &offset);
}

}  // namespace

GoldfishAddressSpaceBlockProvider::GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType subdevice)
  : m_handle(create_address_space_fd())
{
    if ((subdevice != GoldfishAddressSpaceSubdeviceType::NoSubdevice) && is_opened()) {
        const long ret = set_address_space_subdevice_type(m_handle, subdevice);
        if (ret != 0 && ret != subdevice) {  // TODO: retire the 'ret != subdevice' check
            ALOGE("%s: set_address_space_subdevice_type failed for device_type=%lu, ret=%ld",
                  __func__, static_cast<unsigned long>(subdevice), ret);
            close();
        }
    }
}

GoldfishAddressSpaceBlockProvider::~GoldfishAddressSpaceBlockProvider()
{
    if (is_opened()) {
        ::close(m_handle);
    }
}

bool GoldfishAddressSpaceBlockProvider::is_opened() const
{
    return m_handle >= 0;
}

void GoldfishAddressSpaceBlockProvider::close()
{
    if (is_opened()) {
        ::close(m_handle);
        m_handle = -1;
    }
}

address_space_handle_t GoldfishAddressSpaceBlockProvider::release()
{
    address_space_handle_t handle = m_handle;
    m_handle = -1;
    return handle;
}

void GoldfishAddressSpaceBlockProvider::closeHandle(address_space_handle_t handle)
{
    ::close(handle);
}

GoldfishAddressSpaceBlock::GoldfishAddressSpaceBlock()
    : m_handle(-1)
    , m_mmaped_ptr(NULL)
    , m_phys_addr(0)
    , m_host_addr(0)
    , m_offset(0)
    , m_size(0)
{
    m_is_shared_mapping = false;
}

GoldfishAddressSpaceBlock::~GoldfishAddressSpaceBlock()
{
    destroy();
}

GoldfishAddressSpaceBlock &GoldfishAddressSpaceBlock::operator=(const GoldfishAddressSpaceBlock &rhs)
{
    m_mmaped_ptr = rhs.m_mmaped_ptr;
    m_phys_addr = rhs.m_phys_addr;
    m_host_addr = rhs.m_host_addr;
    m_offset = rhs.m_offset;
    m_size = rhs.m_size;
    m_handle = rhs.m_handle;
    m_is_shared_mapping = rhs.m_is_shared_mapping;

    return *this;
}

bool GoldfishAddressSpaceBlock::allocate(GoldfishAddressSpaceBlockProvider *provider, size_t size)
{
    ALOGD("%s: Ask for block of size 0x%llx\n", __func__,
         (unsigned long long)size);

    destroy();

    if (!provider->is_opened()) {
        return false;
    }

    struct goldfish_address_space_allocate_block request;
    ::memset(&request, 0, sizeof(request));
    request.size = size;

    long res = ioctl_allocate(provider->m_handle, &request);
    if (res) {
        return false;
    } else {
        m_phys_addr = request.phys_addr;
        m_offset = request.offset;
        m_size = request.size;
        m_handle = provider->m_handle;
        m_is_shared_mapping = false;

        ALOGD("%s: ioctl allocate returned offset 0x%llx size 0x%llx\n", __func__,
                (unsigned long long)m_offset,
                (unsigned long long)m_size);

        return true;
    }
}
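
// Usage sketch for the provider/block pair (illustrative only; the 4096-byte
// size and the error handling are assumptions, not part of this file):
//
//   GoldfishAddressSpaceBlockProvider provider(
//       GoldfishAddressSpaceSubdeviceType::NoSubdevice);
//   GoldfishAddressSpaceBlock block;
//   if (block.allocate(&provider, 4096)) {
//       void* guest = block.mmap(0);  // page-aligned guest mapping
//       // ... use guest ...
//   }                                 // ~GoldfishAddressSpaceBlock() deallocates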

bool GoldfishAddressSpaceBlock::claimShared(GoldfishAddressSpaceBlockProvider *provider, uint64_t offset, uint64_t size)
{
    ALOGD("%s: Ask to claim region [0x%llx 0x%llx]\n", __func__,
         (unsigned long long)offset,
         (unsigned long long)offset + size);

    destroy();

    if (!provider->is_opened()) {
        return false;
    }

    struct goldfish_address_space_claim_shared request;
    request.offset = offset;
    request.size = size;
    long res = ioctl_claim_shared(provider->m_handle, &request);

    if (res) {
        return false;
    }

    m_offset = offset;
    m_size = size;
    m_handle = provider->m_handle;
    m_is_shared_mapping = true;

    return true;
}

uint64_t GoldfishAddressSpaceBlock::physAddr() const
{
    return m_phys_addr;
}

uint64_t GoldfishAddressSpaceBlock::hostAddr() const
{
    return m_host_addr;
}

void *GoldfishAddressSpaceBlock::mmap(uint64_t host_addr)
{
    if (m_size == 0) {
        ALOGE("%s: called with zero size\n", __func__);
        return NULL;
    }
    if (m_mmaped_ptr) {
        ALOGE("'mmap' called for an already mmaped address block");
        ::abort();
    }

    void *result;
    const int res = memoryMap(NULL, m_size, m_handle, m_offset, &result);
    if (res) {
        ALOGE("%s: host memory map failed with size 0x%llx "
              "off 0x%llx errno %d\n",
              __func__,
              (unsigned long long)m_size,
              (unsigned long long)m_offset, res);
        return NULL;
    } else {
        m_mmaped_ptr = result;
        m_host_addr = host_addr;
        return guestPtr();
    }
}

void *GoldfishAddressSpaceBlock::guestPtr() const
{
    return reinterpret_cast<char *>(m_mmaped_ptr) + (m_host_addr & (PAGE_SIZE - 1));
}
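
// guestPtr() folds the host address's sub-page offset into the page-aligned
// mmap result, so the returned guest pointer aliases the exact byte the host
// handed out rather than the start of its page.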

void GoldfishAddressSpaceBlock::destroy()
{
    if (m_mmaped_ptr && m_size) {
        memoryUnmap(m_mmaped_ptr, m_size);
        m_mmaped_ptr = NULL;
    }

    if (m_size) {
        long res = -EINVAL;

        if (m_is_shared_mapping) {
            res = ioctl_unclaim_shared(m_handle, m_offset);
            if (res) {
                ALOGE("ioctl_unclaim_shared failed, res=%ld", res);
                ::abort();
            }
        } else {
            res = ioctl_deallocate(m_handle, m_offset);
            if (res) {
                ALOGE("ioctl_deallocate failed, res=%ld", res);
                ::abort();
            }
        }

        m_is_shared_mapping = false;

        m_phys_addr = 0;
        m_host_addr = 0;
        m_offset = 0;
        m_size = 0;
    }
}

void GoldfishAddressSpaceBlock::release()
{
    m_handle = -1;
    m_mmaped_ptr = NULL;
    m_phys_addr = 0;
    m_host_addr = 0;
    m_offset = 0;
    m_size = 0;
}

int GoldfishAddressSpaceBlock::memoryMap(void *addr,
                                         size_t len,
                                         address_space_handle_t fd,
                                         uint64_t off,
                                         void** dst) {
    void* ptr = ::mmap64(addr, len, PROT_WRITE, MAP_SHARED, fd, off);
    if (MAP_FAILED == ptr) {
        return errno;
    } else {
        *dst = ptr;
        return 0;
    }
}

void GoldfishAddressSpaceBlock::memoryUnmap(void *ptr, size_t size)
{
    ::munmap(ptr, size);
}

GoldfishAddressSpaceHostMemoryAllocator::GoldfishAddressSpaceHostMemoryAllocator(bool useSharedSlots)
  : m_provider(useSharedSlots
        ? GoldfishAddressSpaceSubdeviceType::SharedSlotsHostMemoryAllocator
        : GoldfishAddressSpaceSubdeviceType::HostMemoryAllocator),
    m_useSharedSlots(useSharedSlots)
{}

bool GoldfishAddressSpaceHostMemoryAllocator::is_opened() const { return m_provider.is_opened(); }

long GoldfishAddressSpaceHostMemoryAllocator::hostMalloc(GoldfishAddressSpaceBlock *block, size_t size)
{
    if (size == 0) {
        return -EINVAL;
    }
    if (block->size() > 0) {
        return -EINVAL;
    }
    if (!m_provider.is_opened()) {
        return -ENODEV;
    }

    struct address_space_ping request;
    if (m_useSharedSlots) {
        // shared memory slots are supported
        ::memset(&request, 0, sizeof(request));
        request.version = sizeof(request);
        request.size = size;
        request.metadata = HOST_MEMORY_ALLOCATOR_COMMAND_ALLOCATE_ID;

        long ret = ioctl_ping(m_provider.m_handle, &request);
        if (ret) {
            return ret;
        }
        ret = static_cast<long>(request.metadata);
        if (ret) {
            return ret;
        }

        if (!block->claimShared(&m_provider, request.offset, request.size)) {
            return -ENOMEM;
        }
    } else {
        // shared memory slots are not supported
        if (!block->allocate(&m_provider, size)) {
            return -ENOMEM;
        }

        ::memset(&request, 0, sizeof(request));
        request.version = sizeof(request);
        request.offset = block->offset();
        request.size = block->size();
        request.metadata = HOST_MEMORY_ALLOCATOR_COMMAND_ALLOCATE_ID;

        long ret = ioctl_ping(m_provider.m_handle, &request);
        if (ret) {
            return ret;
        }
        ret = static_cast<long>(request.metadata);
        if (ret) {
            return ret;
        }
    }

    block->mmap(0);
    return 0;
}
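
// Typical hostMalloc/hostFree pairing (illustrative only; 'size' stands in
// for a caller-provided byte count):
//
//   GoldfishAddressSpaceHostMemoryAllocator allocator(false /* useSharedSlots */);
//   GoldfishAddressSpaceBlock block;
//   if (allocator.hostMalloc(&block, size) == 0) {
//       void* ptr = block.guestPtr();
//       // ... use ptr ...
//       allocator.hostFree(&block);
//   }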

void GoldfishAddressSpaceHostMemoryAllocator::hostFree(GoldfishAddressSpaceBlock *block)
{
    if (block->size() == 0) {
        return;
    }

    if (!m_provider.is_opened()) {
        ALOGE("%s: device is not available", __func__);
        ::abort();
    }

    if (block->guestPtr()) {
        struct address_space_ping request;
        ::memset(&request, 0, sizeof(request));
        request.version = sizeof(request);
        request.offset = block->offset();
        request.metadata = HOST_MEMORY_ALLOCATOR_COMMAND_UNALLOCATE_ID;

        const long ret = ioctl_ping(m_provider.m_handle, &request);
        if (ret) {
            ALOGE("%s: ioctl_ping failed, ret=%ld", __func__, ret);
            ::abort();
        }
    }

    block->replace(NULL);
}

address_space_handle_t goldfish_address_space_open() {
    return ::open(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME, O_RDWR);
}

void goldfish_address_space_close(address_space_handle_t handle) {
    ::close(handle);
}

bool goldfish_address_space_allocate(
    address_space_handle_t handle,
    size_t size, uint64_t* phys_addr, uint64_t* offset) {

    struct goldfish_address_space_allocate_block request;
    ::memset(&request, 0, sizeof(request));
    request.size = size;

    long res = ioctl_allocate(handle, &request);

    if (res) return false;

    *phys_addr = request.phys_addr;
    *offset = request.offset;
    return true;
}

bool goldfish_address_space_free(
    address_space_handle_t handle, uint64_t offset) {

    long res = ioctl_deallocate(handle, offset);

    if (res) {
        ALOGE("ioctl_deallocate failed, res=%ld", res);
        ::abort();
    }

    return true;
}

bool goldfish_address_space_claim_shared(
    address_space_handle_t handle, uint64_t offset, uint64_t size) {

    struct goldfish_address_space_claim_shared request;
    request.offset = offset;
    request.size = size;
    long res = ioctl_claim_shared(handle, &request);

    if (res) return false;

    return true;
}

bool goldfish_address_space_unclaim_shared(
        address_space_handle_t handle, uint64_t offset) {
    long res = ioctl_unclaim_shared(handle, offset);
    if (res) {
        ALOGE("ioctl_unclaim_shared failed, res=%ld", res);
        ::abort();
    }

    return true;
}

// pgoff is the offset into the page to return in the result
void* goldfish_address_space_map(
    address_space_handle_t handle,
    uint64_t offset, uint64_t size,
    uint64_t pgoff) {

    void* res = ::mmap64(0, size, PROT_WRITE, MAP_SHARED, handle, offset);

    if (res == MAP_FAILED) {
        ALOGE("%s: failed to map. errno: %d\n", __func__, errno);
        return 0;
    }

    return (void*)(((char*)res) + (uintptr_t)(pgoff & (PAGE_SIZE - 1)));
}

void goldfish_address_space_unmap(void* ptr, uint64_t size) {
    void* pagePtr = (void*)(((uintptr_t)ptr) & ~(PAGE_SIZE - 1));
    ::munmap(pagePtr, size);
}
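
// End-to-end round trip through the C-style API (illustrative only; the
// 4096-byte size is an assumption). A nonzero pgoff would add a sub-page
// offset to the returned pointer, mirroring GoldfishAddressSpaceBlock::guestPtr():
//
//   address_space_handle_t h = goldfish_address_space_open();
//   uint64_t phys_addr = 0, offset = 0;
//   if (goldfish_address_space_allocate(h, 4096, &phys_addr, &offset)) {
//       void* p = goldfish_address_space_map(h, offset, 4096, 0 /* pgoff */);
//       // ... use p ...
//       goldfish_address_space_unmap(p, 4096);
//       goldfish_address_space_free(h, offset);
//   }
//   goldfish_address_space_close(h);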

bool goldfish_address_space_set_subdevice_type(
    address_space_handle_t handle, GoldfishAddressSpaceSubdeviceType type,
    address_space_handle_t* handle_out) {
    struct address_space_ping request;
    ::memset(&request, 0, sizeof(request));
    request.version = sizeof(request);
    request.metadata = (uint64_t)type;
    *handle_out = handle;
    return goldfish_address_space_ping(handle, &request);
}

bool goldfish_address_space_ping(
    address_space_handle_t handle,
    struct address_space_ping* ping) {
    long res = ioctl_ping(handle, ping);

    if (res) {
        ALOGE("%s: ping failed: errno: %d\n", __func__, errno);
        return false;
    }

    return true;
}

#ifdef VIRTIO_GPU

// virtio-gpu version
address_space_handle_t virtgpu_address_space_open() {
    return drmOpenRender(128);
}

void virtgpu_address_space_close(address_space_handle_t fd) {
    close(fd);
}

// kVirtioGpuAddressSpaceContextCreateWithSubdevice | subdeviceType
const uint32_t kVirtioGpuAddressSpaceContextCreateWithSubdevice = 0x1001;

// kVirtioGpuAddressSpacePing | offset_lo | offset_hi | size_lo | size_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
// no output
const uint32_t kVirtioGpuAddressSpacePing = 0x1002;

// kVirtioGpuAddressSpacePingWithResponse | resp_resid | offset_lo | offset_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
// out: same as input then | out: error
const uint32_t kVirtioGpuAddressSpacePingWithResponse = 0x1003;
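
// Every 64-bit ping field is split into lo/hi 32-bit words before being
// placed in the command stream. A minimal packing sketch (the helper name is
// hypothetical; the functions below open-code these casts inline):
//
//   static inline void push_u64(uint32_t* w, uint64_t v) {
//       w[0] = (uint32_t)v;          // low 32 bits
//       w[1] = (uint32_t)(v >> 32);  // high 32 bits
//   }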

// Ping with no response
bool virtgpu_address_space_ping(address_space_handle_t fd, struct address_space_ping* info) {

    uint32_t words[] = {
        kVirtioGpuAddressSpacePing,
        (uint32_t)(info->offset), (uint32_t)(info->offset >> 32),
        (uint32_t)(info->size), (uint32_t)(info->size >> 32),
        (uint32_t)(info->metadata), (uint32_t)(info->metadata >> 32),
        (uint32_t)(info->version), (uint32_t)(info->wait_fd),
        (uint32_t)(info->wait_flags), (uint32_t)(info->direction),
    };

    drm_virtgpu_execbuffer execbuffer = {
        .flags = 0,
        .size = sizeof(words),
        .command = (uint64_t)(uintptr_t)(words),
        .bo_handles = 0,
        .num_bo_handles = 0,
        .fence_fd = -1,
    };

    int queue_work_err = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);

    if (queue_work_err) {
        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
                queue_work_err, strerror(errno));
        return false;
    }

    return true;
}

bool virtgpu_address_space_create_context_with_subdevice(
    address_space_handle_t fd,
    uint32_t subdevice_type,
    struct address_space_virtgpu_info* info_out) {

    // response page
    drm_virtgpu_resource_create create = {
        .target     = PIPE_BUFFER,
        .format     = VIRGL_FORMAT_R8_UNORM,
        .bind       = VIRGL_BIND_CUSTOM,
        .width      = 4096,
        .height     = 1U,
        .depth      = 1U,
        .array_size = 0U,
        .size       = 4096,
        .stride     = 4096,
    };

    int ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
    if (ret) {
        ALOGE("%s: failed with %d allocating command buffer (%s)\n",
                __func__, ret, strerror(errno));
        return false;
    }

    drm_virtgpu_map map = {
        .handle = create.bo_handle,
    };

    ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
    if (ret) {
        ALOGE("%s: failed with %d mapping command response buffer (%s)\n",
            __func__, ret, strerror(errno));
        return false;
    }

    void* ptr = static_cast<unsigned char*>(
            mmap64(nullptr, 4096, PROT_WRITE, MAP_SHARED, fd, map.offset));

    if (ptr == MAP_FAILED) {
        ALOGE("%s: failed with %d mmap'ing command response buffer (%s)\n",
                __func__, errno, strerror(errno));
        return false;
    }

    info_out->fd = fd;
    info_out->resp_bo = create.bo_handle;
    info_out->resp_resid = create.res_handle;
    info_out->resp_mapped_ptr = ptr;

    ALOGD("%s: resp bo: %u resid %u mapped %p\n", __func__,
            create.bo_handle, create.res_handle, ptr);

    // Context creation command
    uint32_t words[] = {
        kVirtioGpuAddressSpaceContextCreateWithSubdevice,
        subdevice_type,
    };

    drm_virtgpu_execbuffer execbuffer = {
        .flags = 0,
        .size = sizeof(words),
        .command = (uint64_t)(uintptr_t)(words),
        .bo_handles = 0,
        .num_bo_handles = 0,
        .fence_fd = -1,
    };

    int queue_work_err = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);

    if (queue_work_err) {
        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
                queue_work_err, strerror(errno));
        return false;
    }

    return true;
}

bool virtgpu_address_space_allocate_hostmem(
    address_space_handle_t fd,
    size_t size,
    uint64_t hostmem_id,
    struct address_space_virtgpu_hostmem_info* hostmem_info_out) {

    struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
    drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST;
    drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_MAPPABLE;
    drm_rc_blob.blob_id = hostmem_id;
    drm_rc_blob.size = size;

    int res = drmIoctl(
            fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);

    if (res) {
        ALOGE("%s: Failed to resource create v2: strerror: %s errno: %d\n", __func__,
                strerror(errno), errno);
        abort();
    }

    struct drm_virtgpu_map map_info = {
        .handle = drm_rc_blob.bo_handle,
    };

    res = drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map_info);
    if (res) {
        ALOGE("%s: Failed to virtgpu map: strerror: %s errno: %d\n", __func__,
                strerror(errno), errno);
        abort();
    }

    void* directMappedAddr = mmap64(0, size, PROT_WRITE, MAP_SHARED, fd, map_info.offset);

    if (directMappedAddr == MAP_FAILED) {
        ALOGE("%s: mmap of virtio gpu resource failed: %s\n", __func__, strerror(errno));
        abort();
    }

    hostmem_info_out->id = hostmem_id;
    hostmem_info_out->bo = drm_rc_blob.bo_handle;
    hostmem_info_out->ptr = directMappedAddr;
    return true;
}
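
// The flow above: DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB turns the host
// allocation named by blob_id into a guest bo, DRM_IOCTL_VIRTGPU_MAP assigns
// that bo an mmap offset, and mmap64 then yields a direct guest mapping of
// the host memory.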

uint64_t buildu64(uint32_t lo, uint32_t hi) {
    uint64_t res = (uint64_t)lo;
    uint64_t hi64 = (uint64_t)hi;
    return res | (hi64 << 32);
}
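
// Example: buildu64(0x89abcdef, 0x01234567) == 0x0123456789abcdef; it is the
// inverse of the lo/hi split used when packing command words.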

// Ping with response
bool virtgpu_address_space_ping_with_response(
    struct address_space_virtgpu_info* info,
    struct address_space_ping* ping) {

    uint32_t words[] = {
        kVirtioGpuAddressSpacePingWithResponse,
        info->resp_resid,
        (uint32_t)(ping->offset), (uint32_t)(ping->offset >> 32),
        (uint32_t)(ping->size), (uint32_t)(ping->size >> 32),
        (uint32_t)(ping->metadata), (uint32_t)(ping->metadata >> 32),
        (uint32_t)(ping->version), (uint32_t)(ping->wait_fd),
        (uint32_t)(ping->wait_flags), (uint32_t)(ping->direction),
    };

    drm_virtgpu_execbuffer execbuffer = {
        .flags = 0,
        .size = sizeof(words),
        .command = (uint64_t)(uintptr_t)(words),
        .bo_handles = (uint64_t)(uintptr_t)(&info->resp_bo),
        .num_bo_handles = 1,
        .fence_fd = -1,
    };

    int queue_work_err = drmIoctl(info->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);

    if (queue_work_err) {
        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
                queue_work_err, strerror(errno));
        return false;
    }

    // Wait for the host to finish writing the response page before reading it.
    struct drm_virtgpu_3d_wait waitcmd;
    memset(&waitcmd, 0, sizeof(waitcmd));
    waitcmd.handle = info->resp_bo;

    int ret = drmIoctl(info->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
    if (ret) {
        ALOGE("%s: DRM_IOCTL_VIRTGPU_WAIT failed with %d (%s)\n", __func__, errno, strerror(errno));
        return false;
    }

    uint32_t* respWords = (uint32_t*)info->resp_mapped_ptr;

    ping->offset = buildu64(respWords[0], respWords[1]);
    ping->size = buildu64(respWords[2], respWords[3]);
    ping->metadata = buildu64(respWords[4], respWords[5]);
    ping->version = respWords[6];
    ping->wait_fd = respWords[7];
    ping->wait_flags = respWords[8];
    ping->direction = respWords[9];

    return true;
}

#endif  // VIRTIO_GPU