/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __COMMON_HOST_CONNECTION_H
#define __COMMON_HOST_CONNECTION_H

#include "EmulatorFeatureInfo.h"
#include "IOStream.h"
#include "renderControl_enc.h"
#include "ChecksumCalculator.h"
#include "goldfish_dma.h"

#include <cutils/native_handle.h>
#include <log/log.h>  // for ALOGE/ALOGV used below

#ifdef GOLDFISH_VULKAN
#include <mutex>
#else
#include <utils/threads.h>
#endif

#include <memory>
#include <string>

class GLEncoder;
struct gl_client_context_t;
class GL2Encoder;
struct gl2_client_context_t;

namespace goldfish_vk {
class VkEncoder;
}

// ExtendedRCEncoderContext is an extended version of renderControl_encoder_context_t
// that is used to track the emulator features available to this connection.
class ExtendedRCEncoderContext : public renderControl_encoder_context_t {
public:
    ExtendedRCEncoderContext(IOStream *stream, ChecksumCalculator *checksumCalculator)
        : renderControl_encoder_context_t(stream, checksumCalculator),
          m_dmaCxt(NULL), m_dmaPtr(NULL), m_dmaPhysAddr(0) { }
    void setSyncImpl(SyncImpl syncImpl) { m_featureInfo.syncImpl = syncImpl; }
    void setDmaImpl(DmaImpl dmaImpl) { m_featureInfo.dmaImpl = dmaImpl; }
    void setHostComposition(HostComposition hostComposition) {
        m_featureInfo.hostComposition = hostComposition; }
    bool hasNativeSync() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V2; }
    bool hasNativeSyncV3() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V3; }
    bool hasNativeSyncV4() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V4; }
    bool hasVirtioGpuNativeSync() const { return m_featureInfo.hasVirtioGpuNativeSync; }
    bool hasHostCompositionV1() const {
        return m_featureInfo.hostComposition == HOST_COMPOSITION_V1; }
    bool hasHostCompositionV2() const {
        return m_featureInfo.hostComposition == HOST_COMPOSITION_V2; }
    bool hasYUVCache() const {
        return m_featureInfo.hasYUVCache; }
    bool hasAsyncUnmapBuffer() const {
        return m_featureInfo.hasAsyncUnmapBuffer; }
    DmaImpl getDmaVersion() const { return m_featureInfo.dmaImpl; }
    void bindDmaContext(struct goldfish_dma_context* cxt) { m_dmaCxt = cxt; }
    void bindDmaDirectly(void* dmaPtr, uint64_t dmaPhysAddr) {
        m_dmaPtr = dmaPtr;
        m_dmaPhysAddr = dmaPhysAddr;
    }
    virtual uint64_t lockAndWriteDma(void* data, uint32_t size) {
        if (m_dmaPtr && m_dmaPhysAddr) {
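            // Direct-mapped path: a buffer was bound via bindDmaDirectly(),
            // so the data is copied straight into that shared region and its
            // physical address (m_dmaPhysAddr) is handed back to the caller.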
            memcpy(m_dmaPtr, data, size);
            return m_dmaPhysAddr;
        } else if (m_dmaCxt) {
            return writeGoldfishDma(data, size, m_dmaCxt);
        } else {
            ALOGE("%s: ERROR: No DMA context bound!", __func__);
            return 0;
        }
    }
    void setGLESMaxVersion(GLESMaxVersion ver) { m_featureInfo.glesMaxVersion = ver; }
    GLESMaxVersion getGLESMaxVersion() const { return m_featureInfo.glesMaxVersion; }
    bool hasDirectMem() const {
#ifdef HOST_BUILD
        // Unit tests do not support restoring "guest" RAM because there is no VM.
        return false;
#else
        return m_featureInfo.hasDirectMem;
#endif
    }

    const EmulatorFeatureInfo* featureInfo_const() const { return &m_featureInfo; }
    EmulatorFeatureInfo* featureInfo() { return &m_featureInfo; }
private:
    static uint64_t writeGoldfishDma(void* data, uint32_t size,
                                     struct goldfish_dma_context* dmaCxt) {
        ALOGV("%s(data=%p, size=%u): call", __func__, data, size);

        goldfish_dma_write(dmaCxt, data, size);
        uint64_t paddr = goldfish_dma_guest_paddr(dmaCxt);

        ALOGV("%s: paddr=0x%llx", __func__, (unsigned long long)paddr);
        return paddr;
    }

    EmulatorFeatureInfo m_featureInfo;
    struct goldfish_dma_context* m_dmaCxt;
    void* m_dmaPtr;
    uint64_t m_dmaPhysAddr;
};

// Abstraction for gralloc handle conversion
class Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc, int width, int height, uint32_t glformat);
    virtual uint32_t getHostHandle(native_handle_t const* handle) = 0;
    virtual int getFormat(native_handle_t const* handle) = 0;
    virtual size_t getAllocatedSize(native_handle_t const* handle) = 0;
    virtual ~Gralloc() {}
};

// Abstraction for the process pipe helper
class ProcessPipe {
public:
    virtual bool processPipeInit(HostConnectionType connType, renderControl_encoder_context_t *rcEnc) = 0;
    virtual ~ProcessPipe() {}
};

struct EGLThreadInfo;

class HostConnection
{
public:
    static HostConnection *get();
    static HostConnection *getWithThreadInfo(EGLThreadInfo* tInfo);
    static void exit();

    static std::unique_ptr<HostConnection> createUnique();
    HostConnection(const HostConnection&) = delete;

    ~HostConnection();

    HostConnectionType connectionType() const {
        return m_connectionType;
    }

    GLEncoder *glEncoder();
    GL2Encoder *gl2Encoder();
    goldfish_vk::VkEncoder *vkEncoder();
    ExtendedRCEncoderContext *rcEncoder();

    // Returns the rendernode fd if the stream is virtio-gpu based.
    // Otherwise, attempts to create a rendernode fd, assuming
    // virtio-gpu is available.
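    //
    // Minimal call-site sketch (illustrative only; the fallback logic depends
    // on how the gralloc/virtio-gpu integration in this tree is wired, and it
    // assumes the usual negative-fd-on-error convention):
    //
    //   HostConnection* hc = HostConnection::get();
    //   int rendernodeFd = hc ? hc->getOrCreateRendernodeFd() : -1;
    //   if (rendernodeFd < 0) {
    //       // fall back to a non-virtio-gpu path
    //   }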
    int getOrCreateRendernodeFd();

    ChecksumCalculator *checksumHelper() { return &m_checksumHelper; }
    Gralloc *grallocHelper() { return m_grallocHelper; }

    void flush() {
        if (m_stream) {
            m_stream->flush();
        }
    }

    void setGrallocOnly(bool gralloc_only) {
        m_grallocOnly = gralloc_only;
    }

    bool isGrallocOnly() const { return m_grallocOnly; }

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wthread-safety-analysis"
#endif
    void lock() const { m_lock.lock(); }
    void unlock() const { m_lock.unlock(); }
#ifdef __clang__
#pragma clang diagnostic pop
#endif

private:
    // Returns nullptr if the connection failed; the partially constructed
    // HostConnection is deleted in that case.
    static std::unique_ptr<HostConnection> connect();

    HostConnection();
    static gl_client_context_t *s_getGLContext();
    static gl2_client_context_t *s_getGL2Context();

    const std::string& queryGLExtensions(ExtendedRCEncoderContext *rcEnc);
    // setChecksumHelper initializes the GL communication protocol for checksums;
    // it should be called when m_rcEnc is created.
    void setChecksumHelper(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetGLESMaxVersion(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetNoErrorState(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDirectMemSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetYUVCache(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVirtioGpuNext(ExtendedRCEncoderContext *rcEnc);
    void queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext *rcEnc);

private:
    HostConnectionType m_connectionType;
    GrallocType m_grallocType;

    std::unique_ptr<IOStream> m_stream;
    std::unique_ptr<GLEncoder> m_glEnc;
    std::unique_ptr<GL2Encoder> m_gl2Enc;
    std::unique_ptr<goldfish_vk::VkEncoder> m_vkEnc;
    std::unique_ptr<ExtendedRCEncoderContext> m_rcEnc;

    ChecksumCalculator m_checksumHelper;
    Gralloc* m_grallocHelper = nullptr;
    ProcessPipe* m_processPipe = nullptr;
    std::string m_glExtensions;
    bool m_grallocOnly;
    bool m_noHostError;
#ifdef GOLDFISH_VULKAN
    mutable std::mutex m_lock;
#else
    mutable android::Mutex m_lock;
#endif
    int m_rendernodeFd;
    bool m_rendernodeFdOwned;
};

#endif  // __COMMON_HOST_CONNECTION_H