path: root/system/OpenglSystemCommon/HostConnection.h
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COMMON_HOST_CONNECTION_H
#define __COMMON_HOST_CONNECTION_H

#include "EmulatorFeatureInfo.h"
#include "IOStream.h"
#include "renderControl_enc.h"
#include "ChecksumCalculator.h"
#ifdef __Fuchsia__
struct goldfish_dma_context;
#else
#include "goldfish_dma.h"
#endif

#include <cutils/native_handle.h>

#ifdef GFXSTREAM
#include <mutex>
#else
#include <utils/threads.h>
#endif

#include <memory>
#include <cstring>

class GLEncoder;
struct gl_client_context_t;
class GL2Encoder;
struct gl2_client_context_t;

namespace goldfish_vk {
class VkEncoder;
}

// ExtendedRCEncoderContext is an extended version of renderControl_encoder_context_t
// that also tracks which emulator features are available.
class ExtendedRCEncoderContext : public renderControl_encoder_context_t {
public:
    ExtendedRCEncoderContext(IOStream *stream, ChecksumCalculator *checksumCalculator)
        : renderControl_encoder_context_t(stream, checksumCalculator),
          m_dmaCxt(NULL), m_dmaPtr(NULL), m_dmaPhysAddr(0) { }
    void setSyncImpl(SyncImpl syncImpl) { m_featureInfo.syncImpl = syncImpl; }
    void setDmaImpl(DmaImpl dmaImpl) { m_featureInfo.dmaImpl = dmaImpl; }
    void setHostComposition(HostComposition hostComposition) {
        m_featureInfo.hostComposition = hostComposition;
    }
    bool hasNativeSync() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V2; }
    bool hasNativeSyncV3() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V3; }
    bool hasNativeSyncV4() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V4; }
    bool hasVirtioGpuNativeSync() const { return m_featureInfo.hasVirtioGpuNativeSync; }
    bool hasHostCompositionV1() const {
        return m_featureInfo.hostComposition == HOST_COMPOSITION_V1;
    }
    bool hasHostCompositionV2() const {
        return m_featureInfo.hostComposition == HOST_COMPOSITION_V2;
    }
    bool hasYUVCache() const {
        return m_featureInfo.hasYUVCache;
    }
    bool hasAsyncUnmapBuffer() const {
        return m_featureInfo.hasAsyncUnmapBuffer;
    }
    bool hasHostSideTracing() const {
        return m_featureInfo.hasHostSideTracing;
    }
    bool hasAsyncFrameCommands() const {
        return m_featureInfo.hasAsyncFrameCommands;
    }
    bool hasSyncBufferData() const {
        return m_featureInfo.hasSyncBufferData;
    }
    bool hasHWCMultiConfigs() const {
        return m_featureInfo.hasHWCMultiConfigs;
    }
    DmaImpl getDmaVersion() const { return m_featureInfo.dmaImpl; }
    void bindDmaContext(struct goldfish_dma_context* cxt) { m_dmaCxt = cxt; }
    void bindDmaDirectly(void* dmaPtr, uint64_t dmaPhysAddr) {
        m_dmaPtr = dmaPtr;
        m_dmaPhysAddr = dmaPhysAddr;
    }
    virtual uint64_t lockAndWriteDma(void* data, uint32_t size) {
        if (m_dmaPtr && m_dmaPhysAddr) {
            if (data != m_dmaPtr) {
                memcpy(m_dmaPtr, data, size);
            }
            return m_dmaPhysAddr;
        } else if (m_dmaCxt) {
            return writeGoldfishDma(data, size, m_dmaCxt);
        } else {
            ALOGE("%s: ERROR: No DMA context bound!", __func__);
            return 0;
        }
    }
    void setGLESMaxVersion(GLESMaxVersion ver) { m_featureInfo.glesMaxVersion = ver; }
    GLESMaxVersion getGLESMaxVersion() const { return m_featureInfo.glesMaxVersion; }
    bool hasDirectMem() const {
#ifdef HOST_BUILD
        // Unit tests do not support restoring "guest" RAM because there is no VM.
        return false;
#else
        return m_featureInfo.hasDirectMem;
#endif
    }

    const EmulatorFeatureInfo* featureInfo_const() const { return &m_featureInfo; }
    EmulatorFeatureInfo* featureInfo() { return &m_featureInfo; }
private:
    static uint64_t writeGoldfishDma(void* data, uint32_t size,
                                     struct goldfish_dma_context* dmaCxt) {
#ifdef __Fuchsia__
        ALOGE("%s Not implemented!", __FUNCTION__);
        return 0u;
#else
        ALOGV("%s(data=%p, size=%u): call", __func__, data, size);

        goldfish_dma_write(dmaCxt, data, size);
        uint64_t paddr = goldfish_dma_guest_paddr(dmaCxt);

        ALOGV("%s: paddr=0x%llx", __func__, (unsigned long long)paddr);
        return paddr;
#endif
    }

    EmulatorFeatureInfo m_featureInfo;
    struct goldfish_dma_context* m_dmaCxt;
    void* m_dmaPtr;
    uint64_t m_dmaPhysAddr;
};
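
// Illustrative sketch only (not part of this header's API surface): once a DMA
// region has been mapped and bound, uploads can go through lockAndWriteDma()
// instead of the plain command stream. The pointer/size names below are
// hypothetical placeholders.
//
//   ExtendedRCEncoderContext* rcEnc = ...;        // e.g. HostConnection::get()->rcEncoder()
//   rcEnc->bindDmaDirectly(mappedPtr, physAddr);  // previously mapped guest memory
//   uint64_t paddr = rcEnc->lockAndWriteDma(pixels, pixelBytes);
//   if (!paddr) {
//       // No DMA context or direct mapping bound; fall back to the stream path.
//   }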

// Abstraction for gralloc handle conversion
class Gralloc {
public:
    virtual uint32_t createColorBuffer(
        ExtendedRCEncoderContext* rcEnc, int width, int height, uint32_t glformat) = 0;
    virtual uint32_t getHostHandle(native_handle_t const* handle) = 0;
    virtual int getFormat(native_handle_t const* handle) = 0;
    virtual size_t getAllocatedSize(native_handle_t const* handle) = 0;
    virtual ~Gralloc() {}
};
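
// Illustrative sketch only: a concrete backend subclasses Gralloc and maps its
// native_handle_t layout to host color-buffer handles. The ExampleGralloc name
// and the exampleGet*() helpers are assumptions for illustration, and the exact
// renderControl call used to create the color buffer is likewise assumed here.
//
//   class ExampleGralloc : public Gralloc {
//   public:
//       uint32_t createColorBuffer(ExtendedRCEncoderContext* rcEnc,
//                                  int width, int height, uint32_t glformat) override {
//           return rcEnc->rcCreateColorBuffer(width, height, glformat);
//       }
//       uint32_t getHostHandle(native_handle_t const* handle) override {
//           return exampleGetHostHandle(handle);      // host handle stored in buffer metadata
//       }
//       int getFormat(native_handle_t const* handle) override {
//           return exampleGetFormat(handle);          // e.g. a PIXEL_FORMAT_* value
//       }
//       size_t getAllocatedSize(native_handle_t const* handle) override {
//           return exampleGetAllocatedSize(handle);
//       }
//   };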

// Abstraction for process pipe helper
class ProcessPipe {
public:
    virtual bool processPipeInit(HostConnectionType connType, renderControl_encoder_context_t *rcEnc) = 0;
    virtual ~ProcessPipe() {}
};

struct EGLThreadInfo;

// Rutabaga capsets.
#define VIRTIO_GPU_CAPSET_NONE 0
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#define VIRTIO_GPU_CAPSET_GFXSTREAM 3
#define VIRTIO_GPU_CAPSET_VENUS 4
#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5

class HostConnection
{
public:
    static HostConnection *get();
    static HostConnection *getOrCreate(uint32_t capset_id);

    static HostConnection *getWithThreadInfo(EGLThreadInfo* tInfo,
                                             uint32_t capset_id = VIRTIO_GPU_CAPSET_NONE);
    static void exit();
    static void exitUnclean(); // for testing purposes

    static std::unique_ptr<HostConnection> createUnique(uint32_t capset_id = VIRTIO_GPU_CAPSET_NONE);
    HostConnection(const HostConnection&) = delete;

    ~HostConnection();

    GLEncoder *glEncoder();
    GL2Encoder *gl2Encoder();
    goldfish_vk::VkEncoder *vkEncoder();
    ExtendedRCEncoderContext *rcEncoder();

    int getRendernodeFd() { return m_rendernodeFd; }

    ChecksumCalculator *checksumHelper() { return &m_checksumHelper; }
    Gralloc *grallocHelper() { return m_grallocHelper; }

    void flush() {
        if (m_stream) {
            m_stream->flush();
        }
    }

    void setGrallocOnly(bool gralloc_only) {
        m_grallocOnly = gralloc_only;
    }

    bool isGrallocOnly() const { return m_grallocOnly; }

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wthread-safety-analysis"
#endif
    void lock() const { m_lock.lock(); }
    void unlock() const { m_lock.unlock(); }
#ifdef __clang__
#pragma clang diagnostic pop
#endif

    bool exitUncleanly; // for testing purposes

private:
    // Returns nullptr if the connection could not be established; any partially
    // constructed connection is destroyed before returning.
    static std::unique_ptr<HostConnection> connect(uint32_t capset_id);

    HostConnection();
    static gl_client_context_t  *s_getGLContext();
    static gl2_client_context_t *s_getGL2Context();

    const std::string& queryGLExtensions(ExtendedRCEncoderContext *rcEnc);
    // setChecksumHelper initializes the GL communication protocol for checksums;
    // it should be called when m_rcEnc is created.
    void setChecksumHelper(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetSyncImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDmaImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetGLESMaxVersion(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetNoErrorState(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHostCompositionImpl(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDirectMemSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetDeferredVulkanCommandsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanNullOptionalStringsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanCreateResourcesWithRequirementsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanIgnoredHandles(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetYUVCache(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetAsyncUnmapBuffer(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVirtioGpuNext(ExtendedRCEncoderContext *rcEnc);
    void queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetSyncBufferData(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetVulkanAsyncQsri(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetReadColorBufferDma(ExtendedRCEncoderContext *rcEnc);
    void queryAndSetHWCMultiConfigs(ExtendedRCEncoderContext* rcEnc);
    GLint queryVersion(ExtendedRCEncoderContext* rcEnc);

private:
    HostConnectionType m_connectionType;
    GrallocType m_grallocType;

    // intrusively refcounted
    IOStream* m_stream = nullptr;

    std::unique_ptr<GLEncoder> m_glEnc;
    std::unique_ptr<GL2Encoder> m_gl2Enc;

    // intrusively refcounted
    goldfish_vk::VkEncoder* m_vkEnc = nullptr;
    std::unique_ptr<ExtendedRCEncoderContext> m_rcEnc;

    ChecksumCalculator m_checksumHelper;
    Gralloc* m_grallocHelper = nullptr;
    ProcessPipe* m_processPipe = nullptr;
    std::string m_glExtensions;
    bool m_grallocOnly;
    bool m_noHostError;
#ifdef GFXSTREAM
    mutable std::mutex m_lock;
#else
    mutable android::Mutex m_lock;
#endif
    int m_rendernodeFd;
};
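
// Illustrative usage sketch, not normative: callers typically fetch the
// per-thread connection, take the lock around encoder access, and flush the
// stream when done. The capset choice and the feature check below are only
// examples; error handling is elided.
//
//   HostConnection* hc = HostConnection::getOrCreate(VIRTIO_GPU_CAPSET_GFXSTREAM);
//   if (hc) {
//       hc->lock();
//       ExtendedRCEncoderContext* rcEnc = hc->rcEncoder();
//       if (rcEnc && rcEnc->hasHostCompositionV2()) {
//           // ... encode host composition commands through rcEnc ...
//       }
//       hc->flush();
//       hc->unlock();
//   }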

#endif  // __COMMON_HOST_CONNECTION_H