aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorAlistair Delva <adelva@google.com>2021-03-05 23:49:21 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2021-03-05 23:49:21 +0000
commit3f3fc93f2688427bfe12d90f8236e20e28f29e49 (patch)
tree1418aa452ff3c020803ac1248aedd190ff7228ec /src
parentd85f0b2f71ab7caf78c3ad8f6a8c528a0a47eaa4 (diff)
parent6ff9940a7463feb3c3f4e4a21e6c84ae4961f9ec (diff)
downloadvirglrenderer-3f3fc93f2688427bfe12d90f8236e20e28f29e49.tar.gz
Original change: https://android-review.googlesource.com/c/platform/external/virglrenderer/+/1621572 MUST ONLY BE SUBMITTED BY AUTOMERGER Change-Id: I7eedb466c64f654ca32077f7c5406a5b0356e367
Diffstat (limited to 'src')
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_cache.c4
-rw-r--r--src/gallium/auxiliary/util/u_format.csv30
-rw-r--r--src/gallium/auxiliary/util/u_format.h14
-rw-r--r--src/meson.build4
-rw-r--r--src/virgl_context.h36
-rw-r--r--src/virgl_hw.h33
-rw-r--r--src/virgl_protocol.h25
-rw-r--r--src/virgl_resource.c18
-rw-r--r--src/virgl_resource.h18
-rw-r--r--src/virgl_util.c41
-rw-r--r--src/virgl_util.h41
-rw-r--r--src/virglrenderer.c164
-rw-r--r--src/virglrenderer.h23
-rw-r--r--src/virglrenderer_hw.h2
-rw-r--r--src/vrend_debug.c1
-rw-r--r--src/vrend_debug.h2
-rw-r--r--src/vrend_decode.c113
-rw-r--r--src/vrend_formats.c84
-rw-r--r--src/vrend_renderer.c2113
-rw-r--r--src/vrend_renderer.h54
-rw-r--r--src/vrend_winsys.c9
-rw-r--r--src/vrend_winsys.h6
-rw-r--r--src/vrend_winsys_egl.c176
-rw-r--r--src/vrend_winsys_egl.h14
-rw-r--r--src/vrend_winsys_gbm.c3
-rw-r--r--src/vrend_winsys_glx.c12
-rw-r--r--src/vrend_winsys_glx.h1
27 files changed, 2008 insertions, 1033 deletions
diff --git a/src/gallium/auxiliary/cso_cache/cso_cache.c b/src/gallium/auxiliary/cso_cache/cso_cache.c
index 232fa9ec..0bd124e6 100644
--- a/src/gallium/auxiliary/cso_cache/cso_cache.c
+++ b/src/gallium/auxiliary/cso_cache/cso_cache.c
@@ -54,8 +54,10 @@ static unsigned hash_key(const void *key, unsigned key_size)
/* I'm sure this can be improved on:
*/
- for (i = 0; i < key_size/4; i++)
+ for (i = 0; i < key_size/4; i++) {
+ hash = (hash << 7) | (hash >> 25);
hash ^= ikey[i];
+ }
return hash;
}
diff --git a/src/gallium/auxiliary/util/u_format.csv b/src/gallium/auxiliary/util/u_format.csv
index 2409a610..1d743a69 100644
--- a/src/gallium/auxiliary/util/u_format.csv
+++ b/src/gallium/auxiliary/util/u_format.csv
@@ -409,3 +409,33 @@ PIPE_FORMAT_ETC2_R11_UNORM , etc, 4, 4, x64, , , , x001, rgb
PIPE_FORMAT_ETC2_R11_SNORM , etc, 4, 4, x64, , , , x001, rgb
PIPE_FORMAT_ETC2_RG11_UNORM , etc, 4, 4, x128, , , , xy01, rgb
PIPE_FORMAT_ETC2_RG11_SNORM , etc, 4, 4, x128, , , , xy01, rgb
+
+PIPE_FORMAT_ASTC_4x4 , astc, 4, 4, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_5x4 , astc, 5, 4, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_5x5 , astc, 5, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_6x5 , astc, 6, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_6x6 , astc, 6, 6, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_8x5 , astc, 8, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_8x6 , astc, 8, 6, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_8x8 , astc, 8, 8, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x5 , astc,10, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x6 , astc,10, 6, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x8 , astc,10, 8, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x10 , astc,10,10, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_12x10 , astc,12,10, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_12x12 , astc,12,12, x128, , , , xyzw, rgb
+
+PIPE_FORMAT_ASTC_4x4_SRGB , astc, 4, 4, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_5x4_SRGB , astc, 5, 4, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_5x5_SRGB , astc, 5, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_6x5_SRGB , astc, 6, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_6x6_SRGB , astc, 6, 6, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_8x5_SRGB , astc, 8, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_8x6_SRGB , astc, 8, 6, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_8x8_SRGB , astc, 8, 8, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x5_SRGB , astc,10, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x6_SRGB , astc,10, 6, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x8_SRGB , astc,10, 8, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x10_SRGB , astc,10,10, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_12x10_SRGB , astc,12,10, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_12x12_SRGB , astc,12,12, x128, , , , xyzw, srgb \ No newline at end of file
diff --git a/src/gallium/auxiliary/util/u_format.h b/src/gallium/auxiliary/util/u_format.h
index 1990c606..978b9201 100644
--- a/src/gallium/auxiliary/util/u_format.h
+++ b/src/gallium/auxiliary/util/u_format.h
@@ -83,10 +83,19 @@ enum util_format_layout {
*/
UTIL_FORMAT_LAYOUT_BPTC = 7,
+ UTIL_FORMAT_LAYOUT_ASTC = 8,
+
+ UTIL_FORMAT_LAYOUT_ATC = 9,
+
+ /** Formats with 2 or more planes. */
+ UTIL_FORMAT_LAYOUT_PLANAR2 = 10,
+ UTIL_FORMAT_LAYOUT_PLANAR3 = 11,
+
+ UTIL_FORMAT_LAYOUT_FXT1 = 12,
/**
* Everything else that doesn't fit in any of the above layouts.
*/
- UTIL_FORMAT_LAYOUT_OTHER = 8
+ UTIL_FORMAT_LAYOUT_OTHER = 13,
};
@@ -299,6 +308,9 @@ util_format_is_compressed(enum pipe_format format)
case UTIL_FORMAT_LAYOUT_RGTC:
case UTIL_FORMAT_LAYOUT_ETC:
case UTIL_FORMAT_LAYOUT_BPTC:
+ case UTIL_FORMAT_LAYOUT_ASTC:
+ case UTIL_FORMAT_LAYOUT_ATC:
+ case UTIL_FORMAT_LAYOUT_FXT1:
/* XXX add other formats in the future */
return TRUE;
default:
diff --git a/src/meson.build b/src/meson.build
index d854027f..257d7dc7 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -86,6 +86,10 @@ if with_tracing == 'perfetto'
virgl_depends += [vperfetto_min_dep]
endif
+if with_tracing == 'percetto'
+ virgl_depends += [percetto_dep]
+endif
+
virgl_sources += vrend_sources
if have_egl
diff --git a/src/virgl_context.h b/src/virgl_context.h
index fa39fe99..ea86b31e 100644
--- a/src/virgl_context.h
+++ b/src/virgl_context.h
@@ -29,6 +29,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "virglrenderer_hw.h"
#include "virgl_resource.h"
struct vrend_transfer_info;
@@ -42,9 +43,17 @@ struct virgl_context_blob {
struct pipe_resource *pipe_resource;
} u;
+ uint32_t map_info;
+
void *renderer_data;
};
+struct virgl_context;
+
+typedef void (*virgl_context_fence_retire)(struct virgl_context *ctx,
+ uint64_t queue_id,
+ void *fence_cookie);
+
/**
* Base class for renderer contexts. For example, vrend_decode_ctx is a
* subclass of virgl_context.
@@ -52,6 +61,18 @@ struct virgl_context_blob {
struct virgl_context {
uint32_t ctx_id;
+ enum virgl_renderer_capset capset_id;
+
+ /*
+ * Each fence goes through submitted, signaled, and retired. This callback
+ * is called from virgl_context::retire_fences to retire signaled fences of
+ * each queue. When a queue has multiple signaled fences by the time
+ * virgl_context::retire_fences is called, this callback might not be called
+ * on all fences but only on the latest one, depending on the flags of the
+ * fences.
+ */
+ virgl_context_fence_retire fence_retire;
+
void (*destroy)(struct virgl_context *ctx);
void (*attach_resource)(struct virgl_context *ctx,
@@ -84,6 +105,21 @@ struct virgl_context {
int (*submit_cmd)(struct virgl_context *ctx,
const void *buffer,
size_t size);
+
+ /*
+ * Return an fd that is readable whenever there is any signaled fence in
+ * any queue, or -1 if not supported.
+ */
+ int (*get_fencing_fd)(struct virgl_context *ctx);
+
+ /* retire signaled fences of all queues */
+ void (*retire_fences)(struct virgl_context *ctx);
+
+ /* submit a fence to the queue identified by queue_id */
+ int (*submit_fence)(struct virgl_context *ctx,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie);
};
struct virgl_context_foreach_args {
diff --git a/src/virgl_hw.h b/src/virgl_hw.h
index 58dafd99..2cdbf60f 100644
--- a/src/virgl_hw.h
+++ b/src/virgl_hw.h
@@ -358,6 +358,35 @@ enum virgl_formats {
VIRGL_FORMAT_ETC2_RG11_UNORM = 277,
VIRGL_FORMAT_ETC2_RG11_SNORM = 278,
+ VIRGL_FORMAT_ASTC_4x4 = 279,
+ VIRGL_FORMAT_ASTC_5x4 = 280,
+ VIRGL_FORMAT_ASTC_5x5 = 281,
+ VIRGL_FORMAT_ASTC_6x5 = 282,
+ VIRGL_FORMAT_ASTC_6x6 = 283,
+ VIRGL_FORMAT_ASTC_8x5 = 284,
+ VIRGL_FORMAT_ASTC_8x6 = 285,
+ VIRGL_FORMAT_ASTC_8x8 = 286,
+ VIRGL_FORMAT_ASTC_10x5 = 287,
+ VIRGL_FORMAT_ASTC_10x6 = 288,
+ VIRGL_FORMAT_ASTC_10x8 = 289,
+ VIRGL_FORMAT_ASTC_10x10 = 290,
+ VIRGL_FORMAT_ASTC_12x10 = 291,
+ VIRGL_FORMAT_ASTC_12x12 = 292,
+ VIRGL_FORMAT_ASTC_4x4_SRGB = 293,
+ VIRGL_FORMAT_ASTC_5x4_SRGB = 294,
+ VIRGL_FORMAT_ASTC_5x5_SRGB = 295,
+ VIRGL_FORMAT_ASTC_6x5_SRGB = 296,
+ VIRGL_FORMAT_ASTC_6x6_SRGB = 297,
+ VIRGL_FORMAT_ASTC_8x5_SRGB = 298,
+ VIRGL_FORMAT_ASTC_8x6_SRGB = 299,
+ VIRGL_FORMAT_ASTC_8x8_SRGB = 300,
+ VIRGL_FORMAT_ASTC_10x5_SRGB = 301,
+ VIRGL_FORMAT_ASTC_10x6_SRGB = 302,
+ VIRGL_FORMAT_ASTC_10x8_SRGB = 303,
+ VIRGL_FORMAT_ASTC_10x10_SRGB = 304,
+ VIRGL_FORMAT_ASTC_12x10_SRGB = 305,
+ VIRGL_FORMAT_ASTC_12x12_SRGB = 306,
+
VIRGL_FORMAT_R10G10B10X2_UNORM = 308,
VIRGL_FORMAT_A4B4G4R4_UNORM = 311,
@@ -407,6 +436,9 @@ enum virgl_formats {
/* These are used by the capability_bits_v2 field in virgl_caps_v2. */
#define VIRGL_CAP_V2_BLEND_EQUATION (1 << 0)
+#define VIRGL_CAP_V2_UNTYPED_RESOURCE (1 << 1)
+#define VIRGL_CAP_V2_VIDEO_MEMORY (1 << 2)
+#define VIRGL_CAP_V2_MEMINFO (1 << 3)
/* virgl bind flags - these are compatible with mesa 10.5 gallium.
* but are fixed, no other should be passed to virgl either.
@@ -558,6 +590,7 @@ struct virgl_caps_v2 {
uint32_t host_feature_check_version;
struct virgl_supported_format_mask supported_readback_formats;
struct virgl_supported_format_mask scanout;
+ uint32_t max_video_memory;
uint32_t capability_bits_v2;
};
diff --git a/src/virgl_protocol.h b/src/virgl_protocol.h
index a230898c..d8d7b16d 100644
--- a/src/virgl_protocol.h
+++ b/src/virgl_protocol.h
@@ -35,6 +35,16 @@ struct virgl_host_query_state {
uint64_t result;
};
+struct virgl_memory_info
+{
+ uint32_t total_device_memory; /**< size of device memory, e.g. VRAM */
+ uint32_t avail_device_memory; /**< free device memory at the moment */
+ uint32_t total_staging_memory; /**< size of staging memory, e.g. GART */
+ uint32_t avail_staging_memory; /**< free staging memory at the moment */
+ uint32_t device_memory_evicted; /**< size of memory evicted (monotonic counter) */
+ uint32_t nr_device_memory_evictions; /**< # of evictions (monotonic counter) */
+};
+
enum virgl_object_type {
VIRGL_OBJECT_NULL,
VIRGL_OBJECT_BLEND,
@@ -102,6 +112,8 @@ enum virgl_context_cmd {
VIRGL_CCMD_SET_TWEAKS,
VIRGL_CCMD_CLEAR_TEXTURE,
VIRGL_CCMD_PIPE_RESOURCE_CREATE,
+ VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE,
+ VIRGL_CCMD_GET_MEMORY_INFO,
VIRGL_MAX_COMMANDS
};
@@ -636,4 +648,17 @@ enum vrend_tweak_type {
#define VIRGL_PIPE_RES_CREATE_FLAGS 10
#define VIRGL_PIPE_RES_CREATE_BLOB_ID 11
+/* VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE */
+#define VIRGL_PIPE_RES_SET_TYPE_SIZE(nplanes) (8 + (nplanes) * 2)
+#define VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE 1
+#define VIRGL_PIPE_RES_SET_TYPE_FORMAT 2
+#define VIRGL_PIPE_RES_SET_TYPE_BIND 3
+#define VIRGL_PIPE_RES_SET_TYPE_WIDTH 4
+#define VIRGL_PIPE_RES_SET_TYPE_HEIGHT 5
+#define VIRGL_PIPE_RES_SET_TYPE_USAGE 6
+#define VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO 7
+#define VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI 8
+#define VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(plane) (9 + (plane) * 2)
+#define VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(plane) (10 + (plane) * 2)
+
#endif
diff --git a/src/virgl_resource.c b/src/virgl_resource.c
index e3c9423b..c58dd708 100644
--- a/src/virgl_resource.c
+++ b/src/virgl_resource.c
@@ -102,7 +102,7 @@ virgl_resource_create(uint32_t res_id)
return res;
}
-int
+struct virgl_resource *
virgl_resource_create_from_pipe(uint32_t res_id,
struct pipe_resource *pres,
const struct iovec *iov,
@@ -112,7 +112,7 @@ virgl_resource_create_from_pipe(uint32_t res_id,
res = virgl_resource_create(res_id);
if (!res)
- return ENOMEM;
+ return NULL;
/* take ownership */
res->pipe_resource = pres;
@@ -120,10 +120,10 @@ virgl_resource_create_from_pipe(uint32_t res_id,
res->iov = iov;
res->iov_count = iov_count;
- return 0;
+ return res;
}
-int
+struct virgl_resource *
virgl_resource_create_from_fd(uint32_t res_id,
enum virgl_resource_fd_type fd_type,
int fd,
@@ -136,7 +136,7 @@ virgl_resource_create_from_fd(uint32_t res_id,
res = virgl_resource_create(res_id);
if (!res)
- return ENOMEM;
+ return NULL;
res->fd_type = fd_type;
/* take ownership */
@@ -145,10 +145,10 @@ virgl_resource_create_from_fd(uint32_t res_id,
res->iov = iov;
res->iov_count = iov_count;
- return 0;
+ return res;
}
-int
+struct virgl_resource *
virgl_resource_create_from_iov(uint32_t res_id,
const struct iovec *iov,
int iov_count)
@@ -160,12 +160,12 @@ virgl_resource_create_from_iov(uint32_t res_id,
res = virgl_resource_create(res_id);
if (!res)
- return ENOMEM;
+ return NULL;
res->iov = iov;
res->iov_count = iov_count;
- return 0;
+ return res;
}
void
diff --git a/src/virgl_resource.h b/src/virgl_resource.h
index 15efa8fa..42983cdc 100644
--- a/src/virgl_resource.h
+++ b/src/virgl_resource.h
@@ -44,6 +44,16 @@ enum virgl_resource_fd_type {
* and imported into a vrend_decode_ctx to create a vrend_resource.
*
* It is also possible to create a virgl_resource from a context object.
+ *
+ * The underlying storage of a virgl_resource is provided by a pipe_resource
+ * and/or a fd. When it is provided by a pipe_resource, the virgl_resource is
+ * said to be typed because pipe_resource also provides the type information.
+ *
+ * Conventional resources are always typed. Blob resources by definition do
+ * not have nor need type information, but those created from vrend_decode_ctx
+ * objects are typed. That should be considered a convenience rather than
+ * something to be relied upon. Contexts must not assume that every resource is
+ * typed when interop is expected.
*/
struct virgl_resource {
uint32_t res_id;
@@ -56,6 +66,8 @@ struct virgl_resource {
const struct iovec *iov;
int iov_count;
+ uint32_t map_info;
+
void *private_data;
};
@@ -84,20 +96,20 @@ virgl_resource_table_cleanup(void);
void
virgl_resource_table_reset(void);
-int
+struct virgl_resource *
virgl_resource_create_from_pipe(uint32_t res_id,
struct pipe_resource *pres,
const struct iovec *iov,
int iov_count);
-int
+struct virgl_resource *
virgl_resource_create_from_fd(uint32_t res_id,
enum virgl_resource_fd_type fd_type,
int fd,
const struct iovec *iov,
int iov_count);
-int
+struct virgl_resource *
virgl_resource_create_from_iov(uint32_t res_id,
const struct iovec *iov,
int iov_count);
diff --git a/src/virgl_util.c b/src/virgl_util.c
index 2f673ddc..6dead0a7 100644
--- a/src/virgl_util.c
+++ b/src/virgl_util.c
@@ -114,6 +114,15 @@ void flush_eventfd(int fd)
} while ((len == -1 && errno == EINTR) || len == sizeof(value));
}
+#if ENABLE_TRACING == TRACE_WITH_PERCETTO
+PERCETTO_CATEGORY_DEFINE(VIRGL_PERCETTO_CATEGORIES)
+
+void trace_init(void)
+{
+ PERCETTO_INIT(PERCETTO_CLOCK_DONT_CARE);
+}
+#endif
+
#if ENABLE_TRACING == TRACE_WITH_PERFETTO
void trace_init(void)
{
@@ -126,18 +135,13 @@ void trace_init(void)
vperfetto_min_startTracing(&config);
}
-char *trace_begin(const char* format, ...)
+const char *trace_begin(const char *scope)
{
- char buffer[1024];
- va_list args;
- va_start (args, format);
- vsnprintf (buffer, sizeof(buffer), format, args);
- va_end (args);
- vperfetto_min_beginTrackEvent_VMM(buffer);
- return (void *)1;
+ vperfetto_min_beginTrackEvent_VMM(scope);
+ return scope;
}
-void trace_end(char **dummy)
+void trace_end(const char **dummy)
{
(void)dummy;
vperfetto_min_endTrackEvent_VMM();
@@ -150,33 +154,22 @@ void trace_init(void)
{
}
-char *trace_begin(const char* format, ...)
+const char *trace_begin(const char *scope)
{
for (int i = 0; i < nesting_depth; ++i)
fprintf(stderr, " ");
- fprintf(stderr, "ENTER:");
- char *buffer;
- va_list args;
- va_start (args, format);
- int size = vasprintf(&buffer, format, args);
-
- if (size < 0)
- buffer=strdup("error");
-
- va_end (args);
- fprintf(stderr, "%s\n", buffer);
+ fprintf(stderr, "ENTER:%s\n", scope);
nesting_depth++;
- return buffer;
+ return scope;
}
-void trace_end(char **func_name)
+void trace_end(const char **func_name)
{
--nesting_depth;
for (int i = 0; i < nesting_depth; ++i)
fprintf(stderr, " ");
fprintf(stderr, "LEAVE %s\n", *func_name);
- free(*func_name);
}
#endif
diff --git a/src/virgl_util.h b/src/virgl_util.h
index 861ecd72..951410ec 100644
--- a/src/virgl_util.h
+++ b/src/virgl_util.h
@@ -34,6 +34,7 @@
#define TRACE_WITH_PERFETTO 1
#define TRACE_WITH_STDERR 2
+#define TRACE_WITH_PERCETTO 3
#define BIT(n) (UINT32_C(1) << (n))
@@ -63,22 +64,42 @@ void flush_eventfd(int fd);
#ifdef ENABLE_TRACING
void trace_init(void);
-char *trace_begin(const char* format, ...);
-void trace_end(char **dummy);
#define TRACE_INIT() trace_init()
-#define TRACE_FUNC() \
- char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
- trace_begin("%s", __func__)
+#define TRACE_FUNC() TRACE_SCOPE(__func__)
-#define TRACE_SCOPE(FORMAT, ...) \
- char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
- trace_begin(FORMAT, __VA_ARGS__)
+#if ENABLE_TRACING == TRACE_WITH_PERCETTO
+
+#include <percetto.h>
+
+#define VIRGL_PERCETTO_CATEGORIES(C, G) \
+ C(virgl, "virglrenderer") \
+ C(virgls, "virglrenderer detailed events", "slow")
+
+PERCETTO_CATEGORY_DECLARE(VIRGL_PERCETTO_CATEGORIES)
+
+#define TRACE_SCOPE(SCOPE) TRACE_EVENT(virgl, SCOPE)
+/* Trace high frequency events (tracing may impact performance). */
+#define TRACE_SCOPE_SLOW(SCOPE) TRACE_EVENT(virgls, SCOPE)
+
+#else
+
+const char *trace_begin(const char *scope);
+void trace_end(const char **scope);
+
+#define TRACE_SCOPE(SCOPE) \
+ const char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
+ trace_begin(SCOPE)
+
+#define TRACE_SCOPE_SLOW(SCOPE) TRACE_SCOPE(SCOPE)
+
+#endif /* ENABLE_TRACING == TRACE_WITH_PERCETTO */
#else
#define TRACE_INIT()
#define TRACE_FUNC()
-#define TRACE_SCOPE(FORMAT, ...)
-#endif
+#define TRACE_SCOPE(SCOPE)
+#define TRACE_SCOPE_SLOW(SCOPE)
+#endif /* ENABLE_TRACING */
#endif /* VIRGL_UTIL_H */
diff --git a/src/virglrenderer.c b/src/virglrenderer.c
index 8458b215..0730a1d8 100644
--- a/src/virglrenderer.c
+++ b/src/virglrenderer.c
@@ -65,7 +65,7 @@ static int virgl_renderer_resource_create_internal(struct virgl_renderer_resourc
UNUSED struct iovec *iov, UNUSED uint32_t num_iovs,
void *image)
{
- int ret;
+ struct virgl_resource *res;
struct pipe_resource *pipe_res;
struct vrend_renderer_resource_create_args vrend_args = { 0 };
@@ -88,12 +88,14 @@ static int virgl_renderer_resource_create_internal(struct virgl_renderer_resourc
if (!pipe_res)
return EINVAL;
- ret = virgl_resource_create_from_pipe(args->handle, pipe_res, iov, num_iovs);
- if (ret) {
+ res = virgl_resource_create_from_pipe(args->handle, pipe_res, iov, num_iovs);
+ if (!res) {
vrend_renderer_resource_destroy((struct vrend_resource *)pipe_res);
- return ret;
+ return -ENOMEM;
}
+ res->map_info = vrend_renderer_resource_get_map_info(pipe_res);
+
return 0;
}
@@ -163,24 +165,57 @@ void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
}
}
-int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *name)
+static void per_context_fence_retire(struct virgl_context *ctx,
+ uint64_t queue_id,
+ void *fence_cookie)
+{
+ state.cbs->write_context_fence(state.cookie,
+ ctx->ctx_id,
+ queue_id,
+ fence_cookie);
+}
+
+int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
+ uint32_t ctx_flags,
+ uint32_t nlen,
+ const char *name)
{
+ const enum virgl_renderer_capset capset_id =
+ ctx_flags & VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK;
struct virgl_context *ctx;
int ret;
TRACE_FUNC();
/* user context id must be greater than 0 */
- if (handle == 0)
+ if (ctx_id == 0)
return EINVAL;
- if (virgl_context_lookup(handle))
- return 0;
+ /* unsupported flags */
+ if (ctx_flags & ~VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK)
+ return EINVAL;
+
+ ctx = virgl_context_lookup(ctx_id);
+ if (ctx) {
+ return ctx->capset_id == capset_id ? 0 : EINVAL;
+ }
- ctx = vrend_renderer_context_create(handle, nlen, name);
+ switch (capset_id) {
+ case VIRGL_RENDERER_CAPSET_VIRGL:
+ case VIRGL_RENDERER_CAPSET_VIRGL2:
+ ctx = vrend_renderer_context_create(ctx_id, nlen, name);
+ break;
+ default:
+ return EINVAL;
+ break;
+ }
if (!ctx)
return ENOMEM;
+ ctx->ctx_id = ctx_id;
+ ctx->capset_id = capset_id;
+ ctx->fence_retire = per_context_fence_retire;
+
ret = virgl_context_add(ctx);
if (ret) {
ctx->destroy(ctx);
@@ -190,6 +225,14 @@ int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *na
return 0;
}
+int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *name)
+{
+ return virgl_renderer_context_create_with_flags(handle,
+ VIRGL_RENDERER_CAPSET_VIRGL2,
+ nlen,
+ name);
+}
+
void virgl_renderer_context_destroy(uint32_t handle)
{
TRACE_FUNC();
@@ -315,10 +358,44 @@ void virgl_renderer_resource_detach_iov(int res_handle, struct iovec **iov_p, in
virgl_resource_detach_iov(res);
}
-int virgl_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
+int virgl_renderer_create_fence(int client_fence_id, UNUSED uint32_t ctx_id)
{
TRACE_FUNC();
- return vrend_renderer_create_fence(client_fence_id, ctx_id);
+ const uint32_t fence_id = (uint32_t)client_fence_id;
+ if (state.vrend_initialized)
+ return vrend_renderer_create_ctx0_fence(fence_id);
+ return EINVAL;
+}
+
+int virgl_renderer_context_create_fence(uint32_t ctx_id,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie)
+{
+ struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+ if (!ctx)
+ return -EINVAL;
+
+ assert(state.cbs->version >= 3 && state.cbs->write_context_fence);
+ return ctx->submit_fence(ctx, flags, queue_id, fence_cookie);
+}
+
+void virgl_renderer_context_poll(uint32_t ctx_id)
+{
+ struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+ if (!ctx)
+ return;
+
+ ctx->retire_fences(ctx);
+}
+
+int virgl_renderer_context_get_poll_fd(uint32_t ctx_id)
+{
+ struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+ if (!ctx)
+ return -1;
+
+ return ctx->get_fencing_fd(ctx);
}
void virgl_renderer_force_ctx_0(void)
@@ -400,8 +477,10 @@ void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int nu
}
-static void virgl_write_fence(uint32_t fence_id)
+static void ctx0_fence_retire(void *fence_cookie,
+ UNUSED void *retire_data)
{
+ const uint32_t fence_id = (uint32_t)(uintptr_t)fence_cookie;
state.cbs->write_fence(state.cookie, fence_id);
}
@@ -438,7 +517,7 @@ static int make_current(virgl_renderer_gl_context ctx)
}
static const struct vrend_if_cbs vrend_cbs = {
- virgl_write_fence,
+ ctx0_fence_retire,
create_gl_context,
destroy_gl_context,
make_current,
@@ -622,10 +701,28 @@ static int virgl_renderer_export_query(void *execute_args, uint32_t execute_size
return -EINVAL;
res = virgl_resource_lookup(export_query->in_resource_id);
- if (!res || !res->pipe_resource)
+ if (!res)
return -EINVAL;
- return vrend_renderer_export_query(res->pipe_resource, export_query);
+
+ if (res->pipe_resource) {
+ return vrend_renderer_export_query(res->pipe_resource, export_query);
+ } else if (!export_query->in_export_fds) {
+ /* Untyped resources are expected to be exported with
+ * virgl_renderer_resource_export_blob instead and have no type
+ * information. But when this is called to query (in_export_fds is
+ * false) an untyped resource, we should return sane values.
+ */
+ export_query->out_num_fds = 1;
+ export_query->out_fourcc = 0;
+ export_query->out_fds[0] = -1;
+ export_query->out_strides[0] = 0;
+ export_query->out_offsets[0] = 0;
+ export_query->out_modifier = DRM_FORMAT_MOD_INVALID;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static int virgl_renderer_supported_structures(void *execute_args, uint32_t execute_size)
@@ -668,6 +765,7 @@ int virgl_renderer_execute(void *execute_args, uint32_t execute_size)
int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_create_blob_args *args)
{
TRACE_FUNC();
+ struct virgl_resource *res;
struct virgl_context *ctx;
struct virgl_context_blob blob;
bool has_host_storage;
@@ -707,9 +805,14 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
}
if (!has_host_storage) {
- return virgl_resource_create_from_iov(args->res_handle,
- args->iovecs,
- args->num_iovs);
+ res = virgl_resource_create_from_iov(args->res_handle,
+ args->iovecs,
+ args->num_iovs);
+ if (!res)
+ return -ENOMEM;
+
+ res->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
+ return 0;
}
ctx = virgl_context_lookup(args->ctx_id);
@@ -721,26 +824,28 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
return ret;
if (blob.type != VIRGL_RESOURCE_FD_INVALID) {
- ret = virgl_resource_create_from_fd(args->res_handle,
+ res = virgl_resource_create_from_fd(args->res_handle,
blob.type,
blob.u.fd,
args->iovecs,
args->num_iovs);
- if (ret) {
+ if (!res) {
close(blob.u.fd);
- return ret;
+ return -ENOMEM;
}
} else {
- ret = virgl_resource_create_from_pipe(args->res_handle,
+ res = virgl_resource_create_from_pipe(args->res_handle,
blob.u.pipe_resource,
args->iovecs,
args->num_iovs);
- if (ret) {
+ if (!res) {
vrend_renderer_resource_destroy((struct vrend_resource *)blob.u.pipe_resource);
- return ret;
+ return -ENOMEM;
}
}
+ res->map_info = blob.map_info;
+
if (ctx->get_blob_done)
ctx->get_blob_done(ctx, args->res_handle, &blob);
@@ -771,10 +876,15 @@ int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint32_t *map_info
{
TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
- if (!res || !res->pipe_resource)
+ if (!res)
+ return -EINVAL;
+
+ if ((res->map_info & VIRGL_RENDERER_MAP_CACHE_MASK) ==
+ VIRGL_RENDERER_MAP_CACHE_NONE)
return -EINVAL;
- return vrend_renderer_resource_get_map_info(res->pipe_resource, map_info);
+ *map_info = res->map_info;
+ return 0;
}
int
@@ -802,5 +912,5 @@ int
virgl_renderer_export_fence(uint32_t client_fence_id, int *fd)
{
TRACE_FUNC();
- return vrend_renderer_export_fence(client_fence_id, fd);
+ return vrend_renderer_export_ctx0_fence(client_fence_id, fd);
}
diff --git a/src/virglrenderer.h b/src/virglrenderer.h
index e7592a80..d56b5dcc 100644
--- a/src/virglrenderer.h
+++ b/src/virglrenderer.h
@@ -45,7 +45,11 @@ struct virgl_renderer_gl_ctx_param {
int minor_ver;
};
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+#define VIRGL_RENDERER_CALLBACKS_VERSION 3
+#else
#define VIRGL_RENDERER_CALLBACKS_VERSION 2
+#endif
struct virgl_renderer_callbacks {
int version;
@@ -57,6 +61,10 @@ struct virgl_renderer_callbacks {
int (*make_current)(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx);
int (*get_drm_fd)(void *cookie); /* v2, used with flags & VIRGL_RENDERER_USE_EGL */
+
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+ void (*write_context_fence)(void *cookie, uint32_t ctx_id, uint64_t queue_id, void *fence_cookie);
+#endif
};
/* virtio-gpu compatible interface */
@@ -256,6 +264,13 @@ VIRGL_EXPORT int virgl_renderer_execute(void *execute_args, uint32_t execute_siz
*/
#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+#define VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK 0xff
+
+VIRGL_EXPORT int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
+ uint32_t ctx_flags,
+ uint32_t nlen,
+ const char *name);
+
#define VIRGL_RENDERER_BLOB_MEM_GUEST 0x0001
#define VIRGL_RENDERER_BLOB_MEM_HOST3D 0x0002
#define VIRGL_RENDERER_BLOB_MEM_HOST3D_GUEST 0x0003
@@ -300,6 +315,14 @@ virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
VIRGL_EXPORT int
virgl_renderer_export_fence(uint32_t client_fence_id, int *fd);
+#define VIRGL_RENDERER_FENCE_FLAG_MERGEABLE (1 << 0)
+VIRGL_EXPORT int virgl_renderer_context_create_fence(uint32_t ctx_id,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie);
+VIRGL_EXPORT void virgl_renderer_context_poll(uint32_t ctx_id); /* force fences */
+VIRGL_EXPORT int virgl_renderer_context_get_poll_fd(uint32_t ctx_id);
+
#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
#endif
diff --git a/src/virglrenderer_hw.h b/src/virglrenderer_hw.h
index 65f98cb2..c2105eb6 100644
--- a/src/virglrenderer_hw.h
+++ b/src/virglrenderer_hw.h
@@ -26,11 +26,9 @@
#include "virgl_hw.h"
-#ifdef VIRGL_RENDERER_UNSTABLE_APIS
enum virgl_renderer_capset {
VIRGL_RENDERER_CAPSET_VIRGL = 1,
VIRGL_RENDERER_CAPSET_VIRGL2 = 2,
};
-#endif
#endif /* VIRGLRENDERER_HW_H */
diff --git a/src/vrend_debug.c b/src/vrend_debug.c
index 93398304..e48bface 100644
--- a/src/vrend_debug.c
+++ b/src/vrend_debug.c
@@ -78,6 +78,7 @@ static const char *command_names[VIRGL_MAX_COMMANDS] = {
"TWEAK",
"CLEAR_TEXTURE"
"PIPE_RESOURCE_CREATE",
+ "PIPE_RESOURCE_SET_TYPE",
};
static const char *object_type_names[VIRGL_MAX_OBJECTS] = {
diff --git a/src/vrend_debug.h b/src/vrend_debug.h
index 91038318..f4efefac 100644
--- a/src/vrend_debug.h
+++ b/src/vrend_debug.h
@@ -97,7 +97,7 @@ virgl_debug_callback_type vrend_set_debug_callback(virgl_debug_callback_type cb)
} while (0)
#else
-#define VREND_DEBUG(flag, ctx, fmt, ...)
+#define VREND_DEBUG(flag, ctx, ...)
#define VREND_DEBUG_EXT(flag, ctx, X)
#define VREND_DEBUG_NOCTX(flag, ctx, ...)
#endif
diff --git a/src/vrend_decode.c b/src/vrend_decode.c
index f60e89ba..bf162bd6 100644
--- a/src/vrend_decode.c
+++ b/src/vrend_decode.c
@@ -283,19 +283,18 @@ static int vrend_decode_set_index_buffer(struct vrend_context *ctx, const uint32
static int vrend_decode_set_constant_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t shader;
- uint32_t index;
int nc = (length - 2);
if (length < 2)
return EINVAL;
shader = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE);
- index = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_INDEX);
+ /* VIRGL_SET_CONSTANT_BUFFER_INDEX is not used */
if (shader >= PIPE_SHADER_TYPES)
return EINVAL;
- vrend_set_constants(ctx, shader, index, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
+ vrend_set_constants(ctx, shader, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
return 0;
}
@@ -757,7 +756,7 @@ static int vrend_decode_create_object(struct vrend_context *ctx, const uint32_t
VREND_DEBUG(dbg_object, ctx," CREATE %-18s handle:0x%x len:%d\n",
vrend_get_object_type_name(obj_type), handle, length);
- TRACE_SCOPE("CREATE %-18s", vrend_get_object_type_name(obj_type));
+ TRACE_SCOPE(vrend_get_object_type_name(obj_type));
switch (obj_type){
case VIRGL_OBJECT_BLEND:
@@ -1432,9 +1431,44 @@ static int vrend_decode_pipe_resource_create(struct vrend_context *ctx, const ui
return vrend_renderer_pipe_resource_create(ctx, blob_id, &args);
}
+static int vrend_decode_pipe_resource_set_type(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
+{
+ struct vrend_renderer_resource_set_type_args args = { 0 };
+ uint32_t res_id;
+
+ if (length >= VIRGL_PIPE_RES_SET_TYPE_SIZE(0))
+ args.plane_count = (length - VIRGL_PIPE_RES_SET_TYPE_SIZE(0)) / 2;
+
+ if (length != VIRGL_PIPE_RES_SET_TYPE_SIZE(args.plane_count) ||
+ !args.plane_count || args.plane_count > VIRGL_GBM_MAX_PLANES)
+ return EINVAL;
+
+ res_id = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE);
+ args.format = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_FORMAT);
+ args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_BIND);
+ args.width = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_WIDTH);
+ args.height = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_HEIGHT);
+ args.usage = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_USAGE);
+ args.modifier = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO);
+ args.modifier |= (uint64_t)get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI) << 32;
+ for (uint32_t i = 0; i < args.plane_count; i++) {
+ args.plane_strides[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i));
+ args.plane_offsets[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i));
+ }
+
+ return vrend_renderer_pipe_resource_set_type(ctx, res_id, &args);
+}
+
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
uint32_t ctx_id);
+static void vrend_decode_ctx_fence_retire(void *fence_cookie,
+ void *retire_data)
+{
+ struct vrend_decode_ctx *dctx = retire_data;
+ dctx->base.fence_retire(&dctx->base, 0, fence_cookie);
+}
+
struct virgl_context *vrend_renderer_context_create(uint32_t handle,
uint32_t nlen,
const char *debug_name)
@@ -1453,6 +1487,10 @@ struct virgl_context *vrend_renderer_context_create(uint32_t handle,
return NULL;
}
+ vrend_renderer_set_fence_retire(dctx->grctx,
+ vrend_decode_ctx_fence_retire,
+ dctx);
+
return &dctx->base;
}
@@ -1470,12 +1508,7 @@ static void vrend_decode_ctx_attach_resource(struct virgl_context *ctx,
{
TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
- /* in the future, we should import to create the pipe resource */
- if (!res->pipe_resource)
- return;
-
- vrend_renderer_attach_res_ctx(dctx->grctx, res->res_id,
- res->pipe_resource);
+ vrend_renderer_attach_res_ctx(dctx->grctx, res);
}
static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
@@ -1483,7 +1516,7 @@ static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
{
TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
- vrend_renderer_detach_res_ctx(dctx->grctx, res->res_id);
+ vrend_renderer_detach_res_ctx(dctx->grctx, res);
}
static int vrend_decode_ctx_transfer_3d(struct virgl_context *ctx,
@@ -1508,8 +1541,24 @@ static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
blob->type = VIRGL_RESOURCE_FD_INVALID;
/* this transfers ownership and blob_id is no longer valid */
blob->u.pipe_resource = vrend_get_blob_pipe(dctx->grctx, blob_id);
+ if (!blob->u.pipe_resource)
+ return -EINVAL;
- return blob->u.pipe_resource ? 0 : EINVAL;
+ blob->map_info = vrend_renderer_resource_get_map_info(blob->u.pipe_resource);
+ return 0;
+}
+
+static int vrend_decode_get_memory_info(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
+{
+ TRACE_FUNC();
+ if (length != 1)
+ return EINVAL;
+
+ uint32_t res_handle = get_buf_entry(buf, 1);
+
+ vrend_renderer_get_meminfo(ctx, res_handle);
+
+ return 0;
}
typedef int (*vrend_decode_callback)(struct vrend_context *ctx, const uint32_t *buf, uint32_t length);
@@ -1522,7 +1571,7 @@ static int vrend_decode_dummy(struct vrend_context *ctx, const uint32_t *buf, ui
return 0;
}
-vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
+static const vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
[VIRGL_CCMD_NOP] = vrend_decode_dummy,
[VIRGL_CCMD_CREATE_OBJECT] = vrend_decode_create_object,
[VIRGL_CCMD_BIND_OBJECT] = vrend_decode_bind_object,
@@ -1571,7 +1620,9 @@ vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
[VIRGL_CCMD_COPY_TRANSFER3D] = vrend_decode_copy_transfer3d,
[VIRGL_CCMD_END_TRANSFERS] = vrend_decode_dummy,
[VIRGL_CCMD_SET_TWEAKS] = vrend_decode_set_tweaks,
- [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create
+ [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create,
+ [VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE] = vrend_decode_pipe_resource_set_type,
+ [VIRGL_CCMD_GET_MEMORY_INFO] = vrend_decode_get_memory_info,
};
static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
@@ -1615,19 +1666,41 @@ static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
VREND_DEBUG(dbg_cmd, gdctx->grctx, "%-4d %-20s len:%d\n",
cur_offset, vrend_get_comand_name(cmd), len);
- TRACE_SCOPE("%s", vrend_get_comand_name(cmd));
+ TRACE_SCOPE_SLOW(vrend_get_comand_name(cmd));
ret = decode_table[cmd](gdctx->grctx, buf, len);
if (ret) {
- if (ret == EINVAL) {
+ if (ret == EINVAL)
vrend_report_buffer_error(gdctx->grctx, *buf);
- return ret;
- }
+ return ret;
}
}
return 0;
}
+static int vrend_decode_ctx_get_fencing_fd(UNUSED struct virgl_context *ctx)
+{
+ return vrend_renderer_get_poll_fd();
+}
+
+static void vrend_decode_ctx_retire_fences(UNUSED struct virgl_context *ctx)
+{
+ vrend_renderer_check_fences();
+}
+
+static int vrend_decode_ctx_submit_fence(struct virgl_context *ctx,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie)
+{
+ struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
+
+ if (queue_id)
+ return -EINVAL;
+
+ return vrend_renderer_create_fence(dctx->grctx, flags, fence_cookie);
+}
+
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
uint32_t ctx_id)
{
@@ -1644,4 +1717,8 @@ static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
ctx->get_blob = vrend_decode_ctx_get_blob;
ctx->get_blob_done = NULL;
ctx->submit_cmd = vrend_decode_ctx_submit_cmd;
+
+ ctx->get_fencing_fd = vrend_decode_ctx_get_fencing_fd;
+ ctx->retire_fences = vrend_decode_ctx_retire_fences;
+ ctx->submit_fence = vrend_decode_ctx_submit_fence;
}
diff --git a/src/vrend_formats.c b/src/vrend_formats.c
index af35dc95..8b269440 100644
--- a/src/vrend_formats.c
+++ b/src/vrend_formats.c
@@ -246,6 +246,36 @@ static struct vrend_format_table etc2_formats[] = {
{VIRGL_FORMAT_ETC2_RG11_UNORM, GL_COMPRESSED_RG11_EAC, GL_RG, GL_UNSIGNED_BYTE, NO_SWIZZLE},
{VIRGL_FORMAT_ETC2_RG11_SNORM, GL_COMPRESSED_SIGNED_RG11_EAC, GL_RG, GL_BYTE, NO_SWIZZLE},
};
+static struct vrend_format_table astc_formats[] = {
+ {VIRGL_FORMAT_ASTC_4x4, GL_COMPRESSED_RGBA_ASTC_4x4, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x4, GL_COMPRESSED_RGBA_ASTC_5x4, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x5, GL_COMPRESSED_RGBA_ASTC_5x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x5, GL_COMPRESSED_RGBA_ASTC_6x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x6, GL_COMPRESSED_RGBA_ASTC_6x6, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x5, GL_COMPRESSED_RGBA_ASTC_8x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x6, GL_COMPRESSED_RGBA_ASTC_8x6, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x8, GL_COMPRESSED_RGBA_ASTC_8x8, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x5, GL_COMPRESSED_RGBA_ASTC_10x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x6, GL_COMPRESSED_RGBA_ASTC_10x6, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x8, GL_COMPRESSED_RGBA_ASTC_10x8, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x10, GL_COMPRESSED_RGBA_ASTC_10x10, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x10, GL_COMPRESSED_RGBA_ASTC_12x10, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x12, GL_COMPRESSED_RGBA_ASTC_12x12, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_4x4_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x4_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x6_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x6_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x8_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x6_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x8_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x10_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x10_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x12_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+};
static struct vrend_format_table rgtc_formats[] = {
{ VIRGL_FORMAT_RGTC1_UNORM, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, NO_SWIZZLE },
@@ -433,6 +463,10 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
continue;
+ case UTIL_FORMAT_LAYOUT_ASTC:
+ if(epoxy_has_gl_extension("GL_KHR_texture_compression_astc_ldr"))
+ vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
+ continue;
default:
;/* do logic below */
}
@@ -552,6 +586,7 @@ void vrend_build_format_list_common(void)
/* compressed */
add_formats(etc2_formats);
+ add_formats(astc_formats);
add_formats(rgtc_formats);
add_formats(dxtn_formats);
add_formats(dxtn_srgb_formats);
@@ -712,6 +747,7 @@ unsigned vrend_renderer_query_multisample_caps(unsigned max_samples, struct virg
static int format_uncompressed_compressed_copy_compatible(enum virgl_formats src,
enum virgl_formats dst)
{
+
switch (src) {
case VIRGL_FORMAT_R32G32B32A32_UINT:
case VIRGL_FORMAT_R32G32B32A32_SINT:
@@ -734,6 +770,34 @@ static int format_uncompressed_compressed_copy_compatible(enum virgl_formats src
case VIRGL_FORMAT_ETC2_RG11_UNORM:
case VIRGL_FORMAT_ETC2_RG11_SNORM:
return 1;
+ case VIRGL_FORMAT_ASTC_4x4:
+ case VIRGL_FORMAT_ASTC_5x4:
+ case VIRGL_FORMAT_ASTC_5x5:
+ case VIRGL_FORMAT_ASTC_6x5:
+ case VIRGL_FORMAT_ASTC_6x6:
+ case VIRGL_FORMAT_ASTC_8x5:
+ case VIRGL_FORMAT_ASTC_8x6:
+ case VIRGL_FORMAT_ASTC_8x8:
+ case VIRGL_FORMAT_ASTC_10x5:
+ case VIRGL_FORMAT_ASTC_10x6:
+ case VIRGL_FORMAT_ASTC_10x8:
+ case VIRGL_FORMAT_ASTC_10x10:
+ case VIRGL_FORMAT_ASTC_12x10:
+ case VIRGL_FORMAT_ASTC_12x12:
+ case VIRGL_FORMAT_ASTC_4x4_SRGB:
+ case VIRGL_FORMAT_ASTC_5x5_SRGB:
+ case VIRGL_FORMAT_ASTC_6x5_SRGB:
+ case VIRGL_FORMAT_ASTC_6x6_SRGB:
+ case VIRGL_FORMAT_ASTC_8x5_SRGB:
+ case VIRGL_FORMAT_ASTC_8x6_SRGB:
+ case VIRGL_FORMAT_ASTC_8x8_SRGB:
+ case VIRGL_FORMAT_ASTC_10x5_SRGB:
+ case VIRGL_FORMAT_ASTC_10x6_SRGB:
+ case VIRGL_FORMAT_ASTC_10x8_SRGB:
+ case VIRGL_FORMAT_ASTC_10x10_SRGB:
+ case VIRGL_FORMAT_ASTC_12x10_SRGB:
+ case VIRGL_FORMAT_ASTC_12x12_SRGB:
+ return epoxy_is_desktop_gl() ? -1 : 1;
default:
return -1;
}
@@ -771,6 +835,26 @@ static int format_uncompressed_compressed_copy_compatible(enum virgl_formats src
static boolean format_compressed_compressed_copy_compatible(enum virgl_formats src, enum virgl_formats dst)
{
+ const bool is_desktop_gl = epoxy_is_desktop_gl();
+
+ if(!is_desktop_gl) {
+ if((src == VIRGL_FORMAT_ASTC_4x4 && dst == VIRGL_FORMAT_ASTC_4x4_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_5x4 && dst == VIRGL_FORMAT_ASTC_5x4_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_5x5 && dst == VIRGL_FORMAT_ASTC_5x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_6x5 && dst == VIRGL_FORMAT_ASTC_6x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_6x6 && dst == VIRGL_FORMAT_ASTC_6x6_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_8x5 && dst == VIRGL_FORMAT_ASTC_8x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_8x6 && dst == VIRGL_FORMAT_ASTC_8x6_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_8x8 && dst == VIRGL_FORMAT_ASTC_8x8_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x5 && dst == VIRGL_FORMAT_ASTC_10x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x6 && dst == VIRGL_FORMAT_ASTC_10x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x8 && dst == VIRGL_FORMAT_ASTC_10x8_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x10 && dst == VIRGL_FORMAT_ASTC_10x10_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_12x10 && dst == VIRGL_FORMAT_ASTC_12x10_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_12x12 && dst == VIRGL_FORMAT_ASTC_12x12_SRGB))
+ return true;
+ }
+
if ((src == VIRGL_FORMAT_RGTC1_UNORM && dst == VIRGL_FORMAT_RGTC1_SNORM) ||
(src == VIRGL_FORMAT_RGTC2_UNORM && dst == VIRGL_FORMAT_RGTC2_SNORM) ||
(src == VIRGL_FORMAT_BPTC_RGBA_UNORM && dst == VIRGL_FORMAT_BPTC_SRGBA) ||
diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
index 93b8ca2a..15474d23 100644
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -55,9 +55,14 @@
#include "virgl_resource.h"
#include "virglrenderer.h"
#include "virglrenderer_hw.h"
+#include "virgl_protocol.h"
#include "tgsi/tgsi_text.h"
+#ifdef HAVE_EPOXY_GLX_H
+#include <epoxy/glx.h>
+#endif
+
/*
* VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both
* virgl_caps_v1 and are exactly the same.
@@ -76,8 +81,14 @@ static const uint32_t fake_occlusion_query_samples_passed_default = 1024;
const struct vrend_if_cbs *vrend_clicbs;
struct vrend_fence {
- uint32_t fence_id;
- uint32_t ctx_id;
+ /* When the sync thread is waiting on the fence and the main thread
+ * destroys the context, ctx is set to NULL. Otherwise, ctx is always
+ * valid.
+ */
+ struct vrend_context *ctx;
+ uint32_t flags;
+ void *fence_cookie;
+
union {
GLsync glsyncobj;
#ifdef HAVE_EPOXY_EGL_H
@@ -110,6 +121,7 @@ enum features_id
feat_arb_robustness,
feat_arb_buffer_storage,
feat_arrays_of_arrays,
+ feat_ati_meminfo,
feat_atomic_counters,
feat_base_instance,
feat_barrier,
@@ -154,6 +166,7 @@ enum features_id
feat_multi_draw_indirect,
feat_nv_conditional_render,
feat_nv_prim_restart,
+ feat_nvx_gpu_memory_info,
feat_polygon_offset_clamp,
feat_occlusion_query,
feat_occlusion_query_boolean,
@@ -162,6 +175,7 @@ enum features_id
feat_sample_mask,
feat_sample_shading,
feat_samplers,
+ feat_sampler_border_colors,
feat_shader_clock,
feat_separate_shader_objects,
feat_ssbo,
@@ -206,6 +220,7 @@ static const struct {
FEAT(arb_robustness, UNAVAIL, UNAVAIL, "GL_ARB_robustness" ),
FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage", "GL_EXT_buffer_storage"),
FEAT(arrays_of_arrays, 43, 31, "GL_ARB_arrays_of_arrays"),
+ FEAT(ati_meminfo, UNAVAIL, UNAVAIL, "GL_ATI_meminfo" ),
FEAT(atomic_counters, 42, 31, "GL_ARB_shader_atomic_counters" ),
FEAT(base_instance, 42, UNAVAIL, "GL_ARB_base_instance", "GL_EXT_base_instance" ),
FEAT(barrier, 42, 31, NULL),
@@ -250,6 +265,7 @@ static const struct {
FEAT(multi_draw_indirect, 43, UNAVAIL, "GL_ARB_multi_draw_indirect", "GL_EXT_multi_draw_indirect" ),
FEAT(nv_conditional_render, UNAVAIL, UNAVAIL, "GL_NV_conditional_render" ),
FEAT(nv_prim_restart, UNAVAIL, UNAVAIL, "GL_NV_primitive_restart" ),
+ FEAT(nvx_gpu_memory_info, UNAVAIL, UNAVAIL, "GL_NVX_gpu_memory_info" ),
FEAT(polygon_offset_clamp, 46, UNAVAIL, "GL_ARB_polygon_offset_clamp", "GL_EXT_polygon_offset_clamp"),
FEAT(occlusion_query, 15, UNAVAIL, "GL_ARB_occlusion_query"),
FEAT(occlusion_query_boolean, 33, 30, "GL_EXT_occlusion_query_boolean", "GL_ARB_occlusion_query2"),
@@ -258,6 +274,7 @@ static const struct {
FEAT(sample_mask, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(sample_shading, 40, 32, "GL_ARB_sample_shading", "GL_OES_sample_shading" ),
FEAT(samplers, 33, 30, "GL_ARB_sampler_objects" ),
+ FEAT(sampler_border_colors, 33, 32, "GL_ARB_sampler_objects", "GL_EXT_texture_border_clamp", "GL_OES_texture_border_clamp" ),
FEAT(separate_shader_objects, 41, 31, "GL_ARB_seperate_shader_objects"),
FEAT(shader_clock, UNAVAIL, UNAVAIL, "GL_ARB_shader_clock" ),
FEAT(ssbo, 43, 31, "GL_ARB_shader_storage_buffer_object" ),
@@ -270,7 +287,7 @@ static const struct {
FEAT(texture_barrier, 45, UNAVAIL, "GL_ARB_texture_barrier" ),
FEAT(texture_buffer_range, 43, 32, "GL_ARB_texture_buffer_range" ),
FEAT(texture_gather, 40, 31, "GL_ARB_texture_gather" ),
- FEAT(texture_multisample, 32, 30, "GL_ARB_texture_multisample" ),
+ FEAT(texture_multisample, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(texture_query_lod, 40, UNAVAIL, "GL_ARB_texture_query_lod", "GL_EXT_texture_query_lod"),
FEAT(texture_srgb_decode, UNAVAIL, UNAVAIL, "GL_EXT_texture_sRGB_decode" ),
FEAT(texture_storage, 42, 30, "GL_ARB_texture_storage" ),
@@ -316,8 +333,12 @@ struct global_renderer_state {
int eventfd;
pipe_mutex fence_mutex;
+ /* a fence is always on either of the lists, or is pointed to by
+ * fence_waiting
+ */
struct list_head fence_list;
struct list_head fence_wait_list;
+ struct vrend_fence *fence_waiting;
pipe_condvar fence_cond;
struct vrend_context *ctx0;
@@ -355,6 +376,7 @@ struct vrend_linked_shader_program {
bool dual_src_linked;
struct vrend_shader *ss[PIPE_SHADER_TYPES];
+ uint64_t vs_fs_key;
uint32_t ubo_used_mask[PIPE_SHADER_TYPES];
uint32_t samplers_used_mask[PIPE_SHADER_TYPES];
@@ -415,10 +437,7 @@ struct vrend_shader_selector {
struct vrend_texture {
struct vrend_resource base;
struct pipe_sampler_state state;
- GLenum cur_swizzle_r;
- GLenum cur_swizzle_g;
- GLenum cur_swizzle_b;
- GLenum cur_swizzle_a;
+ GLint cur_swizzle[4];
GLuint cur_srgb_decode;
GLuint cur_base, cur_max;
};
@@ -452,10 +471,7 @@ struct vrend_sampler_view {
enum virgl_formats format;
GLenum target;
GLuint val0, val1;
- GLuint gl_swizzle_r;
- GLuint gl_swizzle_g;
- GLuint gl_swizzle_b;
- GLuint gl_swizzle_a;
+ GLint gl_swizzle[4];
GLenum depth_texture_mode;
GLuint srgb_decode;
struct vrend_resource *texture;
@@ -540,6 +556,14 @@ struct vrend_streamout_object {
#define XFB_STATE_STARTED 2
#define XFB_STATE_PAUSED 3
+struct vrend_vertex_buffer {
+ struct pipe_vertex_buffer base;
+ uint32_t res_id;
+};
+
+#define VREND_PROGRAM_NQUEUES (1 << 8)
+#define VREND_PROGRAM_NQUEUE_MASK (VREND_PROGRAM_NQUEUES - 1)
+
struct vrend_sub_context {
struct list_head head;
@@ -550,14 +574,18 @@ struct vrend_sub_context {
GLuint vaoid;
uint32_t enabled_attribs_bitmask;
- struct list_head programs;
+ /* Using an array of lists only adds VREND_PROGRAM_NQUEUES - 1 list_head
+ * structures to the consumed memory, but looking up the program can
+ * be spead up by the factor VREND_PROGRAM_NQUEUES which makes this
+ * worthwile. */
+ struct list_head gl_programs[VREND_PROGRAM_NQUEUES];
+ struct list_head cs_programs;
struct util_hash_table *object_hash;
struct vrend_vertex_element_array *ve;
int num_vbos;
int old_num_vbos; /* for cleaning up */
- struct pipe_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
- uint32_t vbo_res_ids[PIPE_MAX_ATTRIBS];
+ struct vrend_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer ib;
uint32_t index_buffer_res_id;
@@ -573,7 +601,7 @@ struct vrend_sub_context {
struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES];
struct vrend_linked_shader_program *prog;
- int prog_ids[PIPE_SHADER_TYPES];
+ GLuint prog_ids[PIPE_SHADER_TYPES];
struct vrend_shader_view views[PIPE_SHADER_TYPES];
struct vrend_constants consts[PIPE_SHADER_TYPES];
@@ -655,6 +683,13 @@ struct vrend_sub_context {
int fake_occlusion_query_samples_passed_multiplier;
int prim_mode;
+ bool drawing;
+ struct vrend_context *parent;
+};
+
+struct vrend_untyped_resource {
+ struct virgl_resource *resource;
+ struct list_head head;
};
struct vrend_context {
@@ -671,7 +706,6 @@ struct vrend_context {
bool in_error;
bool ctx_switch_pending;
bool pstip_inited;
- bool drawing;
GLuint pstipple_tex_id;
@@ -680,31 +714,55 @@ struct vrend_context {
/* resource bounds to this context */
struct util_hash_table *res_hash;
+ /*
+ * vrend_context only works with typed virgl_resources. More specifically,
+ * it works with vrend_resources that are inherited from pipe_resources
+ * wrapped in virgl_resources.
+ *
+ * Normally, a vrend_resource is created first by
+ * vrend_renderer_resource_create. It is then wrapped in a virgl_resource
+ * by virgl_resource_create_from_pipe. Depending on whether it is a blob
+ * resource or not, the two functions can be called from different paths.
+ * But we always get both a virgl_resource and a vrend_resource as a
+ * result.
+ *
+ * It is however possible that we encounter untyped virgl_resources that
+ * have no pipe_resources. To work with untyped virgl_resources, we park
+ * them in untyped_resources first when they are attached. We move them
+ * into res_hash only after we get the type information and create the
+ * vrend_resources in vrend_decode_pipe_resource_set_type.
+ */
+ struct list_head untyped_resources;
+ struct virgl_resource *untyped_resource_cache;
+
struct list_head active_nontimer_query_list;
struct vrend_shader_cfg shader_cfg;
unsigned debug_flags;
+
+ vrend_context_fence_retire fence_retire;
+ void *fence_retire_data;
};
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle);
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause);
-static void vrend_update_viewport_state(struct vrend_context *ctx);
-static void vrend_update_scissor_state(struct vrend_context *ctx);
+static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx);
+static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx);
static void vrend_destroy_query_object(void *obj_ptr);
static void vrend_finish_context_switch(struct vrend_context *ctx);
-static void vrend_patch_blend_state(struct vrend_context *ctx);
-static void vrend_update_frontface_state(struct vrend_context *ctx);
+static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx);
+static void vrend_update_frontface_state(struct vrend_sub_context *ctx);
static void vrender_get_glsl_version(int *glsl_version);
static void vrend_destroy_program(struct vrend_linked_shader_program *ent);
-static void vrend_apply_sampler_state(struct vrend_context *ctx,
+static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id, int sampler_id,
struct vrend_sampler_view *tview);
static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples);
-void vrend_update_stencil_state(struct vrend_context *ctx);
+void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx);
static struct vrend_format_table tex_conv_table[VIRGL_FORMAT_MAX_EXTENDED];
@@ -1053,7 +1111,7 @@ static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
free(sel);
}
-static bool vrend_compile_shader(struct vrend_context *ctx,
+static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx,
struct vrend_shader *shader)
{
GLint param;
@@ -1068,7 +1126,7 @@ static bool vrend_compile_shader(struct vrend_context *ctx,
char infolog[65536];
int len;
glGetShaderInfoLog(shader->id, 65536, &len, infolog);
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_printf("shader failed to compile\n%s\n", infolog);
vrend_shader_dump(shader);
return false;
@@ -1151,11 +1209,11 @@ static bool vrend_is_timer_query(GLenum gltype)
gltype == GL_TIME_ELAPSED;
}
-static void vrend_use_program(struct vrend_context *ctx, GLuint program_id)
+static void vrend_use_program(struct vrend_sub_context *sub_ctx, GLuint program_id)
{
- if (ctx->sub->program_id != program_id) {
+ if (sub_ctx->program_id != program_id) {
glUseProgram(program_id);
- ctx->sub->program_id = program_id;
+ sub_ctx->program_id = program_id;
}
}
@@ -1198,10 +1256,10 @@ static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_e
}
}
-static void vrend_stencil_test_enable(struct vrend_context *ctx, bool stencil_test_enable)
+static void vrend_stencil_test_enable(struct vrend_sub_context *sub_ctx, bool stencil_test_enable)
{
- if (ctx->sub->stencil_test_enabled != stencil_test_enable) {
- ctx->sub->stencil_test_enabled = stencil_test_enable;
+ if (sub_ctx->stencil_test_enabled != stencil_test_enable) {
+ sub_ctx->stencil_test_enabled = stencil_test_enable;
if (stencil_test_enable)
glEnable(GL_STENCIL_TEST);
else
@@ -1257,7 +1315,7 @@ static char *get_skip_str(int *skip_val)
return start_skip;
}
-static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_context *ctx,
+static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_sub_context *sub_ctx,
int prog_id,
struct vrend_shader_info *sinfo)
{
@@ -1272,7 +1330,7 @@ static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_context *ctx,
if (!so->num_outputs)
return;
- VREND_DEBUG_EXT(dbg_shader_streamout, ctx, dump_stream_out(so));
+ VREND_DEBUG_EXT(dbg_shader_streamout, sub_ctx->parent, dump_stream_out(so));
for (i = 0; i < so->num_outputs; i++) {
if (last_buffer != so->output[i].output_buffer) {
@@ -1496,9 +1554,9 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs);
sprog->id = prog_id;
- list_addtail(&sprog->head, &ctx->sub->programs);
+ list_addtail(&sprog->head, &ctx->sub->cs_programs);
- vrend_use_program(ctx, prog_id);
+ vrend_use_program(ctx->sub, prog_id);
bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE, 0);
bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE, 0);
@@ -1508,7 +1566,7 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
return sprog;
}
-static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
+static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_context *sub_ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs,
@@ -1535,20 +1593,20 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
if (gs) {
if (gs->id > 0)
glAttachShader(prog_id, gs->id);
- set_stream_out_varyings(ctx, prog_id, &gs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &gs->sel->sinfo);
} else if (tes)
- set_stream_out_varyings(ctx, prog_id, &tes->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &tes->sel->sinfo);
else
- set_stream_out_varyings(ctx, prog_id, &vs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &vs->sel->sinfo);
glAttachShader(prog_id, fs->id);
if (fs->sel->sinfo.num_outputs > 1) {
- if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
+ if (util_blend_state_is_dual(&sub_ctx->blend_state, 0)) {
if (has_feature(feat_dual_src_blend)) {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
} else {
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
}
sprog->dual_src_linked = true;
} else {
@@ -1579,7 +1637,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
glGetProgramInfoLog(prog_id, 65536, &len, infolog);
vrend_printf("got error linking\n%s\n", infolog);
/* dump shaders */
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_shader_dump(vs);
if (gs)
vrend_shader_dump(gs);
@@ -1591,6 +1649,9 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
+ sprog->vs_fs_key = (((uint64_t)fs->id) << 32) | (vs->id & ~VREND_PROGRAM_NQUEUE_MASK) |
+ (sprog->dual_src_linked ? 1 : 0);
+
sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
sprog->ss[PIPE_SHADER_TESS_CTRL] = tcs;
sprog->ss[PIPE_SHADER_TESS_EVAL] = tes;
@@ -1607,7 +1668,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
sprog->id = prog_id;
- list_addtail(&sprog->head, &ctx->sub->programs);
+ list_addtail(&sprog->head, &sub_ctx->gl_programs[vs->id & VREND_PROGRAM_NQUEUE_MASK]);
if (fs->key.pstipple_tex)
sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
@@ -1619,7 +1680,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
sprog->fs_alpha_ref_val_loc = -1;
sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust_y");
- vrend_use_program(ctx, prog_id);
+ vrend_use_program(sub_ctx, prog_id);
int next_ubo_id = 0, next_sampler_id = 0;
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
@@ -1659,16 +1720,17 @@ static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend
GLuint cs_id)
{
struct vrend_linked_shader_program *ent;
- LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
- if (!ent->ss[PIPE_SHADER_COMPUTE])
- continue;
- if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id)
+ LIST_FOR_EACH_ENTRY(ent, &ctx->sub->cs_programs, head) {
+ if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id) {
+ list_del(&ent->head);
+ list_add(&ent->head, &ctx->sub->cs_programs);
return ent;
+ }
}
return NULL;
}
-static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_context *ctx,
+static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_sub_context *sub_ctx,
GLuint vs_id,
GLuint fs_id,
GLuint gs_id,
@@ -1676,16 +1738,15 @@ static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_co
GLuint tes_id,
bool dual_src)
{
+ uint64_t vs_fs_key = (((uint64_t)fs_id) << 32) | (vs_id & ~VREND_PROGRAM_NQUEUE_MASK) |
+ (dual_src ? 1 : 0);
+
struct vrend_linked_shader_program *ent;
- LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
- if (ent->dual_src_linked != dual_src)
- continue;
- if (ent->ss[PIPE_SHADER_COMPUTE])
+
+ struct list_head *programs = &sub_ctx->gl_programs[vs_id & VREND_PROGRAM_NQUEUE_MASK];
+ LIST_FOR_EACH_ENTRY(ent, programs, head) {
+ if (likely(ent->vs_fs_key != vs_fs_key))
continue;
- if (ent->ss[PIPE_SHADER_VERTEX]->id != vs_id)
- continue;
- if (ent->ss[PIPE_SHADER_FRAGMENT]->id != fs_id)
- continue;
if (ent->ss[PIPE_SHADER_GEOMETRY] &&
ent->ss[PIPE_SHADER_GEOMETRY]->id != gs_id)
continue;
@@ -1695,8 +1756,14 @@ static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_co
if (ent->ss[PIPE_SHADER_TESS_EVAL] &&
ent->ss[PIPE_SHADER_TESS_EVAL]->id != tes_id)
continue;
+ /* put the entry in front */
+ if (programs->next != &ent->head) {
+ list_del(&ent->head);
+ list_add(&ent->head, programs);
+ }
return ent;
}
+
return NULL;
}
@@ -1725,11 +1792,16 @@ static void vrend_free_programs(struct vrend_sub_context *sub)
{
struct vrend_linked_shader_program *ent, *tmp;
- if (LIST_IS_EMPTY(&sub->programs))
- return;
+ if (!LIST_IS_EMPTY(&sub->cs_programs)) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->cs_programs, head)
+ vrend_destroy_program(ent);
+ }
- LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
- vrend_destroy_program(ent);
+ for (unsigned i = 0; i < VREND_PROGRAM_NQUEUES; ++i) {
+ if (!LIST_IS_EMPTY(&sub->gl_programs[i])) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->gl_programs[i], head)
+ vrend_destroy_program(ent);
+ }
}
}
@@ -1944,6 +2016,16 @@ static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_fi
return 0;
}
+static void apply_sampler_border_color(GLuint sampler,
+ const GLuint colors[static 4])
+{
+ if (has_feature(feat_sampler_border_colors)) {
+ glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, colors);
+ } else if (colors[0] || colors[1] || colors[2] || colors[3]) {
+ vrend_printf("sampler border color setting requested but not supported\n");
+ }
+}
+
int vrend_create_sampler_state(struct vrend_context *ctx,
uint32_t handle,
struct pipe_sampler_state *templ)
@@ -1984,7 +2066,7 @@ int vrend_create_sampler_state(struct vrend_context *ctx,
}
- glSamplerParameterIuiv(state->ids[i], GL_TEXTURE_BORDER_COLOR, templ->border_color.ui);
+ apply_sampler_border_color(state->ids[i], templ->border_color.ui);
glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT, i == 0 ? GL_SKIP_DECODE_EXT : GL_DECODE_EXT);
}
}
@@ -2105,10 +2187,8 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]];
}
- view->gl_swizzle_r = to_gl_swizzle(swizzle[0]);
- view->gl_swizzle_g = to_gl_swizzle(swizzle[1]);
- view->gl_swizzle_b = to_gl_swizzle(swizzle[2]);
- view->gl_swizzle_a = to_gl_swizzle(swizzle[3]);
+ for (unsigned i = 0; i < 4; ++i)
+ view->gl_swizzle[i] = to_gl_swizzle(swizzle[i]);
if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
enum virgl_formats format;
@@ -2172,10 +2252,12 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
glTexParameteri(view->target, GL_TEXTURE_BASE_LEVEL, base_level);
glTexParameteri(view->target, GL_TEXTURE_MAX_LEVEL, max_level);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
+ }
+ } else
+ glTexParameteriv(view->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
if (util_format_is_srgb(view->format) &&
has_feature(feat_texture_srgb_decode)) {
glTexParameteri(view->target, GL_TEXTURE_SRGB_DECODE_EXT,
@@ -2330,9 +2412,9 @@ static void vrend_hw_set_zsurf_texture(struct vrend_context *ctx)
}
}
-static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
+static void vrend_hw_set_color_surface(struct vrend_sub_context *sub_ctx, int index)
{
- struct vrend_surface *surf = ctx->sub->surf[index];
+ struct vrend_surface *surf = sub_ctx->surf[index];
if (!surf) {
GLenum attachment = GL_COLOR_ATTACHMENT0 + index;
@@ -2340,15 +2422,15 @@ static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
glFramebufferTexture2D(GL_FRAMEBUFFER, attachment,
GL_TEXTURE_2D, 0, 0);
} else {
- uint32_t first_layer = ctx->sub->surf[index]->val1 & 0xffff;
- uint32_t last_layer = (ctx->sub->surf[index]->val1 >> 16) & 0xffff;
+ uint32_t first_layer = sub_ctx->surf[index]->val1 & 0xffff;
+ uint32_t last_layer = (sub_ctx->surf[index]->val1 >> 16) & 0xffff;
vrend_fb_bind_texture_id(surf->texture, surf->id, index, surf->val0,
first_layer != last_layer ? 0xffffffff : first_layer);
}
}
-static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
+static void vrend_hw_emit_framebuffer_state(struct vrend_sub_context *sub_ctx)
{
static const GLenum buffers[8] = {
GL_COLOR_ATTACHMENT0,
@@ -2361,19 +2443,19 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
GL_COLOR_ATTACHMENT7,
};
- if (ctx->sub->nr_cbufs == 0) {
+ if (sub_ctx->nr_cbufs == 0) {
glReadBuffer(GL_NONE);
if (has_feature(feat_srgb_write_control)) {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
- ctx->sub->framebuffer_srgb_enabled = false;
+ sub_ctx->framebuffer_srgb_enabled = false;
}
} else if (has_feature(feat_srgb_write_control)) {
struct vrend_surface *surf = NULL;
bool use_srgb = false;
int i;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- surf = ctx->sub->surf[i];
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ surf = sub_ctx->surf[i];
if (util_format_is_srgb(surf->format)) {
use_srgb = true;
}
@@ -2384,25 +2466,25 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
} else {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
}
- ctx->sub->framebuffer_srgb_enabled = use_srgb;
+ sub_ctx->framebuffer_srgb_enabled = use_srgb;
}
if (vrend_state.use_gles &&
- vrend_get_tweak_is_active(&ctx->sub->tweaks, virgl_tweak_gles_brga_apply_dest_swizzle)) {
- ctx->sub->swizzle_output_rgb_to_bgr = 0;
- for (int i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- struct vrend_surface *surf = ctx->sub->surf[i];
+ vrend_get_tweak_is_active(&sub_ctx->tweaks, virgl_tweak_gles_brga_apply_dest_swizzle)) {
+ sub_ctx->swizzle_output_rgb_to_bgr = 0;
+ for (int i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ struct vrend_surface *surf = sub_ctx->surf[i];
if (surf->texture->base.bind & VIRGL_BIND_PREFER_EMULATED_BGRA) {
- VREND_DEBUG(dbg_tweak, ctx, "Swizzled BGRA output for 0x%x (%s)\n", i, util_format_name(surf->format));
- ctx->sub->swizzle_output_rgb_to_bgr |= 1 << i;
+ VREND_DEBUG(dbg_tweak, sub_ctx->parent, "Swizzled BGRA output for 0x%x (%s)\n", i, util_format_name(surf->format));
+ sub_ctx->swizzle_output_rgb_to_bgr |= 1 << i;
}
}
}
}
- glDrawBuffers(ctx->sub->nr_cbufs, buffers);
+ glDrawBuffers(sub_ctx->nr_cbufs, buffers);
}
void vrend_set_framebuffer_state(struct vrend_context *ctx,
@@ -2416,10 +2498,12 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
GLint new_height = -1;
bool new_ibf = false;
- glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->fb_id);
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
+ glBindFramebuffer(GL_FRAMEBUFFER, sub_ctx->fb_id);
if (zsurf_handle) {
- zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
+ zsurf = vrend_object_lookup(sub_ctx->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
@@ -2427,18 +2511,18 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
} else
zsurf = NULL;
- if (ctx->sub->zsurf != zsurf) {
- vrend_surface_reference(&ctx->sub->zsurf, zsurf);
+ if (sub_ctx->zsurf != zsurf) {
+ vrend_surface_reference(&sub_ctx->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
- old_num = ctx->sub->nr_cbufs;
- ctx->sub->nr_cbufs = nr_cbufs;
- ctx->sub->old_nr_cbufs = old_num;
+ old_num = sub_ctx->nr_cbufs;
+ sub_ctx->nr_cbufs = nr_cbufs;
+ sub_ctx->old_nr_cbufs = old_num;
for (i = 0; i < (int)nr_cbufs; i++) {
if (surf_handle[i] != 0) {
- surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
+ surf = vrend_object_lookup(sub_ctx->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
@@ -2446,32 +2530,32 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
} else
surf = NULL;
- if (ctx->sub->surf[i] != surf) {
- vrend_surface_reference(&ctx->sub->surf[i], surf);
- vrend_hw_set_color_surface(ctx, i);
+ if (sub_ctx->surf[i] != surf) {
+ vrend_surface_reference(&sub_ctx->surf[i], surf);
+ vrend_hw_set_color_surface(sub_ctx, i);
}
}
- if (old_num > ctx->sub->nr_cbufs) {
- for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
- vrend_surface_reference(&ctx->sub->surf[i], NULL);
- vrend_hw_set_color_surface(ctx, i);
+ if (old_num > sub_ctx->nr_cbufs) {
+ for (i = sub_ctx->nr_cbufs; i < old_num; i++) {
+ vrend_surface_reference(&sub_ctx->surf[i], NULL);
+ vrend_hw_set_color_surface(sub_ctx, i);
}
}
/* find a buffer to set fb_height from */
- if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
+ if (sub_ctx->nr_cbufs == 0 && !sub_ctx->zsurf) {
new_height = 0;
new_ibf = false;
- } else if (ctx->sub->nr_cbufs == 0) {
- new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
- new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
+ } else if (sub_ctx->nr_cbufs == 0) {
+ new_height = u_minify(sub_ctx->zsurf->texture->base.height0, sub_ctx->zsurf->val0);
+ new_ibf = sub_ctx->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- surf = ctx->sub->surf[i];
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ surf = sub_ctx->surf[i];
break;
}
}
@@ -2484,23 +2568,23 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
}
if (new_height != -1) {
- if (ctx->sub->fb_height != (uint32_t)new_height || ctx->sub->inverted_fbo_content != new_ibf) {
- ctx->sub->fb_height = new_height;
- ctx->sub->inverted_fbo_content = new_ibf;
- ctx->sub->viewport_state_dirty = (1 << 0);
+ if (sub_ctx->fb_height != (uint32_t)new_height || sub_ctx->inverted_fbo_content != new_ibf) {
+ sub_ctx->fb_height = new_height;
+ sub_ctx->inverted_fbo_content = new_ibf;
+ sub_ctx->viewport_state_dirty = (1 << 0);
}
}
- vrend_hw_emit_framebuffer_state(ctx);
+ vrend_hw_emit_framebuffer_state(sub_ctx);
- if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
+ if (sub_ctx->nr_cbufs > 0 || sub_ctx->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
vrend_printf("failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
- ctx->sub->shader_dirty = true;
- ctx->sub->blend_state_dirty = true;
+ sub_ctx->shader_dirty = true;
+ sub_ctx->blend_state_dirty = true;
}
void vrend_set_framebuffer_state_no_attach(UNUSED struct vrend_context *ctx,
@@ -2622,41 +2706,48 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
}
type = GL_FALSE;
- if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
- if (desc->channel[0].size == 32)
- type = GL_FLOAT;
- else if (desc->channel[0].size == 64)
- type = GL_DOUBLE;
- else if (desc->channel[0].size == 16)
- type = GL_HALF_FLOAT;
- } else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 8)
- type = GL_UNSIGNED_BYTE;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 8)
- type = GL_BYTE;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 16)
- type = GL_UNSIGNED_SHORT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 16)
- type = GL_SHORT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 32)
- type = GL_UNSIGNED_INT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 32)
- type = GL_INT;
- else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
- elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
- elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
- type = GL_INT_2_10_10_10_REV;
- else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
- elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
- elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
- type = GL_UNSIGNED_INT_2_10_10_10_REV;
- else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
- type = GL_UNSIGNED_INT_10F_11F_11F_REV;
+ switch (desc->channel[0].type) {
+ case UTIL_FORMAT_TYPE_FLOAT:
+ switch (desc->channel[0].size) {
+ case 16: type = GL_HALF_FLOAT; break;
+ case 32: type = GL_FLOAT; break;
+ case 64: type = GL_DOUBLE; break;
+ }
+ break;
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ switch (desc->channel[0].size) {
+ case 8: type = GL_UNSIGNED_BYTE; break;
+ case 16: type = GL_UNSIGNED_SHORT; break;
+ case 32: type = GL_UNSIGNED_INT; break;
+ }
+ break;
+ case UTIL_FORMAT_TYPE_SIGNED:
+ switch (desc->channel[0].size) {
+ case 8: type = GL_BYTE; break;
+ case 16: type = GL_SHORT; break;
+ case 32: type = GL_INT; break;
+ }
+ break;
+ }
+ if (type == GL_FALSE) {
+ switch (elements[i].src_format) {
+ case PIPE_FORMAT_R10G10B10A2_SSCALED:
+ case PIPE_FORMAT_R10G10B10A2_SNORM:
+ case PIPE_FORMAT_B10G10R10A2_SNORM:
+ type = GL_INT_2_10_10_10_REV;
+ break;
+ case PIPE_FORMAT_R10G10B10A2_USCALED:
+ case PIPE_FORMAT_R10G10B10A2_UNORM:
+ case PIPE_FORMAT_B10G10R10A2_UNORM:
+ type = GL_UNSIGNED_INT_2_10_10_10_REV;
+ break;
+ case PIPE_FORMAT_R11G11B10_FLOAT:
+ type = GL_UNSIGNED_INT_10F_11F_11F_REV;
+ break;
+ default:
+ ;
+ }
+ }
if (type == GL_FALSE) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
@@ -2725,7 +2816,6 @@ void vrend_bind_vertex_elements_state(struct vrend_context *ctx,
void vrend_set_constants(struct vrend_context *ctx,
uint32_t shader,
- UNUSED uint32_t index,
uint32_t num_constant,
const float *data)
{
@@ -2759,6 +2849,9 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
if (!has_feature(feat_ubo))
return;
+ struct pipe_constant_buffer *cbs = &ctx->sub->cbs[shader][index];
+ const uint32_t mask = 1u << index;
+
if (res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
@@ -2766,18 +2859,17 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
- ctx->sub->cbs[shader][index].buffer = (struct pipe_resource *)res;
- ctx->sub->cbs[shader][index].buffer_offset = offset;
- ctx->sub->cbs[shader][index].buffer_size = length;
-
- ctx->sub->const_bufs_used_mask[shader] |= (1u << index);
+ cbs->buffer = (struct pipe_resource *)res;
+ cbs->buffer_offset = offset;
+ cbs->buffer_size = length;
+ ctx->sub->const_bufs_used_mask[shader] |= mask;
} else {
- ctx->sub->cbs[shader][index].buffer = NULL;
- ctx->sub->cbs[shader][index].buffer_offset = 0;
- ctx->sub->cbs[shader][index].buffer_size = 0;
- ctx->sub->const_bufs_used_mask[shader] &= ~(1u << index);
+ cbs->buffer = NULL;
+ cbs->buffer_offset = 0;
+ cbs->buffer_size = 0;
+ ctx->sub->const_bufs_used_mask[shader] &= ~mask;
}
- ctx->sub->const_bufs_dirty[shader] |= (1u << index);
+ ctx->sub->const_bufs_dirty[shader] |= mask;
}
void vrend_set_index_buffer(struct vrend_context *ctx,
@@ -2814,27 +2906,28 @@ void vrend_set_single_vbo(struct vrend_context *ctx,
uint32_t res_handle)
{
struct vrend_resource *res;
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[index];
- if (ctx->sub->vbo[index].stride != stride ||
- ctx->sub->vbo[index].buffer_offset != buffer_offset ||
- ctx->sub->vbo_res_ids[index] != res_handle)
+ if (vbo->base.stride != stride ||
+ vbo->base.buffer_offset != buffer_offset ||
+ vbo->res_id != res_handle)
ctx->sub->vbo_dirty = true;
- ctx->sub->vbo[index].stride = stride;
- ctx->sub->vbo[index].buffer_offset = buffer_offset;
+ vbo->base.stride = stride;
+ vbo->base.buffer_offset = buffer_offset;
if (res_handle == 0) {
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, NULL);
- ctx->sub->vbo_res_ids[index] = 0;
- } else if (ctx->sub->vbo_res_ids[index] != res_handle) {
+ vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, NULL);
+ vbo->res_id = 0;
+ } else if (vbo->res_id != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
- ctx->sub->vbo_res_ids[index] = 0;
+ vbo->res_id = 0;
return;
}
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, res);
- ctx->sub->vbo_res_ids[index] = res_handle;
+ vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, res);
+ vbo->res_id = res_handle;
}
}
@@ -2851,8 +2944,8 @@ void vrend_set_num_vbo(struct vrend_context *ctx,
ctx->sub->vbo_dirty = true;
for (i = num_vbo; i < old_num; i++) {
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].buffer, NULL);
- ctx->sub->vbo_res_ids[i] = 0;
+ vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].base.buffer, NULL);
+ ctx->sub->vbo[i].res_id = 0;
}
}
@@ -2916,22 +3009,18 @@ void vrend_set_single_sampler_view(struct vrend_context *ctx,
glTexParameteri(view->texture->target, GL_TEXTURE_MAX_LEVEL, max_level);
tex->cur_max = max_level;
}
- if (tex->cur_swizzle_r != view->gl_swizzle_r) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
- tex->cur_swizzle_r = view->gl_swizzle_r;
- }
- if (tex->cur_swizzle_g != view->gl_swizzle_g) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
- tex->cur_swizzle_g = view->gl_swizzle_g;
- }
- if (tex->cur_swizzle_b != view->gl_swizzle_b) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
- tex->cur_swizzle_b = view->gl_swizzle_b;
- }
- if (tex->cur_swizzle_a != view->gl_swizzle_a) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
- tex->cur_swizzle_a = view->gl_swizzle_a;
+ if (memcmp(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint))) {
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ if (tex->cur_swizzle[i] != view->gl_swizzle[i]) {
+ glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
+ }
+ }
+ } else
+ glTexParameteriv(view->texture->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
+ memcpy(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint));
}
+
if (tex->cur_srgb_decode != view->srgb_decode && util_format_is_srgb(tex->base.base.format)) {
if (has_feature(feat_samplers))
ctx->sub->sampler_views_dirty[shader_type] |= (1u << index);
@@ -3146,7 +3235,7 @@ static inline bool can_emulate_logicop(enum pipe_logicop op)
}
-static inline void vrend_fill_shader_key(struct vrend_context *ctx,
+static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
struct vrend_shader_key *key)
{
@@ -3157,54 +3246,54 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
bool add_alpha_test = true;
key->cbufs_are_a8_bitmask = 0;
// Only use integer info when drawing to avoid stale info.
- if (vrend_state.use_integer && ctx->drawing) {
- key->attrib_signed_int_bitmask = ctx->sub->ve->signed_int_bitmask;
- key->attrib_unsigned_int_bitmask = ctx->sub->ve->unsigned_int_bitmask;
+ if (vrend_state.use_integer && sub_ctx->drawing) {
+ key->attrib_signed_int_bitmask = sub_ctx->ve->signed_int_bitmask;
+ key->attrib_unsigned_int_bitmask = sub_ctx->ve->unsigned_int_bitmask;
}
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (!ctx->sub->surf[i])
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (!sub_ctx->surf[i])
continue;
- if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format))
+ if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format))
key->cbufs_are_a8_bitmask |= (1 << i);
- if (util_format_is_pure_integer(ctx->sub->surf[i]->format)) {
+ if (util_format_is_pure_integer(sub_ctx->surf[i]->format)) {
add_alpha_test = false;
- update_int_sign_masks(ctx->sub->surf[i]->format, i,
+ update_int_sign_masks(sub_ctx->surf[i]->format, i,
&key->cbufs_signed_int_bitmask,
&key->cbufs_unsigned_int_bitmask);
}
- key->surface_component_bits[i] = util_format_get_component_bits(ctx->sub->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
+ key->surface_component_bits[i] = util_format_get_component_bits(sub_ctx->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
}
if (add_alpha_test) {
- key->add_alpha_test = ctx->sub->dsa_state.alpha.enabled;
- key->alpha_test = ctx->sub->dsa_state.alpha.func;
+ key->add_alpha_test = sub_ctx->dsa_state.alpha.enabled;
+ key->alpha_test = sub_ctx->dsa_state.alpha.func;
}
- key->pstipple_tex = ctx->sub->rs_state.poly_stipple_enable;
- key->color_two_side = ctx->sub->rs_state.light_twoside;
+ key->pstipple_tex = sub_ctx->rs_state.poly_stipple_enable;
+ key->color_two_side = sub_ctx->rs_state.light_twoside;
- key->clip_plane_enable = ctx->sub->rs_state.clip_plane_enable;
- key->flatshade = ctx->sub->rs_state.flatshade ? true : false;
+ key->clip_plane_enable = sub_ctx->rs_state.clip_plane_enable;
+ key->flatshade = sub_ctx->rs_state.flatshade ? true : false;
} else {
key->add_alpha_test = 0;
key->pstipple_tex = 0;
}
- if (type == PIPE_SHADER_FRAGMENT && vrend_state.use_gles && can_emulate_logicop(ctx->sub->blend_state.logicop_func)) {
- key->fs_logicop_enabled = ctx->sub->blend_state.logicop_enable;
- key->fs_logicop_func = ctx->sub->blend_state.logicop_func;
+ if (type == PIPE_SHADER_FRAGMENT && vrend_state.use_gles && can_emulate_logicop(sub_ctx->blend_state.logicop_func)) {
+ key->fs_logicop_enabled = sub_ctx->blend_state.logicop_enable;
+ key->fs_logicop_func = sub_ctx->blend_state.logicop_func;
key->fs_logicop_emulate_coherent = !has_feature(feat_framebuffer_fetch_non_coherent);
}
- key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
+ key->invert_fs_origin = !sub_ctx->inverted_fbo_content;
if (type == PIPE_SHADER_FRAGMENT)
- key->fs_swizzle_output_rgb_to_bgr = ctx->sub->swizzle_output_rgb_to_bgr;
+ key->fs_swizzle_output_rgb_to_bgr = sub_ctx->swizzle_output_rgb_to_bgr;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
+ if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY])
key->gs_present = true;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_CTRL])
key->tcs_present = true;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL])
key->tes_present = true;
int prev_type = -1;
@@ -3213,7 +3302,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
* old shader is still bound we should ignore the "previous" (as in
* execution order) shader when the key is evaluated, unless the currently
* bound shader selector is actually refers to the current shader. */
- if (ctx->sub->shaders[type] == sel) {
+ if (sub_ctx->shaders[type] == sel) {
switch (type) {
case PIPE_SHADER_GEOMETRY:
if (key->tcs_present || key->tes_present)
@@ -3243,36 +3332,36 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
}
}
- if (prev_type != -1 && ctx->sub->shaders[prev_type]) {
- key->prev_stage_num_clip_out = ctx->sub->shaders[prev_type]->sinfo.num_clip_out;
- key->prev_stage_num_cull_out = ctx->sub->shaders[prev_type]->sinfo.num_cull_out;
- key->num_indirect_generic_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
- key->num_indirect_patch_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
- key->num_prev_generic_and_patch_outputs = ctx->sub->shaders[prev_type]->sinfo.num_generic_and_patch_outputs;
- key->guest_sent_io_arrays = ctx->sub->shaders[prev_type]->sinfo.guest_sent_io_arrays;
+ if (prev_type != -1 && sub_ctx->shaders[prev_type]) {
+ key->prev_stage_num_clip_out = sub_ctx->shaders[prev_type]->sinfo.num_clip_out;
+ key->prev_stage_num_cull_out = sub_ctx->shaders[prev_type]->sinfo.num_cull_out;
+ key->num_indirect_generic_inputs = sub_ctx->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
+ key->num_indirect_patch_inputs = sub_ctx->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
+ key->num_prev_generic_and_patch_outputs = sub_ctx->shaders[prev_type]->sinfo.num_generic_and_patch_outputs;
+ key->guest_sent_io_arrays = sub_ctx->shaders[prev_type]->sinfo.guest_sent_io_arrays;
memcpy(key->prev_stage_generic_and_patch_outputs_layout,
- ctx->sub->shaders[prev_type]->sinfo.generic_outputs_layout,
+ sub_ctx->shaders[prev_type]->sinfo.generic_outputs_layout,
64 * sizeof (struct vrend_layout_info));
- key->force_invariant_inputs = ctx->sub->shaders[prev_type]->sinfo.invariant_outputs;
+ key->force_invariant_inputs = sub_ctx->shaders[prev_type]->sinfo.invariant_outputs;
}
// Only use coord_replace if frag shader receives GL_POINTS
if (type == PIPE_SHADER_FRAGMENT) {
- int fs_prim_mode = ctx->sub->prim_mode; // inherit draw-call's mode
+ int fs_prim_mode = sub_ctx->prim_mode; // inherit draw-call's mode
switch (prev_type) {
case PIPE_SHADER_TESS_EVAL:
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
fs_prim_mode = PIPE_PRIM_POINTS;
break;
case PIPE_SHADER_GEOMETRY:
- fs_prim_mode = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
+ fs_prim_mode = sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
break;
}
key->fs_prim_is_points = (fs_prim_mode == PIPE_PRIM_POINTS);
- key->coord_replace = ctx->sub->rs_state.point_quad_rasterization
+ key->coord_replace = sub_ctx->rs_state.point_quad_rasterization
&& key->fs_prim_is_points
- ? ctx->sub->rs_state.sprite_coord_enable
+ ? sub_ctx->rs_state.sprite_coord_enable
: 0x0;
}
@@ -3284,7 +3373,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
else if (key->gs_present)
next_type = PIPE_SHADER_GEOMETRY;
else if (key->tes_present) {
- if (!ctx->shader_cfg.use_gles)
+ if (!vrend_state.use_gles)
next_type = PIPE_SHADER_TESS_EVAL;
else
next_type = PIPE_SHADER_TESS_CTRL;
@@ -3306,17 +3395,17 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
break;
}
- if (next_type != -1 && ctx->sub->shaders[next_type]) {
- key->next_stage_pervertex_in = ctx->sub->shaders[next_type]->sinfo.has_pervertex_in;
- key->num_indirect_generic_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_generic_inputs;
- key->num_indirect_patch_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_patch_inputs;
- key->generic_outputs_expected_mask = ctx->sub->shaders[next_type]->sinfo.generic_inputs_emitted_mask;
+ if (next_type != -1 && sub_ctx->shaders[next_type]) {
+ key->next_stage_pervertex_in = sub_ctx->shaders[next_type]->sinfo.has_pervertex_in;
+ key->num_indirect_generic_outputs = sub_ctx->shaders[next_type]->sinfo.num_indirect_generic_inputs;
+ key->num_indirect_patch_outputs = sub_ctx->shaders[next_type]->sinfo.num_indirect_patch_inputs;
+ key->generic_outputs_expected_mask = sub_ctx->shaders[next_type]->sinfo.generic_inputs_emitted_mask;
}
if (type != PIPE_SHADER_FRAGMENT &&
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]) {
struct vrend_shader *fs =
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current;
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current;
key->compiled_fs_uid = fs->uid;
key->fs_info = &fs->sel->sinfo;
}
@@ -3363,7 +3452,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
if (1) {//shader->sel->type == PIPE_SHADER_FRAGMENT || shader->sel->type == PIPE_SHADER_GEOMETRY) {
bool ret;
- ret = vrend_compile_shader(ctx, shader);
+ ret = vrend_compile_shader(ctx->sub, shader);
if (ret == false) {
glDeleteShader(shader->id);
strarray_free(&shader->glsl_strings, true);
@@ -3373,7 +3462,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
return 0;
}
-static int vrend_shader_select(struct vrend_context *ctx,
+static int vrend_shader_select(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
bool *dirty)
{
@@ -3382,7 +3471,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
int r;
memset(&key, 0, sizeof(key));
- vrend_fill_shader_key(ctx, sel, &key);
+ vrend_fill_shader_key(sub_ctx, sel, &key);
if (sel->current && !memcmp(&sel->current->key, &key, sizeof(key)))
return 0;
@@ -3406,7 +3495,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- r = vrend_shader_create(ctx, shader, &key);
+ r = vrend_shader_create(sub_ctx->parent, shader, &key);
if (r) {
sel->current = NULL;
FREE(shader);
@@ -3422,8 +3511,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
return 0;
}
-static void *vrend_create_shader_state(UNUSED struct vrend_context *ctx,
- const struct pipe_stream_output_info *so_info,
+static void *vrend_create_shader_state(const struct pipe_stream_output_info *so_info,
uint32_t req_local_mem,
unsigned pipe_shader_type)
{
@@ -3448,7 +3536,7 @@ static int vrend_finish_shader(struct vrend_context *ctx,
sel->tokens = tgsi_dup_tokens(tokens);
- r = vrend_shader_select(ctx, sel, NULL);
+ r = vrend_shader_select(ctx->sub, sel, NULL);
if (r) {
return EINVAL;
}
@@ -3489,17 +3577,19 @@ int vrend_create_shader(struct vrend_context *ctx,
else if (((offlen + 3) / 4) > pkt_length)
long_shader = true;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
/* if we have an in progress one - don't allow a new shader
of that type or a different handle. */
- if (ctx->sub->long_shader_in_progress_handle[type]) {
+ if (sub_ctx->long_shader_in_progress_handle[type]) {
if (new_shader == true)
return EINVAL;
- if (handle != ctx->sub->long_shader_in_progress_handle[type])
+ if (handle != sub_ctx->long_shader_in_progress_handle[type])
return EINVAL;
}
if (new_shader) {
- sel = vrend_create_shader_state(ctx, so_info, req_local_mem, type);
+ sel = vrend_create_shader_state(so_info, req_local_mem, type);
if (sel == NULL)
return ENOMEM;
@@ -3512,11 +3602,11 @@ int vrend_create_shader(struct vrend_context *ctx,
}
memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
sel->buf_offset = pkt_length * 4;
- ctx->sub->long_shader_in_progress_handle[type] = handle;
+ sub_ctx->long_shader_in_progress_handle[type] = handle;
} else
finished = true;
} else {
- sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+ sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel) {
vrend_printf( "got continuation without original shader %d\n", handle);
ret = EINVAL;
@@ -3588,7 +3678,7 @@ int vrend_create_shader(struct vrend_context *ctx,
sel->tmp_buf = NULL;
}
free(tokens);
- ctx->sub->long_shader_in_progress_handle[type] = 0;
+ sub_ctx->long_shader_in_progress_handle[type] = 0;
}
if (new_shader) {
@@ -3618,31 +3708,33 @@ void vrend_bind_shader(struct vrend_context *ctx,
if (type > PIPE_SHADER_COMPUTE)
return;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
if (handle == 0) {
if (type == PIPE_SHADER_COMPUTE)
- ctx->sub->cs_shader_dirty = true;
+ sub_ctx->cs_shader_dirty = true;
else
- ctx->sub->shader_dirty = true;
- vrend_shader_state_reference(&ctx->sub->shaders[type], NULL);
+ sub_ctx->shader_dirty = true;
+ vrend_shader_state_reference(&sub_ctx->shaders[type], NULL);
return;
}
- sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+ sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel)
return;
if (sel->type != type)
return;
- if (ctx->sub->shaders[sel->type] != sel) {
+ if (sub_ctx->shaders[sel->type] != sel) {
if (type == PIPE_SHADER_COMPUTE)
- ctx->sub->cs_shader_dirty = true;
+ sub_ctx->cs_shader_dirty = true;
else
- ctx->sub->shader_dirty = true;
- ctx->sub->prog_ids[sel->type] = 0;
+ sub_ctx->shader_dirty = true;
+ sub_ctx->prog_ids[sel->type] = 0;
}
- vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
+ vrend_shader_state_reference(&sub_ctx->shaders[sel->type], sel);
}
void vrend_clear(struct vrend_context *ctx,
@@ -3651,6 +3743,7 @@ void vrend_clear(struct vrend_context *ctx,
double depth, unsigned stencil)
{
GLbitfield bits = 0;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
if (ctx->in_error)
return;
@@ -3658,20 +3751,20 @@ void vrend_clear(struct vrend_context *ctx,
if (ctx->ctx_switch_pending)
vrend_finish_context_switch(ctx);
- vrend_update_frontface_state(ctx);
- if (ctx->sub->stencil_state_dirty)
- vrend_update_stencil_state(ctx);
- if (ctx->sub->scissor_state_dirty)
- vrend_update_scissor_state(ctx);
- if (ctx->sub->viewport_state_dirty)
- vrend_update_viewport_state(ctx);
+ vrend_update_frontface_state(sub_ctx);
+ if (sub_ctx->stencil_state_dirty)
+ vrend_update_stencil_state(sub_ctx);
+ if (sub_ctx->scissor_state_dirty)
+ vrend_update_scissor_state(sub_ctx);
+ if (sub_ctx->viewport_state_dirty)
+ vrend_update_viewport_state(sub_ctx);
- vrend_use_program(ctx, 0);
+ vrend_use_program(sub_ctx, 0);
glDisable(GL_SCISSOR_TEST);
if (buffers & PIPE_CLEAR_COLOR) {
- if (ctx->sub->nr_cbufs && ctx->sub->surf[0] && vrend_format_is_emulated_alpha(ctx->sub->surf[0]->format)) {
+ if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_format_is_emulated_alpha(sub_ctx->surf[0]->format)) {
glClearColor(color->f[3], 0.0, 0.0, 0.0);
} else {
glClearColor(color->f[0], color->f[1], color->f[2], color->f[3]);
@@ -3680,7 +3773,7 @@ void vrend_clear(struct vrend_context *ctx,
/* This function implements Gallium's full clear callback (st->pipe->clear) on the host. This
callback requires no color component be masked. We must unmask all components before
calling glClear* and restore the previous colormask afterwards, as Gallium expects. */
- if (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (sub_ctx->hw_blend_state.independent_blend_enable &&
has_feature(feat_indep_blend)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
@@ -3708,24 +3801,24 @@ void vrend_clear(struct vrend_context *ctx,
glClearStencil(stencil);
}
- if (ctx->sub->hw_rs_state.rasterizer_discard)
+ if (sub_ctx->hw_rs_state.rasterizer_discard)
glDisable(GL_RASTERIZER_DISCARD);
if (buffers & PIPE_CLEAR_COLOR) {
uint32_t mask = 0;
int i;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i])
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i])
mask |= (1 << i);
}
if (mask != (buffers >> 2)) {
mask = buffers >> 2;
while (mask) {
i = u_bit_scan(&mask);
- if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_uint(ctx->sub->surf[i] && ctx->sub->surf[i]->format))
+ if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_uint(sub_ctx->surf[i] && sub_ctx->surf[i]->format))
glClearBufferuiv(GL_COLOR,
i, (GLuint *)color);
- else if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_sint(ctx->sub->surf[i] && ctx->sub->surf[i]->format))
+ else if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_sint(sub_ctx->surf[i] && sub_ctx->surf[i]->format))
glClearBufferiv(GL_COLOR,
i, (GLint *)color);
else
@@ -3748,40 +3841,40 @@ void vrend_clear(struct vrend_context *ctx,
* get here is because the guest cleared all those states but gallium
* didn't forward them before calling the clear command
*/
- if (ctx->sub->hw_rs_state.rasterizer_discard)
+ if (sub_ctx->hw_rs_state.rasterizer_discard)
glEnable(GL_RASTERIZER_DISCARD);
if (buffers & PIPE_CLEAR_DEPTH) {
- if (!ctx->sub->dsa_state.depth.writemask)
+ if (!sub_ctx->dsa_state.depth.writemask)
glDepthMask(GL_FALSE);
}
/* Restore previous stencil buffer write masks for both front and back faces */
if (buffers & PIPE_CLEAR_STENCIL) {
- glStencilMaskSeparate(GL_FRONT, ctx->sub->dsa_state.stencil[0].writemask);
- glStencilMaskSeparate(GL_BACK, ctx->sub->dsa_state.stencil[1].writemask);
+ glStencilMaskSeparate(GL_FRONT, sub_ctx->dsa_state.stencil[0].writemask);
+ glStencilMaskSeparate(GL_BACK, sub_ctx->dsa_state.stencil[1].writemask);
}
/* Restore previous colormask */
if (buffers & PIPE_CLEAR_COLOR) {
- if (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (sub_ctx->hw_blend_state.independent_blend_enable &&
has_feature(feat_indep_blend)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
- struct pipe_blend_state *blend = &ctx->sub->hw_blend_state;
+ struct pipe_blend_state *blend = &sub_ctx->hw_blend_state;
glColorMaskIndexedEXT(i, blend->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
} else {
- glColorMask(ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
+ glColorMask(sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
- if (ctx->sub->hw_rs_state.scissor)
+ if (sub_ctx->hw_rs_state.scissor)
glEnable(GL_SCISSOR_TEST);
else
glDisable(GL_SCISSOR_TEST);
@@ -3819,20 +3912,20 @@ void vrend_clear_texture(struct vrend_context* ctx,
}
}
-static void vrend_update_scissor_state(struct vrend_context *ctx)
+static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx)
{
struct pipe_scissor_state *ss;
GLint y;
GLuint idx;
- unsigned mask = ctx->sub->scissor_state_dirty;
+ unsigned mask = sub_ctx->scissor_state_dirty;
while (mask) {
idx = u_bit_scan(&mask);
if (idx >= PIPE_MAX_VIEWPORTS) {
- vrend_report_buffer_error(ctx, 0);
+ vrend_report_buffer_error(sub_ctx->parent, 0);
break;
}
- ss = &ctx->sub->ss[idx];
+ ss = &sub_ctx->ss[idx];
y = ss->miny;
if (idx > 0 && has_feature(feat_viewport_array))
@@ -3840,39 +3933,39 @@ static void vrend_update_scissor_state(struct vrend_context *ctx)
else
glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
}
- ctx->sub->scissor_state_dirty = 0;
+ sub_ctx->scissor_state_dirty = 0;
}
-static void vrend_update_viewport_state(struct vrend_context *ctx)
+static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx)
{
GLint cy;
- unsigned mask = ctx->sub->viewport_state_dirty;
+ unsigned mask = sub_ctx->viewport_state_dirty;
int idx;
while (mask) {
idx = u_bit_scan(&mask);
- if (ctx->sub->viewport_is_negative)
- cy = ctx->sub->vps[idx].cur_y - ctx->sub->vps[idx].height;
+ if (sub_ctx->viewport_is_negative)
+ cy = sub_ctx->vps[idx].cur_y - sub_ctx->vps[idx].height;
else
- cy = ctx->sub->vps[idx].cur_y;
+ cy = sub_ctx->vps[idx].cur_y;
if (idx > 0 && has_feature(feat_viewport_array))
- glViewportIndexedf(idx, ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
+ glViewportIndexedf(idx, sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height);
else
- glViewport(ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
+ glViewport(sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height);
if (idx && has_feature(feat_viewport_array))
if (vrend_state.use_gles) {
- glDepthRangeIndexedfOES(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangeIndexedfOES(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
} else
- glDepthRangeIndexed(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangeIndexed(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
else
if (vrend_state.use_gles)
- glDepthRangefOES(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangefOES(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
else
- glDepthRange(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRange(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
}
- ctx->sub->viewport_state_dirty = 0;
+ sub_ctx->viewport_state_dirty = 0;
}
static GLenum get_gs_xfb_mode(GLenum mode)
@@ -3947,7 +4040,7 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
/* XYZZY: debug this? */
break;
}
- res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].buffer;
+ res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].base.buffer;
if (!res) {
vrend_printf("cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
@@ -3978,10 +4071,12 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
glBindBuffer(GL_ARRAY_BUFFER, res->id);
- if (ctx->sub->vbo[vbo_index].stride == 0) {
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[vbo_index];
+
+ if (vbo->base.stride == 0) {
void *data;
/* for 0 stride we are kinda screwed */
- data = glMapBufferRange(GL_ARRAY_BUFFER, ctx->sub->vbo[vbo_index].buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
+ data = glMapBufferRange(GL_ARRAY_BUFFER, vbo->base.buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
switch (ve->nr_chan) {
case 1:
@@ -4003,9 +4098,9 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
} else {
enable_bitmask |= (1 << loc);
if (util_format_is_pure_integer(ve->base.src_format)) {
- glVertexAttribIPointer(loc, ve->nr_chan, ve->type, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
+ glVertexAttribIPointer(loc, ve->nr_chan, ve->type, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
} else {
- glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
+ glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
}
glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
}
@@ -4037,69 +4132,86 @@ static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
glBindVertexArray(va->id);
if (ctx->sub->vbo_dirty) {
- GLsizei count = 0;
- GLuint buffers[PIPE_MAX_ATTRIBS];
- GLintptr offsets[PIPE_MAX_ATTRIBS];
- GLsizei strides[PIPE_MAX_ATTRIBS];
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[0];
- for (i = 0; i < ctx->sub->num_vbos; i++) {
- struct vrend_resource *res = (struct vrend_resource *)ctx->sub->vbo[i].buffer;
- if (!res) {
- buffers[count] = 0;
- offsets[count] = 0;
- strides[count++] = 0;
- } else {
- buffers[count] = res->id;
- offsets[count] = ctx->sub->vbo[i].buffer_offset,
- strides[count++] = ctx->sub->vbo[i].stride;
+ if (has_feature(feat_bind_vertex_buffers)) {
+ GLsizei count = MAX2(ctx->sub->num_vbos, ctx->sub->old_num_vbos);
+
+ GLuint buffers[PIPE_MAX_ATTRIBS];
+ GLintptr offsets[PIPE_MAX_ATTRIBS];
+ GLsizei strides[PIPE_MAX_ATTRIBS];
+
+ for (i = 0; i < ctx->sub->num_vbos; i++) {
+ struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
+ if (res) {
+ buffers[i] = res->id;
+ offsets[i] = vbo[i].base.buffer_offset;
+ strides[i] = vbo[i].base.stride;
+ } else {
+ buffers[i] = 0;
+ offsets[i] = 0;
+ strides[i] = 0;
+ }
+ }
+
+ for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
+ buffers[i] = 0;
+ offsets[i] = 0;
+ strides[i] = 0;
}
- }
- for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
- buffers[count] = 0;
- offsets[count] = 0;
- strides[count++] = 0;
- }
- if (has_feature(feat_bind_vertex_buffers))
glBindVertexBuffers(0, count, buffers, offsets, strides);
- else {
- for (i = 0; i < count; ++i)
- glBindVertexBuffer(i, buffers[i], offsets[i], strides[i]);
+ } else {
+ for (i = 0; i < ctx->sub->num_vbos; i++) {
+ struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
+ if (res)
+ glBindVertexBuffer(i, res->id, vbo[i].base.buffer_offset, vbo[i].base.stride);
+ else
+ glBindVertexBuffer(i, 0, 0, 0);
+ }
+ for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++)
+ glBindVertexBuffer(i, 0, 0, 0);
}
ctx->sub->vbo_dirty = false;
}
}
-static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
+static int vrend_draw_bind_samplers_shader(struct vrend_sub_context *sub_ctx,
int shader_type,
int next_sampler_id)
{
int index = 0;
- uint32_t dirty = ctx->sub->sampler_views_dirty[shader_type];
+ uint32_t dirty = sub_ctx->sampler_views_dirty[shader_type];
+
+ uint32_t mask = sub_ctx->prog->samplers_used_mask[shader_type];
+
+ struct vrend_shader_view *sviews = &sub_ctx->views[shader_type];
- uint32_t mask = ctx->sub->prog->samplers_used_mask[shader_type];
while (mask) {
int i = u_bit_scan(&mask);
- struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
- if (dirty & (1 << i) && tview) {
- if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
- glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
- (tview->gl_swizzle_r == GL_ZERO || tview->gl_swizzle_r == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_g == GL_ZERO || tview->gl_swizzle_g == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_b == GL_ZERO || tview->gl_swizzle_b == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_a == GL_ZERO || tview->gl_swizzle_a == GL_ONE) ? 0.0 : 1.0);
- glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
- tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
+ if (!(dirty & (1 << i)))
+ continue;
+
+ struct vrend_sampler_view *tview = sviews->views[i];
+ if (tview) {
+ if (sub_ctx->prog->shadow_samp_mask[shader_type] & (1 << i)) {
+ glUniform4f(sub_ctx->prog->shadow_samp_mask_locs[shader_type][index],
+ (tview->gl_swizzle[0] == GL_ZERO || tview->gl_swizzle[0] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[1] == GL_ZERO || tview->gl_swizzle[1] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[2] == GL_ZERO || tview->gl_swizzle[2] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[3] == GL_ZERO || tview->gl_swizzle[3] == GL_ONE) ? 0.0 : 1.0);
+ glUniform4f(sub_ctx->prog->shadow_samp_add_locs[shader_type][index],
+ tview->gl_swizzle[0] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[1] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[2] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[3] == GL_ONE ? 1.0 : 0.0);
}
if (tview->texture) {
- GLuint id;
+ GLuint id = tview->id;
struct vrend_resource *texture = tview->texture;
GLenum target = tview->target;
@@ -4108,17 +4220,16 @@ static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
if (has_bit(tview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
id = texture->tbo_tex_id;
target = GL_TEXTURE_BUFFER;
- } else
- id = tview->id;
+ }
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
glBindTexture(target, id);
- if (ctx->sub->views[shader_type].old_ids[i] != id ||
- ctx->sub->sampler_views_dirty[shader_type] & (1 << i)) {
- vrend_apply_sampler_state(ctx, texture, shader_type, i,
+ if (sviews->old_ids[i] != id ||
+ sub_ctx->sampler_views_dirty[shader_type] & (1 << i)) {
+ vrend_apply_sampler_state(sub_ctx, texture, shader_type, i,
next_sampler_id, tview);
- ctx->sub->views[shader_type].old_ids[i] = id;
+ sviews->old_ids[i] = id;
}
dirty &= ~(1 << i);
}
@@ -4126,12 +4237,12 @@ static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
next_sampler_id++;
index++;
}
- ctx->sub->sampler_views_dirty[shader_type] = dirty;
+ sub_ctx->sampler_views_dirty[shader_type] = dirty;
return next_sampler_id;
}
-static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
+static int vrend_draw_bind_ubo_shader(struct vrend_sub_context *sub_ctx,
int shader_type, int next_ubo_id)
{
uint32_t mask, dirty, update;
@@ -4141,9 +4252,9 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
if (!has_feature(feat_ubo))
return next_ubo_id;
- mask = ctx->sub->prog->ubo_used_mask[shader_type];
- dirty = ctx->sub->const_bufs_dirty[shader_type];
- update = dirty & ctx->sub->const_bufs_used_mask[shader_type];
+ mask = sub_ctx->prog->ubo_used_mask[shader_type];
+ dirty = sub_ctx->const_bufs_dirty[shader_type];
+ update = dirty & sub_ctx->const_bufs_used_mask[shader_type];
if (!update)
return next_ubo_id + util_bitcount(mask);
@@ -4154,7 +4265,7 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
if (update & (1 << i)) {
/* The cbs array is indexed using the gallium uniform buffer index */
- cb = &ctx->sub->cbs[shader_type][i];
+ cb = &sub_ctx->cbs[shader_type][i];
res = (struct vrend_resource *)cb->buffer;
glBindBufferRange(GL_UNIFORM_BUFFER, next_ubo_id, res->id,
@@ -4163,26 +4274,26 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
}
next_ubo_id++;
}
- ctx->sub->const_bufs_dirty[shader_type] = dirty;
+ sub_ctx->const_bufs_dirty[shader_type] = dirty;
return next_ubo_id;
}
-static void vrend_draw_bind_const_shader(struct vrend_context *ctx,
+static void vrend_draw_bind_const_shader(struct vrend_sub_context *sub_ctx,
int shader_type, bool new_program)
{
- if (ctx->sub->consts[shader_type].consts &&
- ctx->sub->shaders[shader_type] &&
- (ctx->sub->prog->const_location[shader_type] != -1) &&
- (ctx->sub->const_dirty[shader_type] || new_program)) {
- glUniform4uiv(ctx->sub->prog->const_location[shader_type],
- ctx->sub->shaders[shader_type]->sinfo.num_consts,
- ctx->sub->consts[shader_type].consts);
- ctx->sub->const_dirty[shader_type] = false;
+ if (sub_ctx->consts[shader_type].consts &&
+ sub_ctx->shaders[shader_type] &&
+ (sub_ctx->prog->const_location[shader_type] != -1) &&
+ (sub_ctx->const_dirty[shader_type] || new_program)) {
+ glUniform4uiv(sub_ctx->prog->const_location[shader_type],
+ sub_ctx->shaders[shader_type]->sinfo.num_consts,
+ sub_ctx->consts[shader_type].consts);
+ sub_ctx->const_dirty[shader_type] = false;
}
}
-static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_type)
+static void vrend_draw_bind_ssbo_shader(struct vrend_sub_context *sub_ctx, int shader_type)
{
uint32_t mask;
struct vrend_ssbo *ssbo;
@@ -4192,30 +4303,30 @@ static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_ty
if (!has_feature(feat_ssbo))
return;
- if (!ctx->sub->prog->ssbo_locs[shader_type])
+ if (!sub_ctx->prog->ssbo_locs[shader_type])
return;
- if (!ctx->sub->ssbo_used_mask[shader_type])
+ if (!sub_ctx->ssbo_used_mask[shader_type])
return;
- mask = ctx->sub->ssbo_used_mask[shader_type];
+ mask = sub_ctx->ssbo_used_mask[shader_type];
while (mask) {
i = u_bit_scan(&mask);
- ssbo = &ctx->sub->ssbo[shader_type][i];
+ ssbo = &sub_ctx->ssbo[shader_type][i];
res = (struct vrend_resource *)ssbo->res;
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, i, res->id,
ssbo->buffer_offset, ssbo->buffer_size);
- if (ctx->sub->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
+ if (sub_ctx->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
if (!vrend_state.use_gles)
- glShaderStorageBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ssbo_locs[shader_type][i], i);
+ glShaderStorageBlockBinding(sub_ctx->prog->id, sub_ctx->prog->ssbo_locs[shader_type][i], i);
else
debug_printf("glShaderStorageBlockBinding not supported on gles \n");
}
}
}
-static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
+static void vrend_draw_bind_abo_shader(struct vrend_sub_context *sub_ctx)
{
uint32_t mask;
struct vrend_abo *abo;
@@ -4225,18 +4336,18 @@ static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
if (!has_feature(feat_atomic_counters))
return;
- mask = ctx->sub->abo_used_mask;
+ mask = sub_ctx->abo_used_mask;
while (mask) {
i = u_bit_scan(&mask);
- abo = &ctx->sub->abo[i];
+ abo = &sub_ctx->abo[i];
res = (struct vrend_resource *)abo->res;
glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, i, res->id,
abo->buffer_offset, abo->buffer_size);
}
}
-static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_type)
+static void vrend_draw_bind_images_shader(struct vrend_sub_context *sub_ctx, int shader_type)
{
GLenum access;
GLboolean layered;
@@ -4244,22 +4355,22 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
uint32_t mask, tex_id, level, first_layer;
- if (!ctx->sub->images_used_mask[shader_type])
+ if (!sub_ctx->images_used_mask[shader_type])
return;
- if (!ctx->sub->prog->img_locs[shader_type])
+ if (!sub_ctx->prog->img_locs[shader_type])
return;
if (!has_feature(feat_images))
return;
- mask = ctx->sub->images_used_mask[shader_type];
+ mask = sub_ctx->images_used_mask[shader_type];
while (mask) {
unsigned i = u_bit_scan(&mask);
- if (!(ctx->sub->prog->images_used_mask[shader_type] & (1 << i)))
+ if (!(sub_ctx->prog->images_used_mask[shader_type] & (1 << i)))
continue;
- iview = &ctx->sub->image_views[shader_type][i];
+ iview = &sub_ctx->image_views[shader_type][i];
tex_id = iview->texture->id;
if (has_bit(iview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
if (!iview->texture->tbo_tex_id)
@@ -4285,7 +4396,7 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
}
if (!vrend_state.use_gles)
- glUniform1i(ctx->sub->prog->img_locs[shader_type][i], i);
+ glUniform1i(sub_ctx->prog->img_locs[shader_type][i], i);
switch (iview->access) {
case PIPE_IMAGE_ACCESS_READ:
@@ -4306,61 +4417,162 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
}
}
-static void vrend_draw_bind_objects(struct vrend_context *ctx, bool new_program)
+static void vrend_draw_bind_objects(struct vrend_sub_context *sub_ctx, bool new_program)
{
int next_ubo_id = 0, next_sampler_id = 0;
- for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
- next_ubo_id = vrend_draw_bind_ubo_shader(ctx, shader_type, next_ubo_id);
- vrend_draw_bind_const_shader(ctx, shader_type, new_program);
- next_sampler_id = vrend_draw_bind_samplers_shader(ctx, shader_type,
+ for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= sub_ctx->last_shader_idx; shader_type++) {
+ next_ubo_id = vrend_draw_bind_ubo_shader(sub_ctx, shader_type, next_ubo_id);
+ vrend_draw_bind_const_shader(sub_ctx, shader_type, new_program);
+ next_sampler_id = vrend_draw_bind_samplers_shader(sub_ctx, shader_type,
next_sampler_id);
- vrend_draw_bind_images_shader(ctx, shader_type);
- vrend_draw_bind_ssbo_shader(ctx, shader_type);
+ vrend_draw_bind_images_shader(sub_ctx, shader_type);
+ vrend_draw_bind_ssbo_shader(sub_ctx, shader_type);
}
- vrend_draw_bind_abo_shader(ctx);
+ vrend_draw_bind_abo_shader(sub_ctx);
- if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
+ if (vrend_state.use_core_profile && sub_ctx->prog->fs_stipple_loc != -1) {
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
- glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
- glUniform1i(ctx->sub->prog->fs_stipple_loc, next_sampler_id);
+ glBindTexture(GL_TEXTURE_2D, sub_ctx->parent->pstipple_tex_id);
+ glUniform1i(sub_ctx->prog->fs_stipple_loc, next_sampler_id);
}
- if (vrend_state.use_core_profile && ctx->sub->prog->fs_alpha_ref_val_loc != -1) {
- glUniform1f(ctx->sub->prog->fs_alpha_ref_val_loc, ctx->sub->dsa_state.alpha.ref_value);
+ if (vrend_state.use_core_profile && sub_ctx->prog->fs_alpha_ref_val_loc != -1) {
+ glUniform1f(sub_ctx->prog->fs_alpha_ref_val_loc, sub_ctx->dsa_state.alpha.ref_value);
}
}
static
-void vrend_inject_tcs(struct vrend_context *ctx, int vertices_per_patch)
+void vrend_inject_tcs(struct vrend_sub_context *sub_ctx, int vertices_per_patch)
{
struct pipe_stream_output_info so_info;
memset(&so_info, 0, sizeof(so_info));
- struct vrend_shader_selector *sel = vrend_create_shader_state(ctx,
- &so_info,
+ struct vrend_shader_selector *sel = vrend_create_shader_state(&so_info,
false, PIPE_SHADER_TESS_CTRL);
struct vrend_shader *shader;
shader = CALLOC_STRUCT(vrend_shader);
- vrend_fill_shader_key(ctx, sel, &shader->key);
+ vrend_fill_shader_key(sub_ctx, sel, &shader->key);
shader->sel = sel;
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- vrend_shader_create_passthrough_tcs(ctx, &ctx->shader_cfg,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->tokens,
+ vrend_shader_create_passthrough_tcs(sub_ctx->parent, &sub_ctx->parent->shader_cfg,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->tokens,
&shader->key, vrend_state.tess_factors, &sel->sinfo,
&shader->glsl_strings, vertices_per_patch);
// Need to add inject the selected shader to the shader selector and then the code below
// can continue
sel->tokens = NULL;
sel->current = shader;
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] = sel;
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
+ sub_ctx->shaders[PIPE_SHADER_TESS_CTRL] = sel;
+ sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
shader->id = glCreateShader(conv_shader_type(shader->sel->type));
- vrend_compile_shader(ctx, shader);
+ vrend_compile_shader(sub_ctx, shader);
+}
+
+
+static bool
+vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_info *info)
+{
+ struct vrend_linked_shader_program *prog;
+ bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
+ bool new_program = false;
+
+ struct vrend_shader_selector **shaders = sub_ctx->shaders;
+
+ sub_ctx->shader_dirty = false;
+
+ if (!shaders[PIPE_SHADER_VERTEX] || !shaders[PIPE_SHADER_FRAGMENT]) {
+ vrend_printf("dropping rendering due to missing shaders: %s\n", sub_ctx->parent->debug_name);
+ return false;
+ }
+
+ // For some GPU, we'd like to use integer variable in generated GLSL if
+ // the input buffers are integer formats. But we actually don't know the
+ // buffer formats when the shader is created, we only know it here.
+ // Set it to true so the underlying code knows to use the buffer formats
+ // now.
+ sub_ctx->drawing = true;
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+ sub_ctx->drawing = false;
+
+ if (shaders[PIPE_SHADER_TESS_CTRL] && shaders[PIPE_SHADER_TESS_CTRL]->tokens)
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
+ else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) {
+ VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n");
+ vrend_inject_tcs(sub_ctx, info->vertices_per_patch);
+
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+ }
+
+ if (shaders[PIPE_SHADER_TESS_EVAL])
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
+ if (shaders[PIPE_SHADER_GEOMETRY])
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
+
+ if (!shaders[PIPE_SHADER_VERTEX]->current ||
+ !shaders[PIPE_SHADER_FRAGMENT]->current ||
+ (shaders[PIPE_SHADER_GEOMETRY] && !shaders[PIPE_SHADER_GEOMETRY]->current) ||
+ (shaders[PIPE_SHADER_TESS_CTRL] && !shaders[PIPE_SHADER_TESS_CTRL]->current) ||
+ (shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_TESS_EVAL]->current)) {
+ vrend_printf( "failure to compile shader variants: %s\n", sub_ctx->parent->debug_name);
+ return false;
+ }
+
+ GLuint vs_id = shaders[PIPE_SHADER_VERTEX]->current->id;
+ GLuint fs_id = shaders[PIPE_SHADER_FRAGMENT]->current->id;
+ GLuint gs_id = shaders[PIPE_SHADER_GEOMETRY] ? shaders[PIPE_SHADER_GEOMETRY]->current->id : 0;
+ GLuint tcs_id = shaders[PIPE_SHADER_TESS_CTRL] ? shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0;
+ GLuint tes_id = shaders[PIPE_SHADER_TESS_EVAL] ? shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0;
+
+ bool same_prog = sub_ctx->prog &&
+ vs_id == sub_ctx->prog_ids[PIPE_SHADER_VERTEX] &&
+ fs_id == sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] &&
+ gs_id == sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] &&
+ tcs_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] &&
+ tes_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] &&
+ sub_ctx->prog->dual_src_linked == dual_src;
+
+ if (!same_prog) {
+ prog = lookup_shader_program(sub_ctx, vs_id, fs_id, gs_id, tcs_id, tes_id, dual_src);
+ if (!prog) {
+ prog = add_shader_program(sub_ctx,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->current,
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current,
+ gs_id ? sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
+ tcs_id ? sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
+ tes_id ? sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
+ if (!prog)
+ return false;
+ }
+
+ sub_ctx->last_shader_idx = sub_ctx->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (sub_ctx->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
+ } else
+ prog = sub_ctx->prog;
+ if (sub_ctx->prog != prog) {
+ new_program = true;
+ sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = vs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] = fs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] = gs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] = tcs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] = tes_id;
+ sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = 0;
+ sub_ctx->prog = prog;
+
+ /* mark all constbufs and sampler views as dirty */
+ for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) {
+ sub_ctx->const_bufs_dirty[stage] = ~0;
+ sub_ctx->sampler_views_dirty[stage] = ~0;
+ }
+
+ prog->ref_context = sub_ctx;
+ }
+ return new_program;
}
int vrend_draw_vbo(struct vrend_context *ctx,
@@ -4372,6 +4584,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
bool new_program = false;
struct vrend_resource *indirect_res = NULL;
struct vrend_resource *indirect_params_res = NULL;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
if (ctx->in_error)
return 0;
@@ -4410,173 +4623,63 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (ctx->ctx_switch_pending)
vrend_finish_context_switch(ctx);
- vrend_update_frontface_state(ctx);
+ vrend_update_frontface_state(sub_ctx);
if (ctx->sub->stencil_state_dirty)
- vrend_update_stencil_state(ctx);
+ vrend_update_stencil_state(sub_ctx);
if (ctx->sub->scissor_state_dirty)
- vrend_update_scissor_state(ctx);
+ vrend_update_scissor_state(sub_ctx);
if (ctx->sub->viewport_state_dirty)
- vrend_update_viewport_state(ctx);
+ vrend_update_viewport_state(sub_ctx);
if (ctx->sub->blend_state_dirty)
- vrend_patch_blend_state(ctx);
+ vrend_patch_blend_state(sub_ctx);
// enable primitive-mode-dependent shader variants
- if (ctx->sub->prim_mode != (int)info->mode) {
+ if (sub_ctx->prim_mode != (int)info->mode) {
// Only refresh shader program when switching in/out of GL_POINTS primitive mode
- if (ctx->sub->prim_mode == PIPE_PRIM_POINTS
+ if (sub_ctx->prim_mode == PIPE_PRIM_POINTS
|| (int)info->mode == PIPE_PRIM_POINTS)
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
- ctx->sub->prim_mode = (int)info->mode;
+ sub_ctx->prim_mode = (int)info->mode;
}
- if (ctx->sub->shader_dirty || ctx->sub->swizzle_output_rgb_to_bgr) {
- struct vrend_linked_shader_program *prog;
- bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
- bool same_prog;
-
- ctx->sub->shader_dirty = false;
-
- if (!ctx->sub->shaders[PIPE_SHADER_VERTEX] || !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
- vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
- return 0;
- }
-
- // For some GPU, we'd like to use integer variable in generated GLSL if
- // the input buffers are integer formats. But we actually don't know the
- // buffer formats when the shader is created, we only know it here.
- // Set it to true so the underlying code knows to use the buffer formats
- // now.
- ctx->drawing = true;
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
- ctx->drawing = false;
-
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->tokens)
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
- else if (vrend_state.use_gles && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]) {
- VREND_DEBUG(dbg_shader, ctx, "Need to inject a TCS\n");
- vrend_inject_tcs(ctx, info->vertices_per_patch);
-
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
- }
-
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
-
- if (!ctx->sub->shaders[PIPE_SHADER_VERTEX]->current ||
- !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current ||
- (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && !ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current) ||
- (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && !ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current) ||
- (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && !ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current)) {
- vrend_printf( "failure to compile shader variants: %s\n", ctx->debug_name);
- return 0;
- }
- same_prog = true;
- if (ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_VERTEX])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY])
- same_prog = false;
- if (ctx->sub->prog && ctx->sub->prog->dual_src_linked != dual_src)
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL])
- same_prog = false;
-
- if (!same_prog) {
- prog = lookup_shader_program(ctx,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id,
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id,
- ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id : 0,
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0,
- dual_src);
- if (!prog) {
- prog = add_shader_program(ctx,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->current,
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current,
- ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
- if (!prog)
- return 0;
- }
-
- ctx->sub->last_shader_idx = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
- } else
- prog = ctx->sub->prog;
- if (ctx->sub->prog != prog) {
- new_program = true;
- ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id;
- ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT] = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY] = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
- ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL] = ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL] = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id;
- ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = -1;
- ctx->sub->prog = prog;
-
- /* mark all constbufs and sampler views as dirty */
- for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) {
- ctx->sub->const_bufs_dirty[stage] = ~0;
- ctx->sub->sampler_views_dirty[stage] = ~0;
- }
+ if (sub_ctx->shader_dirty || sub_ctx->swizzle_output_rgb_to_bgr)
+ new_program = vrend_select_program(sub_ctx, info);
- prog->ref_context = ctx->sub;
- }
- }
- if (!ctx->sub->prog) {
+ if (!sub_ctx->prog) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return 0;
}
- vrend_use_program(ctx, ctx->sub->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog->id);
- vrend_draw_bind_objects(ctx, new_program);
+ vrend_draw_bind_objects(sub_ctx, new_program);
- if (!ctx->sub->ve) {
+ if (!sub_ctx->ve) {
vrend_printf("illegal VE setup - skipping renderering\n");
return 0;
}
- float viewport_neg_val = ctx->sub->viewport_is_negative ? -1.0 : 1.0;
- if (ctx->sub->prog->viewport_neg_val != viewport_neg_val) {
- glUniform1f(ctx->sub->prog->vs_ws_adjust_loc, viewport_neg_val);
- ctx->sub->prog->viewport_neg_val = viewport_neg_val;
+ float viewport_neg_val = sub_ctx->viewport_is_negative ? -1.0 : 1.0;
+ if (sub_ctx->prog->viewport_neg_val != viewport_neg_val) {
+ glUniform1f(sub_ctx->prog->vs_ws_adjust_loc, viewport_neg_val);
+ sub_ctx->prog->viewport_neg_val = viewport_neg_val;
}
- if (ctx->sub->rs_state.clip_plane_enable) {
+ if (sub_ctx->rs_state.clip_plane_enable) {
for (i = 0 ; i < 8; i++) {
- glUniform4fv(ctx->sub->prog->clip_locs[i], 1, (const GLfloat *)&ctx->sub->ucp_state.ucp[i]);
+ glUniform4fv(sub_ctx->prog->clip_locs[i], 1, (const GLfloat *)&sub_ctx->ucp_state.ucp[i]);
}
}
if (has_feature(feat_gles31_vertex_attrib_binding))
- vrend_draw_bind_vertex_binding(ctx, ctx->sub->ve);
+ vrend_draw_bind_vertex_binding(ctx, sub_ctx->ve);
else
- vrend_draw_bind_vertex_legacy(ctx, ctx->sub->ve);
-
- for (i = 0 ; i < ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs; i++) {
- struct vrend_vertex_element_array *va = ctx->sub->ve;
- struct vrend_vertex_element *ve = &va->elements[i];
- int vbo_index = ve->base.vertex_buffer_index;
- if (!ctx->sub->vbo[vbo_index].buffer) {
- vrend_printf( "VBO missing vertex buffer\n");
- return 0;
- }
- }
+ vrend_draw_bind_vertex_legacy(ctx, sub_ctx->ve);
if (info->indexed) {
- struct vrend_resource *res = (struct vrend_resource *)ctx->sub->ib.buffer;
+ struct vrend_resource *res = (struct vrend_resource *)sub_ctx->ib.buffer;
if (!res) {
vrend_printf( "VBO missing indexed array buffer\n");
return 0;
@@ -4585,19 +4688,19 @@ int vrend_draw_vbo(struct vrend_context *ctx,
} else
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
- if (ctx->sub->current_so) {
- if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- glBeginTransformFeedback(get_gs_xfb_mode(ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
- else if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- glBeginTransformFeedback(get_tess_xfb_mode(ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
+ if (sub_ctx->current_so) {
+ if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
+ if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY])
+ glBeginTransformFeedback(get_gs_xfb_mode(sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
+ else if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL])
+ glBeginTransformFeedback(get_tess_xfb_mode(sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
+ sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
else
glBeginTransformFeedback(get_xfb_mode(info->mode));
- ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
- } else if (ctx->sub->current_so->xfb_state == XFB_STATE_PAUSED) {
+ sub_ctx->current_so->xfb_state = XFB_STATE_STARTED;
+ } else if (sub_ctx->current_so->xfb_state == XFB_STATE_PAUSED) {
glResumeTransformFeedback();
- ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
+ sub_ctx->current_so->xfb_state = XFB_STATE_STARTED;
}
}
@@ -4615,16 +4718,16 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (has_feature(feat_indirect_draw)) {
GLint buf = indirect_res ? indirect_res->id : 0;
- if (ctx->sub->draw_indirect_buffer != buf) {
+ if (sub_ctx->draw_indirect_buffer != buf) {
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buf);
- ctx->sub->draw_indirect_buffer = buf;
+ sub_ctx->draw_indirect_buffer = buf;
}
if (has_feature(feat_indirect_params)) {
GLint buf = indirect_params_res ? indirect_params_res->id : 0;
- if (ctx->sub->draw_indirect_params_buffer != buf) {
+ if (sub_ctx->draw_indirect_params_buffer != buf) {
glBindBuffer(GL_PARAMETER_BUFFER_ARB, buf);
- ctx->sub->draw_indirect_params_buffer = buf;
+ sub_ctx->draw_indirect_params_buffer = buf;
}
}
}
@@ -4637,9 +4740,9 @@ int vrend_draw_vbo(struct vrend_context *ctx,
* accept those blend equations.
* When we transmit the blend mode through alpha_src_factor, alpha_dst_factor is always 0.
*/
- uint32_t blend_mask_shader = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced;
- uint32_t blend_mode = ctx->sub->blend_state.rt[0].alpha_src_factor;
- uint32_t alpha_dst_factor = ctx->sub->blend_state.rt[0].alpha_dst_factor;
+ uint32_t blend_mask_shader = sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced;
+ uint32_t blend_mode = sub_ctx->blend_state.rt[0].alpha_src_factor;
+ uint32_t alpha_dst_factor = sub_ctx->blend_state.rt[0].alpha_dst_factor;
bool use_advanced_blending = !has_feature(feat_framebuffer_fetch) &&
has_feature(feat_blend_equation_advanced) &&
blend_mask_shader != 0 &&
@@ -4674,7 +4777,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
} else {
GLenum elsz;
GLenum mode = info->mode;
- switch (ctx->sub->ib.index_size) {
+ switch (sub_ctx->ib.index_size) {
case 1:
elsz = GL_UNSIGNED_BYTE;
break;
@@ -4697,17 +4800,17 @@ int vrend_draw_vbo(struct vrend_context *ctx,
glDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset);
} else if (info->index_bias) {
if (info->instance_count > 1)
- glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count, info->index_bias);
+ glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count, info->index_bias);
else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
+ glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
else
- glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
+ glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
} else if (info->instance_count > 1) {
- glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count);
+ glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count);
} else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
+ glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
else
- glDrawElements(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
+ glDrawElements(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
}
if (info->primitive_restart) {
@@ -4720,10 +4823,10 @@ int vrend_draw_vbo(struct vrend_context *ctx,
}
}
- if (ctx->sub->current_so && has_feature(feat_transform_feedback2)) {
- if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED) {
+ if (sub_ctx->current_so && has_feature(feat_transform_feedback2)) {
+ if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED) {
glPauseTransformFeedback();
- ctx->sub->current_so->xfb_state = XFB_STATE_PAUSED;
+ sub_ctx->current_so->xfb_state = XFB_STATE_PAUSED;
}
}
return 0;
@@ -4741,56 +4844,58 @@ void vrend_launch_grid(struct vrend_context *ctx,
if (!has_feature(feat_compute_shader))
return;
- if (ctx->sub->cs_shader_dirty) {
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
+ if (sub_ctx->cs_shader_dirty) {
struct vrend_linked_shader_program *prog;
bool cs_dirty;
- ctx->sub->cs_shader_dirty = false;
+ sub_ctx->cs_shader_dirty = false;
- if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]) {
+ if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return;
}
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
- if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current) {
+ vrend_shader_select(sub_ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
+ if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current) {
vrend_printf( "failure to compile shader variants: %s\n", ctx->debug_name);
return;
}
- if (ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_COMPUTE]) {
- prog = lookup_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id);
+ if (sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)sub_ctx->prog_ids[PIPE_SHADER_COMPUTE]) {
+ prog = lookup_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id);
if (!prog) {
- prog = add_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current);
+ prog = add_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current);
if (!prog)
return;
}
} else
- prog = ctx->sub->prog;
+ prog = sub_ctx->prog;
- if (ctx->sub->prog != prog) {
+ if (sub_ctx->prog != prog) {
new_program = true;
- ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = -1;
- ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id;
- ctx->sub->prog = prog;
- prog->ref_context = ctx->sub;
+ sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = 0;
+ sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id;
+ sub_ctx->prog = prog;
+ prog->ref_context = sub_ctx;
}
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
}
- if (!ctx->sub->prog) {
+ if (!sub_ctx->prog) {
vrend_printf("%s: Skipping compute shader execution due to missing shaders: %s\n",
__func__, ctx->debug_name);
return;
}
- vrend_use_program(ctx, ctx->sub->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog->id);
- vrend_draw_bind_ubo_shader(ctx, PIPE_SHADER_COMPUTE, 0);
- vrend_draw_bind_const_shader(ctx, PIPE_SHADER_COMPUTE, new_program);
- vrend_draw_bind_samplers_shader(ctx, PIPE_SHADER_COMPUTE, 0);
- vrend_draw_bind_images_shader(ctx, PIPE_SHADER_COMPUTE);
- vrend_draw_bind_ssbo_shader(ctx, PIPE_SHADER_COMPUTE);
- vrend_draw_bind_abo_shader(ctx);
+ vrend_draw_bind_ubo_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
+ vrend_draw_bind_const_shader(sub_ctx, PIPE_SHADER_COMPUTE, new_program);
+ vrend_draw_bind_samplers_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
+ vrend_draw_bind_images_shader(sub_ctx, PIPE_SHADER_COMPUTE);
+ vrend_draw_bind_ssbo_shader(sub_ctx, PIPE_SHADER_COMPUTE);
+ vrend_draw_bind_abo_shader(sub_ctx);
if (indirect_handle) {
indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
@@ -4943,15 +5048,15 @@ static inline bool is_const_blend(int blend_factor)
blend_factor == PIPE_BLENDFACTOR_INV_CONST_ALPHA);
}
-static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_state *state)
+static void vrend_hw_emit_blend(struct vrend_sub_context *sub_ctx, struct pipe_blend_state *state)
{
- if (state->logicop_enable != ctx->sub->hw_blend_state.logicop_enable) {
- ctx->sub->hw_blend_state.logicop_enable = state->logicop_enable;
+ if (state->logicop_enable != sub_ctx->hw_blend_state.logicop_enable) {
+ sub_ctx->hw_blend_state.logicop_enable = state->logicop_enable;
if (vrend_state.use_gles) {
if (can_emulate_logicop(state->logicop_func))
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
else
- report_gles_warn(ctx, GLES_WARN_LOGIC_OP);
+ report_gles_warn(sub_ctx->parent, GLES_WARN_LOGIC_OP);
} else if (state->logicop_enable) {
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(translate_logicop(state->logicop_func));
@@ -4969,7 +5074,7 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
if (state->rt[i].blend_enable) {
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, i);
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, i);
if (dual_src && !has_feature(feat_dual_src_blend)) {
vrend_printf( "dual src blend requested but not supported for rt %d\n", i);
continue;
@@ -4985,8 +5090,8 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
} else
glDisableIndexedEXT(GL_BLEND, i);
- if (state->rt[i].colormask != ctx->sub->hw_blend_state.rt[i].colormask) {
- ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
+ if (state->rt[i].colormask != sub_ctx->hw_blend_state.rt[i].colormask) {
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
@@ -4995,7 +5100,7 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
}
} else {
if (state->rt[0].blend_enable) {
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
if (dual_src && !has_feature(feat_dual_src_blend)) {
vrend_printf( "dual src blend requested but not supported for rt 0\n");
}
@@ -5010,19 +5115,19 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
else
glDisable(GL_BLEND);
- if (state->rt[0].colormask != ctx->sub->hw_blend_state.rt[0].colormask ||
- (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (state->rt[0].colormask != sub_ctx->hw_blend_state.rt[0].colormask ||
+ (sub_ctx->hw_blend_state.independent_blend_enable &&
!state->independent_blend_enable)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
- ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
- ctx->sub->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
+ sub_ctx->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
if (has_feature(feat_multisample)) {
if (state->alpha_to_coverage)
@@ -5049,22 +5154,22 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
b) patching colormask/blendcolor/blendfactors for A8/A16 format
emulation using GL_R8/GL_R16.
*/
-static void vrend_patch_blend_state(struct vrend_context *ctx)
+static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_blend_state new_state = ctx->sub->blend_state;
- struct pipe_blend_state *state = &ctx->sub->blend_state;
+ struct pipe_blend_state new_state = sub_ctx->blend_state;
+ struct pipe_blend_state *state = &sub_ctx->blend_state;
bool swizzle_blend_color = false;
- struct pipe_blend_color blend_color = ctx->sub->blend_color;
+ struct pipe_blend_color blend_color = sub_ctx->blend_color;
int i;
- if (ctx->sub->nr_cbufs == 0) {
- ctx->sub->blend_state_dirty = false;
+ if (sub_ctx->nr_cbufs == 0) {
+ sub_ctx->blend_state_dirty = false;
return;
}
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
- if (i < ctx->sub->nr_cbufs && ctx->sub->surf[i]) {
- if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format)) {
+ if (i < sub_ctx->nr_cbufs && sub_ctx->surf[i]) {
+ if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format)) {
if (state->rt[i].blend_enable) {
new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor);
@@ -5078,7 +5183,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
is_const_blend(new_state.rt[i].rgb_dst_factor)) {
swizzle_blend_color = true;
}
- } else if (!util_format_has_alpha(ctx->sub->surf[i]->format)) {
+ } else if (!util_format_has_alpha(sub_ctx->surf[i]->format)) {
if (!(is_dst_blend(state->rt[i].rgb_src_factor) ||
is_dst_blend(state->rt[i].rgb_dst_factor) ||
is_dst_blend(state->rt[i].alpha_src_factor) ||
@@ -5092,7 +5197,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
}
}
- vrend_hw_emit_blend(ctx, &new_state);
+ vrend_hw_emit_blend(sub_ctx, &new_state);
if (swizzle_blend_color) {
blend_color.color[0] = blend_color.color[3];
@@ -5106,7 +5211,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
blend_color.color[2],
blend_color.color[3]);
- ctx->sub->blend_state_dirty = false;
+ sub_ctx->blend_state_dirty = false;
}
void vrend_object_bind_blend(struct vrend_context *ctx,
@@ -5184,41 +5289,41 @@ void vrend_object_bind_dsa(struct vrend_context *ctx,
vrend_hw_emit_dsa(ctx);
}
-static void vrend_update_frontface_state(struct vrend_context *ctx)
+static void vrend_update_frontface_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
+ struct pipe_rasterizer_state *state = &sub_ctx->rs_state;
int front_ccw = state->front_ccw;
- front_ccw ^= (ctx->sub->inverted_fbo_content ? 0 : 1);
+ front_ccw ^= (sub_ctx->inverted_fbo_content ? 0 : 1);
if (front_ccw)
glFrontFace(GL_CCW);
else
glFrontFace(GL_CW);
}
-void vrend_update_stencil_state(struct vrend_context *ctx)
+void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_depth_stencil_alpha_state *state = ctx->sub->dsa;
+ struct pipe_depth_stencil_alpha_state *state = sub_ctx->dsa;
int i;
if (!state)
return;
if (!state->stencil[1].enabled) {
if (state->stencil[0].enabled) {
- vrend_stencil_test_enable(ctx, true);
+ vrend_stencil_test_enable(sub_ctx, true);
glStencilOp(translate_stencil_op(state->stencil[0].fail_op),
translate_stencil_op(state->stencil[0].zfail_op),
translate_stencil_op(state->stencil[0].zpass_op));
glStencilFunc(GL_NEVER + state->stencil[0].func,
- ctx->sub->stencil_refs[0],
+ sub_ctx->stencil_refs[0],
state->stencil[0].valuemask);
glStencilMask(state->stencil[0].writemask);
} else
- vrend_stencil_test_enable(ctx, false);
+ vrend_stencil_test_enable(sub_ctx, false);
} else {
- vrend_stencil_test_enable(ctx, true);
+ vrend_stencil_test_enable(sub_ctx, true);
for (i = 0; i < 2; i++) {
GLenum face = (i == 1) ? GL_BACK : GL_FRONT;
@@ -5228,12 +5333,12 @@ void vrend_update_stencil_state(struct vrend_context *ctx)
translate_stencil_op(state->stencil[i].zpass_op));
glStencilFuncSeparate(face, GL_NEVER + state->stencil[i].func,
- ctx->sub->stencil_refs[i],
+ sub_ctx->stencil_refs[i],
state->stencil[i].valuemask);
glStencilMaskSeparate(face, state->stencil[i].writemask);
}
}
- ctx->sub->stencil_state_dirty = false;
+ sub_ctx->stencil_state_dirty = false;
}
static inline GLenum translate_fill(uint32_t mode)
@@ -5595,7 +5700,7 @@ static bool get_swizzled_border_color(enum virgl_formats fmt,
return false;
}
-static void vrend_apply_sampler_state(struct vrend_context *ctx,
+static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id,
@@ -5603,7 +5708,7 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
struct vrend_sampler_view *tview)
{
struct vrend_texture *tex = (struct vrend_texture *)res;
- struct vrend_sampler_state *vstate = ctx->sub->sampler_state[shader_type][id];
+ struct vrend_sampler_state *vstate = sub_ctx->sampler_state[shader_type][id];
struct pipe_sampler_state *state = &vstate->base;
bool set_all = false;
GLenum target = tex->base.target;
@@ -5635,11 +5740,11 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
border_color = state->border_color;
border_color.ui[0] = border_color.ui[3];
border_color.ui[3] = 0;
- glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, border_color.ui);
+ apply_sampler_border_color(sampler, border_color.ui);
} else {
union pipe_color_union border_color;
if (get_swizzled_border_color(tview->format, &state->border_color, &border_color))
- glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, border_color.ui);
+ apply_sampler_border_color(sampler, border_color.ui);
}
glBindSampler(sampler_id, sampler);
@@ -5668,7 +5773,7 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
if (tex->state.lod_bias != state->lod_bias || set_all) {
if (vrend_state.use_gles) {
if (state->lod_bias)
- report_gles_warn(ctx, GLES_WARN_LOD_BIAS);
+ report_gles_warn(sub_ctx->parent, GLES_WARN_LOD_BIAS);
} else {
glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias);
}
@@ -5785,6 +5890,33 @@ static void vrend_free_fences(void)
free_fence_locked(fence);
}
+static void vrend_free_fences_for_context(struct vrend_context *ctx)
+{
+ struct vrend_fence *fence, *stor;
+
+ if (vrend_state.sync_thread) {
+ pipe_mutex_lock(vrend_state.fence_mutex);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ if (vrend_state.fence_waiting) {
+ /* mark the fence invalid as the sync thread is still waiting on it */
+ vrend_state.fence_waiting->ctx = NULL;
+ }
+ pipe_mutex_unlock(vrend_state.fence_mutex);
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ }
+}
+
static bool do_wait(struct vrend_fence *fence, bool can_block)
{
bool done = false;
@@ -5816,6 +5948,7 @@ static void wait_sync(struct vrend_fence *fence)
pipe_mutex_lock(vrend_state.fence_mutex);
list_addtail(&fence->fences, &vrend_state.fence_list);
+ vrend_state.fence_waiting = NULL;
pipe_mutex_unlock(vrend_state.fence_mutex);
if (write_eventfd(vrend_state.eventfd, 1)) {
@@ -5843,6 +5976,7 @@ static int thread_sync(UNUSED void *arg)
if (vrend_state.stop_sync_thread)
break;
list_del(&fence->fences);
+ vrend_state.fence_waiting = fence;
pipe_mutex_unlock(vrend_state.fence_mutex);
wait_sync(fence);
pipe_mutex_lock(vrend_state.fence_mutex);
@@ -5977,6 +6111,8 @@ static bool use_integer() {
return true;
const char * a = (const char *) glGetString(GL_VENDOR);
+ if (!a)
+ return false;
if (strcmp(a, "ARM") == 0)
return true;
return false;
@@ -6203,6 +6339,7 @@ void vrend_destroy_context(struct vrend_context *ctx)
bool switch_0 = (ctx == vrend_state.current_ctx);
struct vrend_context *cur = vrend_state.current_ctx;
struct vrend_sub_context *sub, *tmp;
+ struct vrend_untyped_resource *untyped_res, *untyped_res_tmp;
if (switch_0) {
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
@@ -6234,6 +6371,10 @@ void vrend_destroy_context(struct vrend_context *ctx)
if(ctx->ctx_id)
vrend_renderer_force_ctx_0();
+ vrend_free_fences_for_context(ctx);
+
+ LIST_FOR_EACH_ENTRY_SAFE(untyped_res, untyped_res_tmp, &ctx->untyped_resources, head)
+ free(untyped_res);
vrend_ctx_resource_fini_table(ctx->res_hash);
FREE(ctx);
@@ -6265,6 +6406,7 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
list_inithead(&grctx->active_nontimer_query_list);
grctx->res_hash = vrend_ctx_resource_init_table();
+ list_inithead(&grctx->untyped_resources);
grctx->shader_cfg.use_gles = vrend_state.use_gles;
grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
@@ -6282,10 +6424,13 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
vrender_get_glsl_version(&grctx->shader_cfg.glsl_version);
+ if (!grctx->ctx_id)
+ grctx->fence_retire = vrend_clicbs->ctx0_fence_retire;
+
return grctx;
}
-static int check_resource_valid(struct vrend_renderer_resource_create_args *args,
+static int check_resource_valid(const struct vrend_renderer_resource_create_args *args,
char errmsg[256])
{
/* limit the target */
@@ -6320,6 +6465,10 @@ static int check_resource_valid(struct vrend_renderer_resource_create_args *args
snprintf(errmsg, 256, "Multisample textures don't support mipmaps");
return -1;
}
+ if (!format_can_texture_storage && vrend_state.use_gles) {
+ snprintf(errmsg, 256, "Unsupported multisample texture format %d", args->format);
+ return -1;
+ }
}
if (args->last_level > 0) {
@@ -6386,8 +6535,8 @@ static int check_resource_valid(struct vrend_renderer_resource_create_args *args
}
}
- if (format_can_texture_storage && !args->width) {
- snprintf(errmsg, 256, "Texture storage texture width must be >0");
+ if (args->target != PIPE_BUFFER && !args->width) {
+ snprintf(errmsg, 256, "Texture width must be >0");
return -1;
}
@@ -6589,8 +6738,67 @@ static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint3
glBindBufferARB(gr->target, 0);
}
+static int
+vrend_resource_alloc_buffer(struct vrend_resource *gr, uint32_t flags)
+{
+ const uint32_t bind = gr->base.bind;
+ const uint32_t size = gr->base.width0;
+
+ if (bind == VIRGL_BIND_CUSTOM) {
+ /* use iovec directly when attached */
+ gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
+ gr->ptr = malloc(size);
+ if (!gr->ptr)
+ return -ENOMEM;
+ } else if (bind == VIRGL_BIND_STAGING) {
+ /* staging buffers only use guest memory -- nothing to do. */
+ } else if (bind == VIRGL_BIND_INDEX_BUFFER) {
+ gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_STREAM_OUTPUT) {
+ gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_VERTEX_BUFFER) {
+ gr->target = GL_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_CONSTANT_BUFFER) {
+ gr->target = GL_UNIFORM_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_QUERY_BUFFER) {
+ gr->target = GL_QUERY_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_COMMAND_ARGS) {
+ gr->target = GL_DRAW_INDIRECT_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == 0 || bind == VIRGL_BIND_SHADER_BUFFER) {
+ gr->target = GL_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind & VIRGL_BIND_SAMPLER_VIEW) {
+ /*
+ * On Desktop we use GL_ARB_texture_buffer_object on GLES we use
+ * GL_EXT_texture_buffer (it is in the ANDROID extension pack).
+ */
+#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
+#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
+#endif
+
+ /* need to check GL version here */
+ if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
+ gr->target = GL_TEXTURE_BUFFER;
+ } else {
+ gr->target = GL_PIXEL_PACK_BUFFER_ARB;
+ }
+ vrend_create_buffer(gr, size, flags);
+ } else {
+ vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, bind);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static inline void
-vrend_renderer_resource_copy_args(struct vrend_renderer_resource_create_args *args,
+vrend_renderer_resource_copy_args(const struct vrend_renderer_resource_create_args *args,
struct vrend_resource *gr)
{
assert(gr);
@@ -6647,7 +6855,7 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
if (!virgl_gbm_gpu_import_required(gr->base.bind))
return;
- gr->egl_image = virgl_egl_image_from_dmabuf(egl, bo);
+ gr->egl_image = virgl_egl_image_from_gbm_bo(egl, bo);
if (!gr->egl_image) {
gr->gbm_bo = NULL;
gbm_bo_destroy(bo);
@@ -6661,19 +6869,12 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
#endif
}
-static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
- void *image_oes)
+static enum virgl_formats vrend_resource_fixup_emulated_bgra(struct vrend_resource *gr,
+ bool imported)
{
- uint level;
- GLenum internalformat, glformat, gltype;
- enum virgl_formats format = gr->base.format;
- struct vrend_texture *gt = (struct vrend_texture *)gr;
- struct pipe_resource *pr = &gr->base;
-
- if (pr->width0 == 0)
- return EINVAL;
-
- bool format_can_texture_storage = has_feature(feat_texture_storage) &&
+ const struct pipe_resource *pr = &gr->base;
+ const enum virgl_formats format = pr->format;
+ const bool format_can_texture_storage = has_feature(feat_texture_storage) &&
(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE);
/* On GLES there is no support for glTexImage*DMultisample and
@@ -6685,7 +6886,7 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
gr->base.bind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
}
- if (image_oes && !has_feature(feat_egl_image_storage))
+ if (imported && !has_feature(feat_egl_image_storage))
gr->base.bind &= ~VIRGL_BIND_PREFER_EMULATED_BGRA;
#ifdef ENABLE_MINIGBM_ALLOCATION
@@ -6694,8 +6895,19 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
gr->base.bind &= ~VIRGL_BIND_PREFER_EMULATED_BGRA;
#endif
- format = vrend_format_replace_emulated(gr->base.bind, gr->base.format);
- format_can_texture_storage = has_feature(feat_texture_storage) &&
+ return vrend_format_replace_emulated(gr->base.bind, format);
+}
+
+static int vrend_resource_alloc_texture(struct vrend_resource *gr,
+ enum virgl_formats format,
+ void *image_oes)
+{
+ uint level;
+ GLenum internalformat, glformat, gltype;
+ struct vrend_texture *gt = (struct vrend_texture *)gr;
+ struct pipe_resource *pr = &gr->base;
+
+ const bool format_can_texture_storage = has_feature(feat_texture_storage) &&
(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE);
if (format_can_texture_storage)
@@ -6855,21 +7067,21 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
!vrend_format_can_texture_view(gr->base.format)) {
for (int i = 0; i < gbm_bo_get_plane_count(gr->gbm_bo); i++) {
gr->aux_plane_egl_image[i] =
- virgl_egl_aux_plane_image_from_dmabuf(egl, gr->gbm_bo, i);
+ virgl_egl_aux_plane_image_from_gbm_bo(egl, gr->gbm_bo, i);
}
}
#endif
}
gt->state.max_lod = -1;
- gt->cur_swizzle_r = gt->cur_swizzle_g = gt->cur_swizzle_b = gt->cur_swizzle_a = -1;
+ gt->cur_swizzle[0] = gt->cur_swizzle[1] = gt->cur_swizzle[2] = gt->cur_swizzle[3] = -1;
gt->cur_base = -1;
gt->cur_max = 10000;
return 0;
}
-struct pipe_resource *
-vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, void *image_oes)
+static struct vrend_resource *
+vrend_resource_create(const struct vrend_renderer_resource_create_args *args)
{
struct vrend_resource *gr;
int ret;
@@ -6893,65 +7105,31 @@ vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args,
pipe_reference_init(&gr->base.reference, 1);
- if (args->target == PIPE_BUFFER) {
- if (args->bind == VIRGL_BIND_CUSTOM) {
- /* use iovec directly when attached */
- gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
- gr->ptr = malloc(args->width);
- if (!gr->ptr) {
- FREE(gr);
- return NULL;
- }
- } else if (args->bind == VIRGL_BIND_STAGING) {
- /* staging buffers only use guest memory -- nothing to do. */
- } else if (args->bind == VIRGL_BIND_INDEX_BUFFER) {
- gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_STREAM_OUTPUT) {
- gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_VERTEX_BUFFER) {
- gr->target = GL_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_CONSTANT_BUFFER) {
- gr->target = GL_UNIFORM_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_QUERY_BUFFER) {
- gr->target = GL_QUERY_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_COMMAND_ARGS) {
- gr->target = GL_DRAW_INDIRECT_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == 0 || args->bind == VIRGL_BIND_SHADER_BUFFER) {
- gr->target = GL_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind & VIRGL_BIND_SAMPLER_VIEW) {
- /*
- * On Desktop we use GL_ARB_texture_buffer_object on GLES we use
- * GL_EXT_texture_buffer (it is in the ANDRIOD extension pack).
- */
-#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
-#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
-#endif
+ return gr;
+}
- /* need to check GL version here */
- if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
- gr->target = GL_TEXTURE_BUFFER;
- } else {
- gr->target = GL_PIXEL_PACK_BUFFER_ARB;
- }
- vrend_create_buffer(gr, args->width, args->flags);
- } else {
- vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, args->bind);
- FREE(gr);
- return NULL;
- }
+struct pipe_resource *
+vrend_renderer_resource_create(const struct vrend_renderer_resource_create_args *args,
+ void *image_oes)
+{
+ struct vrend_resource *gr;
+ int ret;
+
+ gr = vrend_resource_create(args);
+ if (!gr)
+ return NULL;
+
+ if (args->target == PIPE_BUFFER) {
+ ret = vrend_resource_alloc_buffer(gr, args->flags);
} else {
- int r = vrend_renderer_resource_allocate_texture(gr, image_oes);
- if (r) {
- FREE(gr);
- return NULL;
- }
+ const enum virgl_formats format =
+ vrend_resource_fixup_emulated_bgra(gr, image_oes);
+ ret = vrend_resource_alloc_texture(gr, format, image_oes);
+ }
+
+ if (ret) {
+ FREE(gr);
+ return NULL;
}
return &gr->base;
@@ -6959,9 +7137,6 @@ vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args,
void vrend_renderer_resource_destroy(struct vrend_resource *res)
{
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
if (has_bit(res->storage_bits, VREND_STORAGE_GL_TEXTURE)) {
glDeleteTextures(1, &res->id);
} else if (has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER)) {
@@ -6976,7 +7151,7 @@ void vrend_renderer_resource_destroy(struct vrend_resource *res)
glDeleteMemoryObjectsEXT(1, &res->memobj);
}
-#ifdef ENABLE_MINIGBM_ALLOCATION
+#if HAVE_EPOXY_EGL_H
if (res->egl_image) {
virgl_egl_image_destroy(egl, res->egl_image);
for (unsigned i = 0; i < ARRAY_SIZE(res->aux_plane_egl_image); i++) {
@@ -6985,6 +7160,8 @@ void vrend_renderer_resource_destroy(struct vrend_resource *res)
}
}
}
+#endif
+#ifdef ENABLE_MINIGBM_ALLOCATION
if (res->gbm_bo)
gbm_bo_destroy(res->gbm_bo);
#endif
@@ -7274,8 +7451,8 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
{
void *data;
- if (is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) ||
- (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY) && res->iov)) {
+ if ((is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) ||
+ has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) && res->iov) {
return vrend_copy_iovec(iov, num_iovs, info->offset,
res->iov, res->num_iovs, info->box->x,
info->box->width, res->ptr);
@@ -7321,7 +7498,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
uint32_t layer_stride = info->layer_stride;
if (ctx)
- vrend_use_program(ctx, 0);
+ vrend_use_program(ctx->sub, 0);
else
glUseProgram(0);
@@ -7393,21 +7570,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if ((!vrend_state.use_core_profile) && (res->y_0_top)) {
GLuint buffers;
+ GLuint fb_id;
- if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level) {
- GLuint fb_id;
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
- vrend_fb_bind_texture(res, 0, info->level, 0);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = info->level;
- } else {
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
- }
+ glGenFramebuffers(1, &fb_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
+ vrend_fb_bind_texture(res, 0, info->level, 0);
buffers = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, &buffers);
@@ -7415,7 +7582,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if (ctx) {
vrend_depth_test_enable(ctx, false);
vrend_alpha_test_enable(ctx, false);
- vrend_stencil_test_enable(ctx, false);
+ vrend_stencil_test_enable(ctx->sub, false);
} else {
glDisable(GL_DEPTH_TEST);
glDisable(GL_ALPHA_TEST);
@@ -7425,6 +7592,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
glWindowPos2i(info->box->x, res->y_0_top ? (int)res->base.height0 - info->box->y : info->box->y);
glDrawPixels(info->box->width, info->box->height, glformat, gltype,
data);
+ glDeleteFramebuffers(1, &fb_id);
} else {
uint32_t comp_size;
GLint old_tex = 0;
@@ -7624,17 +7792,28 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
return 0;
}
-static void do_readpixels(GLint x, GLint y,
+static void do_readpixels(struct vrend_resource *res,
+ int idx, uint32_t level, uint32_t layer,
+ GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
GLsizei bufSize, void *data)
{
+ GLuint fb_id;
+
+ glGenFramebuffers(1, &fb_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
+
+ vrend_fb_bind_texture(res, idx, level, layer);
+
if (has_feature(feat_arb_robustness))
glReadnPixelsARB(x, y, width, height, format, type, bufSize, data);
else if (has_feature(feat_gles_khr_robustness))
glReadnPixelsKHR(x, y, width, height, format, type, bufSize, data);
else
glReadPixels(x, y, width, height, format, type, data);
+
+ glDeleteFramebuffers(1, &fb_id);
}
static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
@@ -7644,7 +7823,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
{
char *myptr = (char*)iov[0].iov_base + info->offset;
int need_temp = 0;
- GLuint fb_id;
char *data;
bool actually_invert, separate_invert = false;
GLenum format, type;
@@ -7657,7 +7835,7 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
GLint old_fbo;
if (ctx)
- vrend_use_program(ctx, 0);
+ vrend_use_program(ctx->sub, 0);
else
glUseProgram(0);
@@ -7705,22 +7883,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &old_fbo);
- if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level ||
- (int)res->readback_fb_z != info->box->z) {
-
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
-
- vrend_fb_bind_texture(res, 0, info->level, info->box->z);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = info->level;
- res->readback_fb_z = info->box->z;
- } else
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
if (actually_invert)
y1 = h - info->box->y - info->box->height;
else
@@ -7728,8 +7890,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
if (has_feature(feat_mesa_invert) && actually_invert)
glPixelStorei(GL_PACK_INVERT_MESA, 1);
- if (!vrend_format_is_ds(res->base.format))
- glReadBuffer(GL_COLOR_ATTACHMENT0);
if (!need_temp && row_stride)
glPixelStorei(GL_PACK_ROW_LENGTH, row_stride);
@@ -7787,7 +7947,8 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
}
}
- do_readpixels(info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data);
+ do_readpixels(res, 0, info->level, info->box->z, info->box->x, y1,
+ info->box->width, info->box->height, format, type, send_size, data);
if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) {
if (!vrend_state.use_core_profile)
@@ -8812,7 +8973,8 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
args.array_size = src_res->base.array_size;
intermediate_copy = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
vrend_renderer_resource_copy_args(&args, intermediate_copy);
- MAYBE_UNUSED int r = vrend_renderer_resource_allocate_texture(intermediate_copy, NULL);
+ /* this is PIPE_MASK_ZS and bgra fixup is not needed */
+ MAYBE_UNUSED int r = vrend_resource_alloc_texture(intermediate_copy, args.format, NULL);
assert(!r);
glGenFramebuffers(1, &intermediate_fbo);
@@ -9003,16 +9165,32 @@ void vrend_renderer_blit(struct vrend_context *ctx,
vrend_pause_render_condition(ctx, false);
}
-int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
+void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
+ vrend_context_fence_retire retire,
+ void *retire_data)
+{
+ assert(ctx->ctx_id);
+ ctx->fence_retire = retire;
+ ctx->fence_retire_data = retire_data;
+}
+
+int vrend_renderer_create_fence(struct vrend_context *ctx,
+ uint32_t flags,
+ void *fence_cookie)
{
struct vrend_fence *fence;
+ if (!ctx)
+ return EINVAL;
+
fence = malloc(sizeof(struct vrend_fence));
if (!fence)
return ENOMEM;
- fence->ctx_id = ctx_id;
- fence->fence_id = client_fence_id;
+ fence->ctx = ctx;
+ fence->flags = flags;
+ fence->fence_cookie = fence_cookie;
+
#ifdef HAVE_EPOXY_EGL_H
if (vrend_state.use_egl_fence) {
fence->eglsyncobj = virgl_egl_fence_create(egl);
@@ -9043,18 +9221,51 @@ int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
static void vrend_renderer_check_queries(void);
+static bool need_fence_retire_signal_locked(struct vrend_fence *fence)
+{
+ struct vrend_fence *next;
+
+ /* last fence */
+ if (fence->fences.next == &vrend_state.fence_list)
+ return true;
+
+ /* next fence belongs to a different context */
+ next = LIST_ENTRY(struct vrend_fence, fence->fences.next, fences);
+ if (next->ctx != fence->ctx)
+ return true;
+
+ /* not mergeable */
+ if (!(fence->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
+ return true;
+
+ return false;
+}
+
void vrend_renderer_check_fences(void)
{
+ struct list_head retired_fences;
struct vrend_fence *fence, *stor;
- uint32_t latest_id = 0;
+
+ list_inithead(&retired_fences);
if (vrend_state.sync_thread) {
flush_eventfd(vrend_state.eventfd);
pipe_mutex_lock(vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
- if (fence->fence_id > latest_id)
- latest_id = fence->fence_id;
- free_fence_locked(fence);
+ /* vrend_free_fences_for_context might have marked the fence invalid
+ * by setting fence->ctx to NULL
+ */
+ if (!fence->ctx) {
+ free_fence_locked(fence);
+ continue;
+ }
+
+ if (need_fence_retire_signal_locked(fence)) {
+ list_del(&fence->fences);
+ list_addtail(&fence->fences, &retired_fences);
+ } else {
+ free_fence_locked(fence);
+ }
}
pipe_mutex_unlock(vrend_state.fence_mutex);
} else {
@@ -9062,8 +9273,12 @@ void vrend_renderer_check_fences(void)
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
if (do_wait(fence, /* can_block */ false)) {
- latest_id = fence->fence_id;
- free_fence_locked(fence);
+ if (need_fence_retire_signal_locked(fence)) {
+ list_del(&fence->fences);
+ list_addtail(&fence->fences, &retired_fences);
+ } else {
+ free_fence_locked(fence);
+ }
} else {
/* don't bother checking any subsequent ones */
break;
@@ -9071,12 +9286,17 @@ void vrend_renderer_check_fences(void)
}
}
- if (latest_id == 0)
+ if (LIST_IS_EMPTY(&retired_fences))
return;
vrend_renderer_check_queries();
- vrend_clicbs->write_fence(latest_id);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) {
+ struct vrend_context *ctx = fence->ctx;
+ ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data);
+
+ free_fence_locked(fence);
+ }
}
static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result)
@@ -9387,7 +9607,11 @@ void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
if (buf) memcpy(buf, &value, size); \
glUnmapBuffer(GL_QUERY_BUFFER);
-#define BUFFER_OFFSET(i) ((void *)((char *)NULL + i))
+static inline void *buffer_offset(intptr_t i)
+{
+ return (void *)i;
+}
+
void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle,
uint32_t qbo_handle,
uint32_t wait, uint32_t result_type, uint32_t offset,
@@ -9422,16 +9646,16 @@ void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle,
glBindBuffer(GL_QUERY_BUFFER, res->id);
switch ((enum pipe_query_value_type)result_type) {
case PIPE_QUERY_TYPE_I32:
- glGetQueryObjectiv(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectiv(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_U32:
- glGetQueryObjectuiv(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectuiv(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_I64:
- glGetQueryObjecti64v(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjecti64v(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_U64:
- glGetQueryObjectui64v(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectui64v(q->id, qtype, buffer_offset(offset));
break;
}
} else {
@@ -9877,6 +10101,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
{
GLint max;
GLfloat range[2];
+ uint32_t video_memory;
/* Count this up when you add a feature flag that is used to set a CAP in
* the guest that was set unconditionally before. Then check that flag and
@@ -9990,7 +10215,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
else
caps->v2.max_vertex_attrib_stride = 2048;
- if (has_feature(feat_compute_shader)) {
+ if (has_feature(feat_compute_shader) && (vrend_state.use_gles || gl_ver >= 33)) {
glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, (GLint*)&caps->v2.max_compute_work_group_invocations);
glGetIntegerv(GL_MAX_COMPUTE_SHARED_MEMORY_SIZE, (GLint*)&caps->v2.max_compute_shared_memory_size);
glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0, (GLint*)&caps->v2.max_compute_grid_size[0]);
@@ -10177,6 +10402,22 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (has_feature(feat_blend_equation_advanced))
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_BLEND_EQUATION;
+
+#ifdef HAVE_EPOXY_EGL_H
+ if (egl)
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_UNTYPED_RESOURCE;
+#endif
+
+ video_memory = vrend_winsys_query_video_memory();
+ if (video_memory) {
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_VIDEO_MEMORY;
+ caps->v2.max_video_memory = video_memory;
+ }
+
+ if (has_feature(feat_ati_meminfo) || has_feature(feat_nvx_gpu_memory_info)) {
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_MEMINFO;
+ }
+
}
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
@@ -10279,26 +10520,7 @@ void *vrend_renderer_get_cursor_contents(struct pipe_resource *pres,
glBindTexture(res->target, res->id);
glGetnTexImageARB(res->target, 0, format, type, size, data);
} else if (vrend_state.use_gles) {
- GLuint fb_id;
-
- if (res->readback_fb_id == 0 || res->readback_fb_level != 0 || res->readback_fb_z != 0) {
-
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
-
- vrend_fb_bind_texture(res, 0, 0, 0);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = 0;
- res->readback_fb_z = 0;
- } else {
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
- }
-
- do_readpixels(0, 0, *width, *height, format, type, size, data);
+ do_readpixels(res, 0, 0, 0, 0, 0, *width, *height, format, type, size, data);
} else {
glBindTexture(res->target, res->id);
glGetTexImage(res->target, 0, format, type, data);
@@ -10355,17 +10577,52 @@ void vrend_renderer_get_rect(struct pipe_resource *pres,
}
void vrend_renderer_attach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id,
- struct pipe_resource *pres)
-{
- struct vrend_resource *res = (struct vrend_resource *)pres;
- vrend_ctx_resource_insert(ctx->res_hash, res_id, res);
+ struct virgl_resource *res)
+{
+ if (!res->pipe_resource) {
+ /* move the last untyped resource from cache to list */
+ if (unlikely(ctx->untyped_resource_cache)) {
+ struct virgl_resource *last = ctx->untyped_resource_cache;
+ struct vrend_untyped_resource *wrapper = malloc(sizeof(*wrapper));
+ if (wrapper) {
+ wrapper->resource = last;
+ list_add(&wrapper->head, &ctx->untyped_resources);
+ } else {
+ vrend_printf("dropping attached resource %d due to OOM\n", last->res_id);
+ }
+ }
+
+ ctx->untyped_resource_cache = res;
+ /* defer to vrend_renderer_pipe_resource_set_type */
+ return;
+ }
+
+ vrend_ctx_resource_insert(ctx->res_hash,
+ res->res_id,
+ (struct vrend_resource *)res->pipe_resource);
}
void vrend_renderer_detach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id)
+ struct virgl_resource *res)
{
- vrend_ctx_resource_remove(ctx->res_hash, res_id);
+ if (!res->pipe_resource) {
+ if (ctx->untyped_resource_cache == res) {
+ ctx->untyped_resource_cache = NULL;
+ } else {
+ struct vrend_untyped_resource *iter;
+ LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) {
+ if (iter->resource == res) {
+ list_del(&iter->head);
+ free(iter);
+ break;
+ }
+ }
+ }
+
+ return;
+ }
+
+ vrend_ctx_resource_remove(ctx->res_hash, res->res_id);
}
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
@@ -10438,6 +10695,7 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
+ sub->parent = ctx;
vrend_clicbs->make_current(sub->gl_context);
/* enable if vrend_renderer_init function has done it as well */
@@ -10463,7 +10721,9 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
glBindFramebuffer(GL_FRAMEBUFFER, sub->fb_id);
glGenFramebuffers(2, sub->blit_fb_ids);
- list_inithead(&sub->programs);
+ for (int i = 0; i < VREND_PROGRAM_NQUEUES; ++i)
+ list_inithead(&sub->gl_programs[i]);
+ list_inithead(&sub->cs_programs);
list_inithead(&sub->streamout_list);
sub->object_hash = vrend_object_init_ctx_table();
@@ -10571,6 +10831,7 @@ int vrend_renderer_export_query(struct pipe_resource *pres,
*/
export_query->out_num_fds = 0;
export_query->out_fourcc = 0;
+ export_query->out_modifier = DRM_FORMAT_MOD_INVALID;
if (export_query->in_export_fds)
return -EINVAL;
@@ -10578,7 +10839,7 @@ int vrend_renderer_export_query(struct pipe_resource *pres,
}
int vrend_renderer_pipe_resource_create(struct vrend_context *ctx, uint32_t blob_id,
- struct vrend_renderer_resource_create_args *args)
+ const struct vrend_renderer_resource_create_args *args)
{
struct vrend_resource *res;
res = (struct vrend_resource *)vrend_renderer_resource_create(args, NULL);
@@ -10608,16 +10869,128 @@ struct pipe_resource *vrend_get_blob_pipe(struct vrend_context *ctx, uint64_t bl
return NULL;
}
-int vrend_renderer_resource_get_map_info(struct pipe_resource *pres, uint32_t *map_info)
+int
+vrend_renderer_pipe_resource_set_type(struct vrend_context *ctx,
+ uint32_t res_id,
+ const struct vrend_renderer_resource_set_type_args *args)
{
- struct vrend_resource *res = (struct vrend_resource *)pres;
- if (!res->map_info)
- return -EINVAL;
+ struct virgl_resource *res = NULL;
+
+ /* look up the untyped resource */
+ if (ctx->untyped_resource_cache &&
+ ctx->untyped_resource_cache->res_id == res_id) {
+ res = ctx->untyped_resource_cache;
+ ctx->untyped_resource_cache = NULL;
+ } else {
+ /* cache miss */
+ struct vrend_untyped_resource *iter;
+ LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) {
+ if (iter->resource->res_id == res_id) {
+ res = iter->resource;
+ list_del(&iter->head);
+ free(iter);
+ break;
+ }
+ }
+ }
+
+ /* either a bad res_id or the resource is already typed */
+ if (!res) {
+ if (vrend_renderer_ctx_res_lookup(ctx, res_id))
+ return 0;
+
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_id);
+ return EINVAL;
+ }
+
+ /* resource is still untyped */
+ if (!res->pipe_resource) {
+#ifdef HAVE_EPOXY_EGL_H
+ const struct vrend_renderer_resource_create_args create_args = {
+ .target = PIPE_TEXTURE_2D,
+ .format = args->format,
+ .bind = args->bind,
+ .width = args->width,
+ .height = args->height,
+ .depth = 1,
+ .array_size = 1,
+ .last_level = 0,
+ .nr_samples = 0,
+ .flags = 0,
+ };
+ int plane_fds[VIRGL_GBM_MAX_PLANES];
+ struct vrend_resource *gr;
+ uint32_t virgl_format;
+ uint32_t drm_format;
+ int ret;
+
+ if (res->fd_type != VIRGL_RESOURCE_FD_DMABUF)
+ return EINVAL;
+
+ for (uint32_t i = 0; i < args->plane_count; i++)
+ plane_fds[i] = res->fd;
+
+ gr = vrend_resource_create(&create_args);
+ if (!gr)
+ return ENOMEM;
+
+ virgl_format = vrend_resource_fixup_emulated_bgra(gr, true);
+ drm_format = 0;
+ if (virgl_gbm_convert_format(&virgl_format, &drm_format)) {
+ vrend_printf("%s: unsupported format %d\n", __func__, virgl_format);
+ FREE(gr);
+ return EINVAL;
+ }
+
+ gr->egl_image = virgl_egl_image_from_dmabuf(egl,
+ args->width,
+ args->height,
+ drm_format,
+ args->modifier,
+ args->plane_count,
+ plane_fds,
+ args->plane_strides,
+ args->plane_offsets);
+ if (!gr->egl_image) {
+ vrend_printf("%s: failed to create egl image\n", __func__);
+ FREE(gr);
+ return EINVAL;
+ }
+
+ gr->storage_bits |= VREND_STORAGE_EGL_IMAGE;
+
+ ret = vrend_resource_alloc_texture(gr, virgl_format, gr->egl_image);
+ if (ret) {
+ virgl_egl_image_destroy(egl, gr->egl_image);
+ FREE(gr);
+ return ret;
+ }
+
+ /* "promote" the fd to pipe_resource */
+ close(res->fd);
+ res->fd = -1;
+ res->fd_type = VIRGL_RESOURCE_FD_INVALID;
+ res->pipe_resource = &gr->base;
+#else /* HAVE_EPOXY_EGL_H */
+ (void)args;
+ vrend_printf("%s: no EGL support \n", __func__);
+ return EINVAL;
+#endif /* HAVE_EPOXY_EGL_H */
+ }
+
+ vrend_ctx_resource_insert(ctx->res_hash,
+ res->res_id,
+ (struct vrend_resource *)res->pipe_resource);
- *map_info = res->map_info;
return 0;
}
+uint32_t vrend_renderer_resource_get_map_info(struct pipe_resource *pres)
+{
+ struct vrend_resource *res = (struct vrend_resource *)pres;
+ return res->map_info;
+}
+
int vrend_renderer_resource_map(struct pipe_resource *pres, void **map, uint64_t *out_size)
{
struct vrend_resource *res = (struct vrend_resource *)pres;
@@ -10646,7 +11019,41 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres)
return 0;
}
-int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
+int vrend_renderer_create_ctx0_fence(uint32_t fence_id)
+{
+ void *fence_cookie = (void *)(uintptr_t)fence_id;
+ return vrend_renderer_create_fence(vrend_state.ctx0,
+ VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_cookie);
+}
+
+static bool find_ctx0_fence_locked(struct list_head *fence_list,
+ void *fence_cookie,
+ bool *seen_first,
+ struct vrend_fence **fence)
+{
+ struct vrend_fence *iter;
+
+ LIST_FOR_EACH_ENTRY(iter, fence_list, fences) {
+ /* only consider ctx0 fences */
+ if (iter->ctx != vrend_state.ctx0)
+ continue;
+
+ if (iter->fence_cookie == fence_cookie) {
+ *fence = iter;
+ return true;
+ }
+
+ if (!*seen_first) {
+ if (fence_cookie < iter->fence_cookie)
+ return true;
+ *seen_first = true;
+ }
+ }
+
+ return false;
+}
+
+int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd) {
#ifdef HAVE_EPOXY_EGL_H
if (!vrend_state.use_egl_fence) {
return -EINVAL;
@@ -10655,45 +11062,61 @@ int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
if (vrend_state.sync_thread)
pipe_mutex_lock(vrend_state.fence_mutex);
+ void *fence_cookie = (void *)(uintptr_t)fence_id;
+ bool seen_first = false;
struct vrend_fence *fence = NULL;
- struct vrend_fence *iter;
- uint32_t min_fence_id = UINT_MAX;
-
- if (!LIST_IS_EMPTY(&vrend_state.fence_list)) {
- min_fence_id = LIST_ENTRY(struct vrend_fence, vrend_state.fence_list.next, fences)->fence_id;
- } else if (!LIST_IS_EMPTY(&vrend_state.fence_wait_list)) {
- min_fence_id =
- LIST_ENTRY(struct vrend_fence, vrend_state.fence_wait_list.next, fences)->fence_id;
- }
-
- if (fence_id < min_fence_id) {
- if (vrend_state.sync_thread)
- pipe_mutex_unlock(vrend_state.fence_mutex);
- return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
- }
-
- LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_list, fences) {
- if (iter->fence_id == fence_id) {
- fence = iter;
- break;
- }
- }
-
- if (!fence) {
- LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_wait_list, fences) {
- if (iter->fence_id == fence_id) {
- fence = iter;
- break;
- }
- }
+ bool found = find_ctx0_fence_locked(&vrend_state.fence_list,
+ fence_cookie,
+ &seen_first,
+ &fence);
+ if (!found) {
+ found = find_ctx0_fence_locked(&vrend_state.fence_wait_list,
+ fence_cookie,
+ &seen_first,
+ &fence);
+ /* consider signaled when no active ctx0 fence at all */
+ if (!found && !seen_first)
+ found = true;
}
if (vrend_state.sync_thread)
pipe_mutex_unlock(vrend_state.fence_mutex);
- if (fence && virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd)) {
- return 0;
+ if (found) {
+ if (fence)
+ return virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd) ? 0 : -EINVAL;
+ else
+ return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
}
#endif
return -EINVAL;
}
+
+void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle)
+{
+ struct vrend_resource *res;
+ struct virgl_memory_info *info;
+
+ res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
+
+ info = (struct virgl_memory_info *)res->iov->iov_base;
+
+ if (has_feature(feat_nvx_gpu_memory_info)) {
+ int i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &i);
+ info->total_device_memory = i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &i);
+ info->total_staging_memory = i - info->total_device_memory;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX, &i);
+ info->nr_device_memory_evictions = i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX, &i);
+ info->device_memory_evicted = i;
+ }
+
+ if (has_feature(feat_ati_meminfo)) {
+ int i[4];
+ glGetIntegerv(GL_VBO_FREE_MEMORY_ATI, i);
+ info->avail_device_memory = i[0];
+ info->avail_staging_memory = i[2];
+ }
+}
diff --git a/src/vrend_renderer.h b/src/vrend_renderer.h
index da4ffe9c..297fc5c9 100644
--- a/src/vrend_renderer.h
+++ b/src/vrend_renderer.h
@@ -46,6 +46,7 @@ struct virgl_gl_ctx_param {
};
struct virgl_context;
+struct virgl_resource;
struct vrend_context;
/* Number of mipmap levels for which to keep the backing iov offsets.
@@ -70,11 +71,6 @@ struct vrend_resource {
GLuint id;
GLenum target;
- /* fb id if we need to readback this resource */
- GLuint readback_fb_id;
- GLuint readback_fb_level;
- GLuint readback_fb_z;
-
GLuint tbo_tex_id;/* tbos have two ids to track */
bool y_0_top;
@@ -111,8 +107,11 @@ struct vrend_format_table {
uint32_t flags;
};
+typedef void (*vrend_context_fence_retire)(void *fence_cookie,
+ void *retire_data);
+
struct vrend_if_cbs {
- void (*write_fence)(unsigned fence_id);
+ vrend_context_fence_retire ctx0_fence_retire;
virgl_gl_context (*create_gl_context)(int scanout, struct virgl_gl_ctx_param *params);
void (*destroy_gl_context)(virgl_gl_context ctx);
@@ -185,8 +184,22 @@ struct vrend_renderer_resource_create_args {
uint32_t flags;
};
+/* set the type info of an untyped blob resource */
+struct vrend_renderer_resource_set_type_args {
+ uint32_t format;
+ uint32_t bind;
+ uint32_t width;
+ uint32_t height;
+ uint32_t usage;
+ uint64_t modifier;
+ uint32_t plane_count;
+ uint32_t plane_strides[VIRGL_GBM_MAX_PLANES];
+ uint32_t plane_offsets[VIRGL_GBM_MAX_PLANES];
+};
+
struct pipe_resource *
-vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, void *image_eos);
+vrend_renderer_resource_create(const struct vrend_renderer_resource_create_args *args,
+ void *image_eos);
int vrend_create_surface(struct vrend_context *ctx,
uint32_t handle,
@@ -326,7 +339,6 @@ void vrend_set_min_samples(struct vrend_context *ctx, unsigned min_samples);
void vrend_set_constants(struct vrend_context *ctx,
uint32_t shader,
- uint32_t index,
uint32_t num_constant,
const float *data);
@@ -343,11 +355,18 @@ void vrend_set_tess_state(struct vrend_context *ctx, const float tess_factors[6]
void vrend_renderer_fini(void);
-int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id);
+void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
+ vrend_context_fence_retire retire,
+ void *retire_data);
+
+int vrend_renderer_create_fence(struct vrend_context *ctx,
+ uint32_t flags,
+ void *fence_cookie);
void vrend_renderer_check_fences(void);
-int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd);
+int vrend_renderer_create_ctx0_fence(uint32_t fence_id);
+int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd);
bool vrend_hw_switch_context(struct vrend_context *ctx, bool now);
uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
@@ -405,10 +424,9 @@ void vrend_renderer_get_rect(struct pipe_resource *pres,
int x, int y, int width, int height);
void vrend_renderer_attach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id,
- struct pipe_resource *pres);
+ struct virgl_resource *res);
void vrend_renderer_detach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id);
+ struct virgl_resource *res);
struct vrend_context_tweaks *vrend_get_context_tweaks(struct vrend_context *ctx);
@@ -487,14 +505,20 @@ void vrend_sync_make_current(virgl_gl_context);
int
vrend_renderer_pipe_resource_create(struct vrend_context *ctx, uint32_t blob_id,
- struct vrend_renderer_resource_create_args *args);
+ const struct vrend_renderer_resource_create_args *args);
struct pipe_resource *vrend_get_blob_pipe(struct vrend_context *ctx, uint64_t blob_id);
-int vrend_renderer_resource_get_map_info(struct pipe_resource *pres, uint32_t *map_info);
+int
+vrend_renderer_pipe_resource_set_type(struct vrend_context *ctx,
+ uint32_t res_id,
+ const struct vrend_renderer_resource_set_type_args *args);
+
+uint32_t vrend_renderer_resource_get_map_info(struct pipe_resource *pres);
int vrend_renderer_resource_map(struct pipe_resource *pres, void **map, uint64_t *out_size);
int vrend_renderer_resource_unmap(struct pipe_resource *pres);
+void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle);
#endif
diff --git a/src/vrend_winsys.c b/src/vrend_winsys.c
index 4e4bbc10..43f2e4e6 100644
--- a/src/vrend_winsys.c
+++ b/src/vrend_winsys.c
@@ -198,3 +198,12 @@ int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int
return -1;
#endif
}
+
+uint32_t vrend_winsys_query_video_memory(void)
+{
+#ifdef HAVE_EPOXY_GLX_H
+ return virgl_glx_query_video_memory(glx_info);
+#else
+ return 0;
+#endif
+} \ No newline at end of file
diff --git a/src/vrend_winsys.h b/src/vrend_winsys.h
index 24b1e5bb..5be90ea0 100644
--- a/src/vrend_winsys.h
+++ b/src/vrend_winsys.h
@@ -34,6 +34,10 @@
#include "virglrenderer.h"
+#ifndef DRM_FORMAT_MOD_INVALID
+#define DRM_FORMAT_MOD_INVALID 0x00ffffffffffffffULL
+#endif
+
struct virgl_gl_ctx_param;
#ifdef HAVE_EPOXY_EGL_H
@@ -54,4 +58,6 @@ int vrend_winsys_get_fourcc_for_texture(uint32_t tex_id, uint32_t format, int *f
int vrend_winsys_get_fd_for_texture(uint32_t tex_id, int *fd);
int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset);
+uint32_t vrend_winsys_query_video_memory(void);
+
#endif /* VREND_WINSYS_H */
diff --git a/src/vrend_winsys_egl.c b/src/vrend_winsys_egl.c
index 6a6e7f77..de116d5a 100644
--- a/src/vrend_winsys_egl.c
+++ b/src/vrend_winsys_egl.c
@@ -39,6 +39,7 @@
#include "util/u_memory.h"
#include "virglrenderer.h"
+#include "vrend_winsys.h"
#include "vrend_winsys_egl.h"
#include "virgl_hw.h"
#include "vrend_winsys_gbm.h"
@@ -405,121 +406,148 @@ bool virgl_has_egl_khr_gl_colorspace(struct virgl_egl *egl)
return has_bit(egl->extension_bits, EGL_KHR_GL_COLORSPACE);
}
+void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl,
+ uint32_t width,
+ uint32_t height,
+ uint32_t drm_format,
+ uint64_t drm_modifier,
+ uint32_t plane_count,
+ const int *plane_fds,
+ const uint32_t *plane_strides,
+ const uint32_t *plane_offsets)
+{
+ EGLint attrs[6 + VIRGL_GBM_MAX_PLANES * 10 + 1];
+ uint32_t count;
+
+ assert(VIRGL_GBM_MAX_PLANES <= 4);
+ assert(plane_count && plane_count <= VIRGL_GBM_MAX_PLANES);
+
+ count = 0;
+ attrs[count++] = EGL_WIDTH;
+ attrs[count++] = width;
+ attrs[count++] = EGL_HEIGHT;
+ attrs[count++] = height;
+ attrs[count++] = EGL_LINUX_DRM_FOURCC_EXT;
+ attrs[count++] = drm_format;
+ for (uint32_t i = 0; i < plane_count; i++) {
+ if (i < 3) {
+ attrs[count++] = EGL_DMA_BUF_PLANE0_FD_EXT + i * 3;
+ attrs[count++] = plane_fds[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE0_PITCH_EXT + i * 3;
+ attrs[count++] = plane_strides[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT + i * 3;
+ attrs[count++] = plane_offsets[i];
+ }
+
+ if (has_bit(egl->extension_bits, EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
+ if (i == 3) {
+ attrs[count++] = EGL_DMA_BUF_PLANE3_FD_EXT;
+ attrs[count++] = plane_fds[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
+ attrs[count++] = plane_strides[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
+ attrs[count++] = plane_offsets[i];
+ }
+
+ if (drm_modifier != DRM_FORMAT_MOD_INVALID) {
+ attrs[count++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT + i * 2;
+ attrs[count++] = (uint32_t)drm_modifier;
+ attrs[count++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT + i * 2;
+ attrs[count++] = (uint32_t)(drm_modifier >> 32);
+ }
+ }
+ }
+ attrs[count++] = EGL_NONE;
+ assert(count <= ARRAY_SIZE(attrs));
+
+ return (void *)eglCreateImageKHR(egl->egl_display,
+ EGL_NO_CONTEXT,
+ EGL_LINUX_DMA_BUF_EXT,
+ (EGLClientBuffer)NULL,
+ attrs);
+}
+
+void virgl_egl_image_destroy(struct virgl_egl *egl, void *image)
+{
+ eglDestroyImageKHR(egl->egl_display, image);
+}
+
#ifdef ENABLE_MINIGBM_ALLOCATION
-void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo)
+void *virgl_egl_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo)
{
int ret;
- EGLImageKHR image;
+ void *image = NULL;
int fds[VIRGL_GBM_MAX_PLANES] = {-1, -1, -1, -1};
+ uint32_t strides[VIRGL_GBM_MAX_PLANES];
+ uint32_t offsets[VIRGL_GBM_MAX_PLANES];
int num_planes = gbm_bo_get_plane_count(bo);
- // When the bo has 3 planes with modifier support, it requires 37 components.
- EGLint khr_image_attrs[37] = {
- EGL_WIDTH,
- gbm_bo_get_width(bo),
- EGL_HEIGHT,
- gbm_bo_get_height(bo),
- EGL_LINUX_DRM_FOURCC_EXT,
- (int)gbm_bo_get_format(bo),
- EGL_NONE,
- };
if (num_planes < 0 || num_planes > VIRGL_GBM_MAX_PLANES)
- return (void *)EGL_NO_IMAGE_KHR;
+ return NULL;
for (int plane = 0; plane < num_planes; plane++) {
uint32_t handle = gbm_bo_get_handle_for_plane(bo, plane).u32;
ret = virgl_gbm_export_fd(egl->gbm->device, handle, &fds[plane]);
if (ret < 0) {
vrend_printf( "failed to export plane handle\n");
- image = (void *)EGL_NO_IMAGE_KHR;
goto out_close;
}
- }
- size_t attrs_index = 6;
- for (int plane = 0; plane < num_planes; plane++) {
- khr_image_attrs[attrs_index++] = EGL_DMA_BUF_PLANE0_FD_EXT + plane * 3;
- khr_image_attrs[attrs_index++] = fds[plane];
- khr_image_attrs[attrs_index++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT + plane * 3;
- khr_image_attrs[attrs_index++] = gbm_bo_get_offset(bo, plane);
- khr_image_attrs[attrs_index++] = EGL_DMA_BUF_PLANE0_PITCH_EXT + plane * 3;
- khr_image_attrs[attrs_index++] = gbm_bo_get_stride_for_plane(bo, plane);
- if (has_bit(egl->extension_bits, EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
- const uint64_t modifier = gbm_bo_get_modifier(bo);
- khr_image_attrs[attrs_index++] =
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT + plane * 2;
- khr_image_attrs[attrs_index++] = modifier & 0xfffffffful;
- khr_image_attrs[attrs_index++] =
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT + plane * 2;
- khr_image_attrs[attrs_index++] = modifier >> 32;
- }
+ strides[plane] = gbm_bo_get_stride_for_plane(bo, plane);
+ offsets[plane] = gbm_bo_get_offset(bo, plane);
}
- khr_image_attrs[attrs_index++] = EGL_NONE;
- image = eglCreateImageKHR(egl->egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL,
- khr_image_attrs);
+ image = virgl_egl_image_from_dmabuf(egl,
+ gbm_bo_get_width(bo),
+ gbm_bo_get_height(bo),
+ gbm_bo_get_format(bo),
+ gbm_bo_get_modifier(bo),
+ num_planes,
+ fds,
+ strides,
+ offsets);
out_close:
for (int plane = 0; plane < num_planes; plane++)
close(fds[plane]);
- return (void*)image;
+ return image;
}
-void *virgl_egl_aux_plane_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo, int plane)
+void *virgl_egl_aux_plane_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo, int plane)
{
int ret;
- EGLImageKHR image = EGL_NO_IMAGE_KHR;
+ void *image = NULL;
int fd = -1;
int bytes_per_pixel = virgl_gbm_get_plane_bytes_per_pixel(bo, plane);
if (bytes_per_pixel != 1 && bytes_per_pixel != 2)
- return (void *)EGL_NO_IMAGE_KHR;
+ return NULL;
uint32_t handle = gbm_bo_get_handle_for_plane(bo, plane).u32;
ret = drmPrimeHandleToFD(gbm_device_get_fd(egl->gbm->device), handle, DRM_CLOEXEC, &fd);
if (ret < 0) {
vrend_printf("failed to export plane handle %d\n", errno);
- return (void *)EGL_NO_IMAGE_KHR;
- }
-
- EGLint khr_image_attrs[17] = {
- EGL_WIDTH,
- virgl_gbm_get_plane_width(bo, plane),
- EGL_HEIGHT,
- virgl_gbm_get_plane_height(bo, plane),
- EGL_LINUX_DRM_FOURCC_EXT,
- (int) (bytes_per_pixel == 1 ? GBM_FORMAT_R8 : GBM_FORMAT_GR88),
- EGL_DMA_BUF_PLANE0_FD_EXT,
- fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT,
- gbm_bo_get_offset(bo, plane),
- EGL_DMA_BUF_PLANE0_PITCH_EXT,
- gbm_bo_get_stride_for_plane(bo, plane),
- };
-
- if (has_bit(egl->extension_bits, EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
- const uint64_t modifier = gbm_bo_get_modifier(bo);
- khr_image_attrs[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
- khr_image_attrs[13] = modifier & 0xfffffffful;
- khr_image_attrs[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
- khr_image_attrs[15] = modifier >> 32;
- khr_image_attrs[16] = EGL_NONE;
- } else {
- khr_image_attrs[12] = EGL_NONE;
+ return NULL;
}
- image = eglCreateImageKHR(egl->egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, khr_image_attrs);
-
+ const uint32_t format = bytes_per_pixel == 1 ? GBM_FORMAT_R8 : GBM_FORMAT_GR88;
+ const uint32_t stride = gbm_bo_get_stride_for_plane(bo, plane);
+ const uint32_t offset = gbm_bo_get_offset(bo, plane);
+ image = virgl_egl_image_from_dmabuf(egl,
+ virgl_gbm_get_plane_width(bo, plane),
+ virgl_gbm_get_plane_height(bo, plane),
+ format,
+ gbm_bo_get_modifier(bo),
+ 1,
+ &fd,
+ &stride,
+ &offset);
close(fd);
- return (void*)image;
-}
-void virgl_egl_image_destroy(struct virgl_egl *egl, void *image)
-{
- eglDestroyImageKHR(egl->egl_display, image);
+ return image;
}
-#endif
+#endif /* ENABLE_MINIGBM_ALLOCATION */
bool virgl_egl_supports_fences(struct virgl_egl *egl)
{
diff --git a/src/vrend_winsys_egl.h b/src/vrend_winsys_egl.h
index 1fb0ccbc..b4c9b21e 100644
--- a/src/vrend_winsys_egl.h
+++ b/src/vrend_winsys_egl.h
@@ -56,10 +56,20 @@ int virgl_egl_get_fd_for_texture(struct virgl_egl *egl, uint32_t tex_id, int *fd
int virgl_egl_get_fd_for_texture2(struct virgl_egl *egl, uint32_t tex_id, int *fd, int *stride,
int *offset);
-void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo);
-void *virgl_egl_aux_plane_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo, int plane);
+void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl,
+ uint32_t width,
+ uint32_t height,
+ uint32_t drm_format,
+ uint64_t drm_modifier,
+ uint32_t plane_count,
+ const int *plane_fds,
+ const uint32_t *plane_strides,
+ const uint32_t *plane_offsets);
void virgl_egl_image_destroy(struct virgl_egl *egl, void *image);
+void *virgl_egl_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo);
+void *virgl_egl_aux_plane_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo, int plane);
+
bool virgl_egl_supports_fences(struct virgl_egl *egl);
EGLSyncKHR virgl_egl_fence_create(struct virgl_egl *egl);
void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence);
diff --git a/src/vrend_winsys_gbm.c b/src/vrend_winsys_gbm.c
index 578086d4..65197b4a 100644
--- a/src/vrend_winsys_gbm.c
+++ b/src/vrend_winsys_gbm.c
@@ -38,6 +38,7 @@
#include "util/u_memory.h"
#include "pipe/p_state.h"
+#include "vrend_winsys.h"
#include "vrend_winsys_gbm.h"
#include "virgl_hw.h"
#include "vrend_debug.h"
@@ -461,7 +462,7 @@ int virgl_gbm_export_query(struct gbm_bo *bo, struct virgl_renderer_export_query
query->out_num_fds = 0;
query->out_fourcc = 0;
- query->out_modifier = 0;
+ query->out_modifier = DRM_FORMAT_MOD_INVALID;
for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
query->out_fds[plane] = -1;
query->out_strides[plane] = 0;
diff --git a/src/vrend_winsys_glx.c b/src/vrend_winsys_glx.c
index 23bb9834..5b907ad6 100644
--- a/src/vrend_winsys_glx.c
+++ b/src/vrend_winsys_glx.c
@@ -102,3 +102,15 @@ int virgl_glx_make_context_current(struct virgl_glx *d, virgl_renderer_gl_contex
{
return glXMakeContextCurrent(d->display, d->pbuffer, d->pbuffer, virglctx);
}
+
+uint32_t virgl_glx_query_video_memory(struct virgl_glx *d)
+{
+ uint32_t video_memory = 0;
+ if (d) {
+ if (epoxy_has_glx_extension(d->display, DefaultScreen(d->display), "GLX_MESA_query_renderer")) {
+ glXQueryCurrentRendererIntegerMESA(GLX_RENDERER_VIDEO_MEMORY_MESA, &video_memory);
+ }
+ }
+
+ return video_memory;
+}
\ No newline at end of file
diff --git a/src/vrend_winsys_glx.h b/src/vrend_winsys_glx.h
index e5cecbac..e8f7697a 100644
--- a/src/vrend_winsys_glx.h
+++ b/src/vrend_winsys_glx.h
@@ -33,5 +33,6 @@ void virgl_glx_destroy(struct virgl_glx *ve);
virgl_renderer_gl_context virgl_glx_create_context(struct virgl_glx *ve, struct virgl_gl_ctx_param *vparams);
void virgl_glx_destroy_context(struct virgl_glx *ve, virgl_renderer_gl_context virglctx);
int virgl_glx_make_context_current(struct virgl_glx *ve, virgl_renderer_gl_context virglctx);
+uint32_t virgl_glx_query_video_memory(struct virgl_glx *ve);
#endif