aboutsummaryrefslogtreecommitdiff
path: root/src/vrend_renderer.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/vrend_renderer.c')
-rw-r--r--src/vrend_renderer.c2113
1 files changed, 1268 insertions, 845 deletions
diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
index 93b8ca2a..15474d23 100644
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -55,9 +55,14 @@
#include "virgl_resource.h"
#include "virglrenderer.h"
#include "virglrenderer_hw.h"
+#include "virgl_protocol.h"
#include "tgsi/tgsi_text.h"
+#ifdef HAVE_EPOXY_GLX_H
+#include <epoxy/glx.h>
+#endif
+
/*
* VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both
* virgl_caps_v1 and are exactly the same.
@@ -76,8 +81,14 @@ static const uint32_t fake_occlusion_query_samples_passed_default = 1024;
const struct vrend_if_cbs *vrend_clicbs;
struct vrend_fence {
- uint32_t fence_id;
- uint32_t ctx_id;
+ /* When the sync thread is waiting on the fence and the main thread
+ * destroys the context, ctx is set to NULL. Otherwise, ctx is always
+ * valid.
+ */
+ struct vrend_context *ctx;
+ uint32_t flags;
+ void *fence_cookie;
+
union {
GLsync glsyncobj;
#ifdef HAVE_EPOXY_EGL_H
@@ -110,6 +121,7 @@ enum features_id
feat_arb_robustness,
feat_arb_buffer_storage,
feat_arrays_of_arrays,
+ feat_ati_meminfo,
feat_atomic_counters,
feat_base_instance,
feat_barrier,
@@ -154,6 +166,7 @@ enum features_id
feat_multi_draw_indirect,
feat_nv_conditional_render,
feat_nv_prim_restart,
+ feat_nvx_gpu_memory_info,
feat_polygon_offset_clamp,
feat_occlusion_query,
feat_occlusion_query_boolean,
@@ -162,6 +175,7 @@ enum features_id
feat_sample_mask,
feat_sample_shading,
feat_samplers,
+ feat_sampler_border_colors,
feat_shader_clock,
feat_separate_shader_objects,
feat_ssbo,
@@ -206,6 +220,7 @@ static const struct {
FEAT(arb_robustness, UNAVAIL, UNAVAIL, "GL_ARB_robustness" ),
FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage", "GL_EXT_buffer_storage"),
FEAT(arrays_of_arrays, 43, 31, "GL_ARB_arrays_of_arrays"),
+ FEAT(ati_meminfo, UNAVAIL, UNAVAIL, "GL_ATI_meminfo" ),
FEAT(atomic_counters, 42, 31, "GL_ARB_shader_atomic_counters" ),
FEAT(base_instance, 42, UNAVAIL, "GL_ARB_base_instance", "GL_EXT_base_instance" ),
FEAT(barrier, 42, 31, NULL),
@@ -250,6 +265,7 @@ static const struct {
FEAT(multi_draw_indirect, 43, UNAVAIL, "GL_ARB_multi_draw_indirect", "GL_EXT_multi_draw_indirect" ),
FEAT(nv_conditional_render, UNAVAIL, UNAVAIL, "GL_NV_conditional_render" ),
FEAT(nv_prim_restart, UNAVAIL, UNAVAIL, "GL_NV_primitive_restart" ),
+ FEAT(nvx_gpu_memory_info, UNAVAIL, UNAVAIL, "GL_NVX_gpu_memory_info" ),
FEAT(polygon_offset_clamp, 46, UNAVAIL, "GL_ARB_polygon_offset_clamp", "GL_EXT_polygon_offset_clamp"),
FEAT(occlusion_query, 15, UNAVAIL, "GL_ARB_occlusion_query"),
FEAT(occlusion_query_boolean, 33, 30, "GL_EXT_occlusion_query_boolean", "GL_ARB_occlusion_query2"),
@@ -258,6 +274,7 @@ static const struct {
FEAT(sample_mask, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(sample_shading, 40, 32, "GL_ARB_sample_shading", "GL_OES_sample_shading" ),
FEAT(samplers, 33, 30, "GL_ARB_sampler_objects" ),
+ FEAT(sampler_border_colors, 33, 32, "GL_ARB_sampler_objects", "GL_EXT_texture_border_clamp", "GL_OES_texture_border_clamp" ),
FEAT(separate_shader_objects, 41, 31, "GL_ARB_seperate_shader_objects"),
FEAT(shader_clock, UNAVAIL, UNAVAIL, "GL_ARB_shader_clock" ),
FEAT(ssbo, 43, 31, "GL_ARB_shader_storage_buffer_object" ),
@@ -270,7 +287,7 @@ static const struct {
FEAT(texture_barrier, 45, UNAVAIL, "GL_ARB_texture_barrier" ),
FEAT(texture_buffer_range, 43, 32, "GL_ARB_texture_buffer_range" ),
FEAT(texture_gather, 40, 31, "GL_ARB_texture_gather" ),
- FEAT(texture_multisample, 32, 30, "GL_ARB_texture_multisample" ),
+ FEAT(texture_multisample, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(texture_query_lod, 40, UNAVAIL, "GL_ARB_texture_query_lod", "GL_EXT_texture_query_lod"),
FEAT(texture_srgb_decode, UNAVAIL, UNAVAIL, "GL_EXT_texture_sRGB_decode" ),
FEAT(texture_storage, 42, 30, "GL_ARB_texture_storage" ),
@@ -316,8 +333,12 @@ struct global_renderer_state {
int eventfd;
pipe_mutex fence_mutex;
+ /* a fence is always on either of the lists, or is pointed to by
+ * fence_waiting
+ */
struct list_head fence_list;
struct list_head fence_wait_list;
+ struct vrend_fence *fence_waiting;
pipe_condvar fence_cond;
struct vrend_context *ctx0;
@@ -355,6 +376,7 @@ struct vrend_linked_shader_program {
bool dual_src_linked;
struct vrend_shader *ss[PIPE_SHADER_TYPES];
+ uint64_t vs_fs_key;
uint32_t ubo_used_mask[PIPE_SHADER_TYPES];
uint32_t samplers_used_mask[PIPE_SHADER_TYPES];
@@ -415,10 +437,7 @@ struct vrend_shader_selector {
struct vrend_texture {
struct vrend_resource base;
struct pipe_sampler_state state;
- GLenum cur_swizzle_r;
- GLenum cur_swizzle_g;
- GLenum cur_swizzle_b;
- GLenum cur_swizzle_a;
+ GLint cur_swizzle[4];
GLuint cur_srgb_decode;
GLuint cur_base, cur_max;
};
@@ -452,10 +471,7 @@ struct vrend_sampler_view {
enum virgl_formats format;
GLenum target;
GLuint val0, val1;
- GLuint gl_swizzle_r;
- GLuint gl_swizzle_g;
- GLuint gl_swizzle_b;
- GLuint gl_swizzle_a;
+ GLint gl_swizzle[4];
GLenum depth_texture_mode;
GLuint srgb_decode;
struct vrend_resource *texture;
@@ -540,6 +556,14 @@ struct vrend_streamout_object {
#define XFB_STATE_STARTED 2
#define XFB_STATE_PAUSED 3
+struct vrend_vertex_buffer {
+ struct pipe_vertex_buffer base;
+ uint32_t res_id;
+};
+
+#define VREND_PROGRAM_NQUEUES (1 << 8)
+#define VREND_PROGRAM_NQUEUE_MASK (VREND_PROGRAM_NQUEUES - 1)
+
struct vrend_sub_context {
struct list_head head;
@@ -550,14 +574,18 @@ struct vrend_sub_context {
GLuint vaoid;
uint32_t enabled_attribs_bitmask;
- struct list_head programs;
+ /* Using an array of lists only adds VREND_PROGRAM_NQUEUES - 1 list_head
+ * structures to the consumed memory, but looking up the program can
+ * be sped up by a factor of VREND_PROGRAM_NQUEUES, which makes this
+ * worthwhile. */
+ struct list_head gl_programs[VREND_PROGRAM_NQUEUES];
+ struct list_head cs_programs;
struct util_hash_table *object_hash;
struct vrend_vertex_element_array *ve;
int num_vbos;
int old_num_vbos; /* for cleaning up */
- struct pipe_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
- uint32_t vbo_res_ids[PIPE_MAX_ATTRIBS];
+ struct vrend_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer ib;
uint32_t index_buffer_res_id;
@@ -573,7 +601,7 @@ struct vrend_sub_context {
struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES];
struct vrend_linked_shader_program *prog;
- int prog_ids[PIPE_SHADER_TYPES];
+ GLuint prog_ids[PIPE_SHADER_TYPES];
struct vrend_shader_view views[PIPE_SHADER_TYPES];
struct vrend_constants consts[PIPE_SHADER_TYPES];
@@ -655,6 +683,13 @@ struct vrend_sub_context {
int fake_occlusion_query_samples_passed_multiplier;
int prim_mode;
+ bool drawing;
+ struct vrend_context *parent;
+};
+
+struct vrend_untyped_resource {
+ struct virgl_resource *resource;
+ struct list_head head;
};
struct vrend_context {
@@ -671,7 +706,6 @@ struct vrend_context {
bool in_error;
bool ctx_switch_pending;
bool pstip_inited;
- bool drawing;
GLuint pstipple_tex_id;
@@ -680,31 +714,55 @@ struct vrend_context {
/* resource bounds to this context */
struct util_hash_table *res_hash;
+ /*
+ * vrend_context only works with typed virgl_resources. More specifically,
+ * it works with vrend_resources that are inherited from pipe_resources
+ * wrapped in virgl_resources.
+ *
+ * Normally, a vrend_resource is created first by
+ * vrend_renderer_resource_create. It is then wrapped in a virgl_resource
+ * by virgl_resource_create_from_pipe. Depending on whether it is a blob
+ * resource or not, the two functions can be called from different paths.
+ * But we always get both a virgl_resource and a vrend_resource as a
+ * result.
+ *
+ * It is however possible that we encounter untyped virgl_resources that
+ * have no pipe_resources. To work with untyped virgl_resources, we park
+ * them in untyped_resources first when they are attached. We move them
+ * into res_hash only after we get the type information and create the
+ * vrend_resources in vrend_decode_pipe_resource_set_type.
+ */
+ struct list_head untyped_resources;
+ struct virgl_resource *untyped_resource_cache;
+
struct list_head active_nontimer_query_list;
struct vrend_shader_cfg shader_cfg;
unsigned debug_flags;
+
+ vrend_context_fence_retire fence_retire;
+ void *fence_retire_data;
};
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle);
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause);
-static void vrend_update_viewport_state(struct vrend_context *ctx);
-static void vrend_update_scissor_state(struct vrend_context *ctx);
+static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx);
+static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx);
static void vrend_destroy_query_object(void *obj_ptr);
static void vrend_finish_context_switch(struct vrend_context *ctx);
-static void vrend_patch_blend_state(struct vrend_context *ctx);
-static void vrend_update_frontface_state(struct vrend_context *ctx);
+static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx);
+static void vrend_update_frontface_state(struct vrend_sub_context *ctx);
static void vrender_get_glsl_version(int *glsl_version);
static void vrend_destroy_program(struct vrend_linked_shader_program *ent);
-static void vrend_apply_sampler_state(struct vrend_context *ctx,
+static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id, int sampler_id,
struct vrend_sampler_view *tview);
static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples);
-void vrend_update_stencil_state(struct vrend_context *ctx);
+void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx);
static struct vrend_format_table tex_conv_table[VIRGL_FORMAT_MAX_EXTENDED];
@@ -1053,7 +1111,7 @@ static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
free(sel);
}
-static bool vrend_compile_shader(struct vrend_context *ctx,
+static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx,
struct vrend_shader *shader)
{
GLint param;
@@ -1068,7 +1126,7 @@ static bool vrend_compile_shader(struct vrend_context *ctx,
char infolog[65536];
int len;
glGetShaderInfoLog(shader->id, 65536, &len, infolog);
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_printf("shader failed to compile\n%s\n", infolog);
vrend_shader_dump(shader);
return false;
@@ -1151,11 +1209,11 @@ static bool vrend_is_timer_query(GLenum gltype)
gltype == GL_TIME_ELAPSED;
}
-static void vrend_use_program(struct vrend_context *ctx, GLuint program_id)
+static void vrend_use_program(struct vrend_sub_context *sub_ctx, GLuint program_id)
{
- if (ctx->sub->program_id != program_id) {
+ if (sub_ctx->program_id != program_id) {
glUseProgram(program_id);
- ctx->sub->program_id = program_id;
+ sub_ctx->program_id = program_id;
}
}
@@ -1198,10 +1256,10 @@ static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_e
}
}
-static void vrend_stencil_test_enable(struct vrend_context *ctx, bool stencil_test_enable)
+static void vrend_stencil_test_enable(struct vrend_sub_context *sub_ctx, bool stencil_test_enable)
{
- if (ctx->sub->stencil_test_enabled != stencil_test_enable) {
- ctx->sub->stencil_test_enabled = stencil_test_enable;
+ if (sub_ctx->stencil_test_enabled != stencil_test_enable) {
+ sub_ctx->stencil_test_enabled = stencil_test_enable;
if (stencil_test_enable)
glEnable(GL_STENCIL_TEST);
else
@@ -1257,7 +1315,7 @@ static char *get_skip_str(int *skip_val)
return start_skip;
}
-static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_context *ctx,
+static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_sub_context *sub_ctx,
int prog_id,
struct vrend_shader_info *sinfo)
{
@@ -1272,7 +1330,7 @@ static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_context *ctx,
if (!so->num_outputs)
return;
- VREND_DEBUG_EXT(dbg_shader_streamout, ctx, dump_stream_out(so));
+ VREND_DEBUG_EXT(dbg_shader_streamout, sub_ctx->parent, dump_stream_out(so));
for (i = 0; i < so->num_outputs; i++) {
if (last_buffer != so->output[i].output_buffer) {
@@ -1496,9 +1554,9 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs);
sprog->id = prog_id;
- list_addtail(&sprog->head, &ctx->sub->programs);
+ list_addtail(&sprog->head, &ctx->sub->cs_programs);
- vrend_use_program(ctx, prog_id);
+ vrend_use_program(ctx->sub, prog_id);
bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE, 0);
bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE, 0);
@@ -1508,7 +1566,7 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
return sprog;
}
-static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
+static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_context *sub_ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs,
@@ -1535,20 +1593,20 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
if (gs) {
if (gs->id > 0)
glAttachShader(prog_id, gs->id);
- set_stream_out_varyings(ctx, prog_id, &gs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &gs->sel->sinfo);
} else if (tes)
- set_stream_out_varyings(ctx, prog_id, &tes->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &tes->sel->sinfo);
else
- set_stream_out_varyings(ctx, prog_id, &vs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &vs->sel->sinfo);
glAttachShader(prog_id, fs->id);
if (fs->sel->sinfo.num_outputs > 1) {
- if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
+ if (util_blend_state_is_dual(&sub_ctx->blend_state, 0)) {
if (has_feature(feat_dual_src_blend)) {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
} else {
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
}
sprog->dual_src_linked = true;
} else {
@@ -1579,7 +1637,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
glGetProgramInfoLog(prog_id, 65536, &len, infolog);
vrend_printf("got error linking\n%s\n", infolog);
/* dump shaders */
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_shader_dump(vs);
if (gs)
vrend_shader_dump(gs);
@@ -1591,6 +1649,9 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
+ sprog->vs_fs_key = (((uint64_t)fs->id) << 32) | (vs->id & ~VREND_PROGRAM_NQUEUE_MASK) |
+ (sprog->dual_src_linked ? 1 : 0);
+
sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
sprog->ss[PIPE_SHADER_TESS_CTRL] = tcs;
sprog->ss[PIPE_SHADER_TESS_EVAL] = tes;
@@ -1607,7 +1668,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
sprog->id = prog_id;
- list_addtail(&sprog->head, &ctx->sub->programs);
+ list_addtail(&sprog->head, &sub_ctx->gl_programs[vs->id & VREND_PROGRAM_NQUEUE_MASK]);
if (fs->key.pstipple_tex)
sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
@@ -1619,7 +1680,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
sprog->fs_alpha_ref_val_loc = -1;
sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust_y");
- vrend_use_program(ctx, prog_id);
+ vrend_use_program(sub_ctx, prog_id);
int next_ubo_id = 0, next_sampler_id = 0;
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
@@ -1659,16 +1720,17 @@ static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend
GLuint cs_id)
{
struct vrend_linked_shader_program *ent;
- LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
- if (!ent->ss[PIPE_SHADER_COMPUTE])
- continue;
- if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id)
+ LIST_FOR_EACH_ENTRY(ent, &ctx->sub->cs_programs, head) {
+ if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id) {
+ list_del(&ent->head);
+ list_add(&ent->head, &ctx->sub->cs_programs);
return ent;
+ }
}
return NULL;
}
-static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_context *ctx,
+static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_sub_context *sub_ctx,
GLuint vs_id,
GLuint fs_id,
GLuint gs_id,
@@ -1676,16 +1738,15 @@ static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_co
GLuint tes_id,
bool dual_src)
{
+ uint64_t vs_fs_key = (((uint64_t)fs_id) << 32) | (vs_id & ~VREND_PROGRAM_NQUEUE_MASK) |
+ (dual_src ? 1 : 0);
+
struct vrend_linked_shader_program *ent;
- LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
- if (ent->dual_src_linked != dual_src)
- continue;
- if (ent->ss[PIPE_SHADER_COMPUTE])
+
+ struct list_head *programs = &sub_ctx->gl_programs[vs_id & VREND_PROGRAM_NQUEUE_MASK];
+ LIST_FOR_EACH_ENTRY(ent, programs, head) {
+ if (likely(ent->vs_fs_key != vs_fs_key))
continue;
- if (ent->ss[PIPE_SHADER_VERTEX]->id != vs_id)
- continue;
- if (ent->ss[PIPE_SHADER_FRAGMENT]->id != fs_id)
- continue;
if (ent->ss[PIPE_SHADER_GEOMETRY] &&
ent->ss[PIPE_SHADER_GEOMETRY]->id != gs_id)
continue;
@@ -1695,8 +1756,14 @@ static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_co
if (ent->ss[PIPE_SHADER_TESS_EVAL] &&
ent->ss[PIPE_SHADER_TESS_EVAL]->id != tes_id)
continue;
+ /* put the entry in front */
+ if (programs->next != &ent->head) {
+ list_del(&ent->head);
+ list_add(&ent->head, programs);
+ }
return ent;
}
+
return NULL;
}
@@ -1725,11 +1792,16 @@ static void vrend_free_programs(struct vrend_sub_context *sub)
{
struct vrend_linked_shader_program *ent, *tmp;
- if (LIST_IS_EMPTY(&sub->programs))
- return;
+ if (!LIST_IS_EMPTY(&sub->cs_programs)) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->cs_programs, head)
+ vrend_destroy_program(ent);
+ }
- LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
- vrend_destroy_program(ent);
+ for (unsigned i = 0; i < VREND_PROGRAM_NQUEUES; ++i) {
+ if (!LIST_IS_EMPTY(&sub->gl_programs[i])) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->gl_programs[i], head)
+ vrend_destroy_program(ent);
+ }
}
}
@@ -1944,6 +2016,16 @@ static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_fi
return 0;
}
+static void apply_sampler_border_color(GLuint sampler,
+ const GLuint colors[static 4])
+{
+ if (has_feature(feat_sampler_border_colors)) {
+ glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, colors);
+ } else if (colors[0] || colors[1] || colors[2] || colors[3]) {
+ vrend_printf("sampler border color setting requested but not supported\n");
+ }
+}
+
int vrend_create_sampler_state(struct vrend_context *ctx,
uint32_t handle,
struct pipe_sampler_state *templ)
@@ -1984,7 +2066,7 @@ int vrend_create_sampler_state(struct vrend_context *ctx,
}
- glSamplerParameterIuiv(state->ids[i], GL_TEXTURE_BORDER_COLOR, templ->border_color.ui);
+ apply_sampler_border_color(state->ids[i], templ->border_color.ui);
glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT, i == 0 ? GL_SKIP_DECODE_EXT : GL_DECODE_EXT);
}
}
@@ -2105,10 +2187,8 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]];
}
- view->gl_swizzle_r = to_gl_swizzle(swizzle[0]);
- view->gl_swizzle_g = to_gl_swizzle(swizzle[1]);
- view->gl_swizzle_b = to_gl_swizzle(swizzle[2]);
- view->gl_swizzle_a = to_gl_swizzle(swizzle[3]);
+ for (unsigned i = 0; i < 4; ++i)
+ view->gl_swizzle[i] = to_gl_swizzle(swizzle[i]);
if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
enum virgl_formats format;
@@ -2172,10 +2252,12 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
glTexParameteri(view->target, GL_TEXTURE_BASE_LEVEL, base_level);
glTexParameteri(view->target, GL_TEXTURE_MAX_LEVEL, max_level);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
+ }
+ } else
+ glTexParameteriv(view->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
if (util_format_is_srgb(view->format) &&
has_feature(feat_texture_srgb_decode)) {
glTexParameteri(view->target, GL_TEXTURE_SRGB_DECODE_EXT,
@@ -2330,9 +2412,9 @@ static void vrend_hw_set_zsurf_texture(struct vrend_context *ctx)
}
}
-static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
+static void vrend_hw_set_color_surface(struct vrend_sub_context *sub_ctx, int index)
{
- struct vrend_surface *surf = ctx->sub->surf[index];
+ struct vrend_surface *surf = sub_ctx->surf[index];
if (!surf) {
GLenum attachment = GL_COLOR_ATTACHMENT0 + index;
@@ -2340,15 +2422,15 @@ static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
glFramebufferTexture2D(GL_FRAMEBUFFER, attachment,
GL_TEXTURE_2D, 0, 0);
} else {
- uint32_t first_layer = ctx->sub->surf[index]->val1 & 0xffff;
- uint32_t last_layer = (ctx->sub->surf[index]->val1 >> 16) & 0xffff;
+ uint32_t first_layer = sub_ctx->surf[index]->val1 & 0xffff;
+ uint32_t last_layer = (sub_ctx->surf[index]->val1 >> 16) & 0xffff;
vrend_fb_bind_texture_id(surf->texture, surf->id, index, surf->val0,
first_layer != last_layer ? 0xffffffff : first_layer);
}
}
-static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
+static void vrend_hw_emit_framebuffer_state(struct vrend_sub_context *sub_ctx)
{
static const GLenum buffers[8] = {
GL_COLOR_ATTACHMENT0,
@@ -2361,19 +2443,19 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
GL_COLOR_ATTACHMENT7,
};
- if (ctx->sub->nr_cbufs == 0) {
+ if (sub_ctx->nr_cbufs == 0) {
glReadBuffer(GL_NONE);
if (has_feature(feat_srgb_write_control)) {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
- ctx->sub->framebuffer_srgb_enabled = false;
+ sub_ctx->framebuffer_srgb_enabled = false;
}
} else if (has_feature(feat_srgb_write_control)) {
struct vrend_surface *surf = NULL;
bool use_srgb = false;
int i;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- surf = ctx->sub->surf[i];
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ surf = sub_ctx->surf[i];
if (util_format_is_srgb(surf->format)) {
use_srgb = true;
}
@@ -2384,25 +2466,25 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
} else {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
}
- ctx->sub->framebuffer_srgb_enabled = use_srgb;
+ sub_ctx->framebuffer_srgb_enabled = use_srgb;
}
if (vrend_state.use_gles &&
- vrend_get_tweak_is_active(&ctx->sub->tweaks, virgl_tweak_gles_brga_apply_dest_swizzle)) {
- ctx->sub->swizzle_output_rgb_to_bgr = 0;
- for (int i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- struct vrend_surface *surf = ctx->sub->surf[i];
+ vrend_get_tweak_is_active(&sub_ctx->tweaks, virgl_tweak_gles_brga_apply_dest_swizzle)) {
+ sub_ctx->swizzle_output_rgb_to_bgr = 0;
+ for (int i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ struct vrend_surface *surf = sub_ctx->surf[i];
if (surf->texture->base.bind & VIRGL_BIND_PREFER_EMULATED_BGRA) {
- VREND_DEBUG(dbg_tweak, ctx, "Swizzled BGRA output for 0x%x (%s)\n", i, util_format_name(surf->format));
- ctx->sub->swizzle_output_rgb_to_bgr |= 1 << i;
+ VREND_DEBUG(dbg_tweak, sub_ctx->parent, "Swizzled BGRA output for 0x%x (%s)\n", i, util_format_name(surf->format));
+ sub_ctx->swizzle_output_rgb_to_bgr |= 1 << i;
}
}
}
}
- glDrawBuffers(ctx->sub->nr_cbufs, buffers);
+ glDrawBuffers(sub_ctx->nr_cbufs, buffers);
}
void vrend_set_framebuffer_state(struct vrend_context *ctx,
@@ -2416,10 +2498,12 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
GLint new_height = -1;
bool new_ibf = false;
- glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->fb_id);
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
+ glBindFramebuffer(GL_FRAMEBUFFER, sub_ctx->fb_id);
if (zsurf_handle) {
- zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
+ zsurf = vrend_object_lookup(sub_ctx->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
@@ -2427,18 +2511,18 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
} else
zsurf = NULL;
- if (ctx->sub->zsurf != zsurf) {
- vrend_surface_reference(&ctx->sub->zsurf, zsurf);
+ if (sub_ctx->zsurf != zsurf) {
+ vrend_surface_reference(&sub_ctx->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
- old_num = ctx->sub->nr_cbufs;
- ctx->sub->nr_cbufs = nr_cbufs;
- ctx->sub->old_nr_cbufs = old_num;
+ old_num = sub_ctx->nr_cbufs;
+ sub_ctx->nr_cbufs = nr_cbufs;
+ sub_ctx->old_nr_cbufs = old_num;
for (i = 0; i < (int)nr_cbufs; i++) {
if (surf_handle[i] != 0) {
- surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
+ surf = vrend_object_lookup(sub_ctx->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
@@ -2446,32 +2530,32 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
} else
surf = NULL;
- if (ctx->sub->surf[i] != surf) {
- vrend_surface_reference(&ctx->sub->surf[i], surf);
- vrend_hw_set_color_surface(ctx, i);
+ if (sub_ctx->surf[i] != surf) {
+ vrend_surface_reference(&sub_ctx->surf[i], surf);
+ vrend_hw_set_color_surface(sub_ctx, i);
}
}
- if (old_num > ctx->sub->nr_cbufs) {
- for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
- vrend_surface_reference(&ctx->sub->surf[i], NULL);
- vrend_hw_set_color_surface(ctx, i);
+ if (old_num > sub_ctx->nr_cbufs) {
+ for (i = sub_ctx->nr_cbufs; i < old_num; i++) {
+ vrend_surface_reference(&sub_ctx->surf[i], NULL);
+ vrend_hw_set_color_surface(sub_ctx, i);
}
}
/* find a buffer to set fb_height from */
- if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
+ if (sub_ctx->nr_cbufs == 0 && !sub_ctx->zsurf) {
new_height = 0;
new_ibf = false;
- } else if (ctx->sub->nr_cbufs == 0) {
- new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
- new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
+ } else if (sub_ctx->nr_cbufs == 0) {
+ new_height = u_minify(sub_ctx->zsurf->texture->base.height0, sub_ctx->zsurf->val0);
+ new_ibf = sub_ctx->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- surf = ctx->sub->surf[i];
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ surf = sub_ctx->surf[i];
break;
}
}
@@ -2484,23 +2568,23 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
}
if (new_height != -1) {
- if (ctx->sub->fb_height != (uint32_t)new_height || ctx->sub->inverted_fbo_content != new_ibf) {
- ctx->sub->fb_height = new_height;
- ctx->sub->inverted_fbo_content = new_ibf;
- ctx->sub->viewport_state_dirty = (1 << 0);
+ if (sub_ctx->fb_height != (uint32_t)new_height || sub_ctx->inverted_fbo_content != new_ibf) {
+ sub_ctx->fb_height = new_height;
+ sub_ctx->inverted_fbo_content = new_ibf;
+ sub_ctx->viewport_state_dirty = (1 << 0);
}
}
- vrend_hw_emit_framebuffer_state(ctx);
+ vrend_hw_emit_framebuffer_state(sub_ctx);
- if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
+ if (sub_ctx->nr_cbufs > 0 || sub_ctx->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
vrend_printf("failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
- ctx->sub->shader_dirty = true;
- ctx->sub->blend_state_dirty = true;
+ sub_ctx->shader_dirty = true;
+ sub_ctx->blend_state_dirty = true;
}
void vrend_set_framebuffer_state_no_attach(UNUSED struct vrend_context *ctx,
@@ -2622,41 +2706,48 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
}
type = GL_FALSE;
- if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
- if (desc->channel[0].size == 32)
- type = GL_FLOAT;
- else if (desc->channel[0].size == 64)
- type = GL_DOUBLE;
- else if (desc->channel[0].size == 16)
- type = GL_HALF_FLOAT;
- } else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 8)
- type = GL_UNSIGNED_BYTE;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 8)
- type = GL_BYTE;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 16)
- type = GL_UNSIGNED_SHORT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 16)
- type = GL_SHORT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 32)
- type = GL_UNSIGNED_INT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 32)
- type = GL_INT;
- else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
- elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
- elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
- type = GL_INT_2_10_10_10_REV;
- else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
- elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
- elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
- type = GL_UNSIGNED_INT_2_10_10_10_REV;
- else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
- type = GL_UNSIGNED_INT_10F_11F_11F_REV;
+ switch (desc->channel[0].type) {
+ case UTIL_FORMAT_TYPE_FLOAT:
+ switch (desc->channel[0].size) {
+ case 16: type = GL_HALF_FLOAT; break;
+ case 32: type = GL_FLOAT; break;
+ case 64: type = GL_DOUBLE; break;
+ }
+ break;
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ switch (desc->channel[0].size) {
+ case 8: type = GL_UNSIGNED_BYTE; break;
+ case 16: type = GL_UNSIGNED_SHORT; break;
+ case 32: type = GL_UNSIGNED_INT; break;
+ }
+ break;
+ case UTIL_FORMAT_TYPE_SIGNED:
+ switch (desc->channel[0].size) {
+ case 8: type = GL_BYTE; break;
+ case 16: type = GL_SHORT; break;
+ case 32: type = GL_INT; break;
+ }
+ break;
+ }
+ if (type == GL_FALSE) {
+ switch (elements[i].src_format) {
+ case PIPE_FORMAT_R10G10B10A2_SSCALED:
+ case PIPE_FORMAT_R10G10B10A2_SNORM:
+ case PIPE_FORMAT_B10G10R10A2_SNORM:
+ type = GL_INT_2_10_10_10_REV;
+ break;
+ case PIPE_FORMAT_R10G10B10A2_USCALED:
+ case PIPE_FORMAT_R10G10B10A2_UNORM:
+ case PIPE_FORMAT_B10G10R10A2_UNORM:
+ type = GL_UNSIGNED_INT_2_10_10_10_REV;
+ break;
+ case PIPE_FORMAT_R11G11B10_FLOAT:
+ type = GL_UNSIGNED_INT_10F_11F_11F_REV;
+ break;
+ default:
+ ;
+ }
+ }
if (type == GL_FALSE) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
@@ -2725,7 +2816,6 @@ void vrend_bind_vertex_elements_state(struct vrend_context *ctx,
void vrend_set_constants(struct vrend_context *ctx,
uint32_t shader,
- UNUSED uint32_t index,
uint32_t num_constant,
const float *data)
{
@@ -2759,6 +2849,9 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
if (!has_feature(feat_ubo))
return;
+ struct pipe_constant_buffer *cbs = &ctx->sub->cbs[shader][index];
+ const uint32_t mask = 1u << index;
+
if (res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
@@ -2766,18 +2859,17 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
- ctx->sub->cbs[shader][index].buffer = (struct pipe_resource *)res;
- ctx->sub->cbs[shader][index].buffer_offset = offset;
- ctx->sub->cbs[shader][index].buffer_size = length;
-
- ctx->sub->const_bufs_used_mask[shader] |= (1u << index);
+ cbs->buffer = (struct pipe_resource *)res;
+ cbs->buffer_offset = offset;
+ cbs->buffer_size = length;
+ ctx->sub->const_bufs_used_mask[shader] |= mask;
} else {
- ctx->sub->cbs[shader][index].buffer = NULL;
- ctx->sub->cbs[shader][index].buffer_offset = 0;
- ctx->sub->cbs[shader][index].buffer_size = 0;
- ctx->sub->const_bufs_used_mask[shader] &= ~(1u << index);
+ cbs->buffer = NULL;
+ cbs->buffer_offset = 0;
+ cbs->buffer_size = 0;
+ ctx->sub->const_bufs_used_mask[shader] &= ~mask;
}
- ctx->sub->const_bufs_dirty[shader] |= (1u << index);
+ ctx->sub->const_bufs_dirty[shader] |= mask;
}
void vrend_set_index_buffer(struct vrend_context *ctx,
@@ -2814,27 +2906,28 @@ void vrend_set_single_vbo(struct vrend_context *ctx,
uint32_t res_handle)
{
struct vrend_resource *res;
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[index];
- if (ctx->sub->vbo[index].stride != stride ||
- ctx->sub->vbo[index].buffer_offset != buffer_offset ||
- ctx->sub->vbo_res_ids[index] != res_handle)
+ if (vbo->base.stride != stride ||
+ vbo->base.buffer_offset != buffer_offset ||
+ vbo->res_id != res_handle)
ctx->sub->vbo_dirty = true;
- ctx->sub->vbo[index].stride = stride;
- ctx->sub->vbo[index].buffer_offset = buffer_offset;
+ vbo->base.stride = stride;
+ vbo->base.buffer_offset = buffer_offset;
if (res_handle == 0) {
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, NULL);
- ctx->sub->vbo_res_ids[index] = 0;
- } else if (ctx->sub->vbo_res_ids[index] != res_handle) {
+ vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, NULL);
+ vbo->res_id = 0;
+ } else if (vbo->res_id != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
- ctx->sub->vbo_res_ids[index] = 0;
+ vbo->res_id = 0;
return;
}
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, res);
- ctx->sub->vbo_res_ids[index] = res_handle;
+ vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, res);
+ vbo->res_id = res_handle;
}
}
@@ -2851,8 +2944,8 @@ void vrend_set_num_vbo(struct vrend_context *ctx,
ctx->sub->vbo_dirty = true;
for (i = num_vbo; i < old_num; i++) {
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].buffer, NULL);
- ctx->sub->vbo_res_ids[i] = 0;
+ vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].base.buffer, NULL);
+ ctx->sub->vbo[i].res_id = 0;
}
}
@@ -2916,22 +3009,18 @@ void vrend_set_single_sampler_view(struct vrend_context *ctx,
glTexParameteri(view->texture->target, GL_TEXTURE_MAX_LEVEL, max_level);
tex->cur_max = max_level;
}
- if (tex->cur_swizzle_r != view->gl_swizzle_r) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
- tex->cur_swizzle_r = view->gl_swizzle_r;
- }
- if (tex->cur_swizzle_g != view->gl_swizzle_g) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
- tex->cur_swizzle_g = view->gl_swizzle_g;
- }
- if (tex->cur_swizzle_b != view->gl_swizzle_b) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
- tex->cur_swizzle_b = view->gl_swizzle_b;
- }
- if (tex->cur_swizzle_a != view->gl_swizzle_a) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
- tex->cur_swizzle_a = view->gl_swizzle_a;
+ if (memcmp(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint))) {
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ if (tex->cur_swizzle[i] != view->gl_swizzle[i]) {
+ glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
+ }
+ }
+ } else
+ glTexParameteriv(view->texture->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
+ memcpy(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint));
}
+
if (tex->cur_srgb_decode != view->srgb_decode && util_format_is_srgb(tex->base.base.format)) {
if (has_feature(feat_samplers))
ctx->sub->sampler_views_dirty[shader_type] |= (1u << index);
@@ -3146,7 +3235,7 @@ static inline bool can_emulate_logicop(enum pipe_logicop op)
}
-static inline void vrend_fill_shader_key(struct vrend_context *ctx,
+static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
struct vrend_shader_key *key)
{
@@ -3157,54 +3246,54 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
bool add_alpha_test = true;
key->cbufs_are_a8_bitmask = 0;
// Only use integer info when drawing to avoid stale info.
- if (vrend_state.use_integer && ctx->drawing) {
- key->attrib_signed_int_bitmask = ctx->sub->ve->signed_int_bitmask;
- key->attrib_unsigned_int_bitmask = ctx->sub->ve->unsigned_int_bitmask;
+ if (vrend_state.use_integer && sub_ctx->drawing) {
+ key->attrib_signed_int_bitmask = sub_ctx->ve->signed_int_bitmask;
+ key->attrib_unsigned_int_bitmask = sub_ctx->ve->unsigned_int_bitmask;
}
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (!ctx->sub->surf[i])
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (!sub_ctx->surf[i])
continue;
- if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format))
+ if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format))
key->cbufs_are_a8_bitmask |= (1 << i);
- if (util_format_is_pure_integer(ctx->sub->surf[i]->format)) {
+ if (util_format_is_pure_integer(sub_ctx->surf[i]->format)) {
add_alpha_test = false;
- update_int_sign_masks(ctx->sub->surf[i]->format, i,
+ update_int_sign_masks(sub_ctx->surf[i]->format, i,
&key->cbufs_signed_int_bitmask,
&key->cbufs_unsigned_int_bitmask);
}
- key->surface_component_bits[i] = util_format_get_component_bits(ctx->sub->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
+ key->surface_component_bits[i] = util_format_get_component_bits(sub_ctx->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
}
if (add_alpha_test) {
- key->add_alpha_test = ctx->sub->dsa_state.alpha.enabled;
- key->alpha_test = ctx->sub->dsa_state.alpha.func;
+ key->add_alpha_test = sub_ctx->dsa_state.alpha.enabled;
+ key->alpha_test = sub_ctx->dsa_state.alpha.func;
}
- key->pstipple_tex = ctx->sub->rs_state.poly_stipple_enable;
- key->color_two_side = ctx->sub->rs_state.light_twoside;
+ key->pstipple_tex = sub_ctx->rs_state.poly_stipple_enable;
+ key->color_two_side = sub_ctx->rs_state.light_twoside;
- key->clip_plane_enable = ctx->sub->rs_state.clip_plane_enable;
- key->flatshade = ctx->sub->rs_state.flatshade ? true : false;
+ key->clip_plane_enable = sub_ctx->rs_state.clip_plane_enable;
+ key->flatshade = sub_ctx->rs_state.flatshade ? true : false;
} else {
key->add_alpha_test = 0;
key->pstipple_tex = 0;
}
- if (type == PIPE_SHADER_FRAGMENT && vrend_state.use_gles && can_emulate_logicop(ctx->sub->blend_state.logicop_func)) {
- key->fs_logicop_enabled = ctx->sub->blend_state.logicop_enable;
- key->fs_logicop_func = ctx->sub->blend_state.logicop_func;
+ if (type == PIPE_SHADER_FRAGMENT && vrend_state.use_gles && can_emulate_logicop(sub_ctx->blend_state.logicop_func)) {
+ key->fs_logicop_enabled = sub_ctx->blend_state.logicop_enable;
+ key->fs_logicop_func = sub_ctx->blend_state.logicop_func;
key->fs_logicop_emulate_coherent = !has_feature(feat_framebuffer_fetch_non_coherent);
}
- key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
+ key->invert_fs_origin = !sub_ctx->inverted_fbo_content;
if (type == PIPE_SHADER_FRAGMENT)
- key->fs_swizzle_output_rgb_to_bgr = ctx->sub->swizzle_output_rgb_to_bgr;
+ key->fs_swizzle_output_rgb_to_bgr = sub_ctx->swizzle_output_rgb_to_bgr;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
+ if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY])
key->gs_present = true;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_CTRL])
key->tcs_present = true;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL])
key->tes_present = true;
int prev_type = -1;
@@ -3213,7 +3302,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
* old shader is still bound we should ignore the "previous" (as in
* execution order) shader when the key is evaluated, unless the currently
* bound shader selector is actually refers to the current shader. */
- if (ctx->sub->shaders[type] == sel) {
+ if (sub_ctx->shaders[type] == sel) {
switch (type) {
case PIPE_SHADER_GEOMETRY:
if (key->tcs_present || key->tes_present)
@@ -3243,36 +3332,36 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
}
}
- if (prev_type != -1 && ctx->sub->shaders[prev_type]) {
- key->prev_stage_num_clip_out = ctx->sub->shaders[prev_type]->sinfo.num_clip_out;
- key->prev_stage_num_cull_out = ctx->sub->shaders[prev_type]->sinfo.num_cull_out;
- key->num_indirect_generic_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
- key->num_indirect_patch_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
- key->num_prev_generic_and_patch_outputs = ctx->sub->shaders[prev_type]->sinfo.num_generic_and_patch_outputs;
- key->guest_sent_io_arrays = ctx->sub->shaders[prev_type]->sinfo.guest_sent_io_arrays;
+ if (prev_type != -1 && sub_ctx->shaders[prev_type]) {
+ key->prev_stage_num_clip_out = sub_ctx->shaders[prev_type]->sinfo.num_clip_out;
+ key->prev_stage_num_cull_out = sub_ctx->shaders[prev_type]->sinfo.num_cull_out;
+ key->num_indirect_generic_inputs = sub_ctx->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
+ key->num_indirect_patch_inputs = sub_ctx->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
+ key->num_prev_generic_and_patch_outputs = sub_ctx->shaders[prev_type]->sinfo.num_generic_and_patch_outputs;
+ key->guest_sent_io_arrays = sub_ctx->shaders[prev_type]->sinfo.guest_sent_io_arrays;
memcpy(key->prev_stage_generic_and_patch_outputs_layout,
- ctx->sub->shaders[prev_type]->sinfo.generic_outputs_layout,
+ sub_ctx->shaders[prev_type]->sinfo.generic_outputs_layout,
64 * sizeof (struct vrend_layout_info));
- key->force_invariant_inputs = ctx->sub->shaders[prev_type]->sinfo.invariant_outputs;
+ key->force_invariant_inputs = sub_ctx->shaders[prev_type]->sinfo.invariant_outputs;
}
// Only use coord_replace if frag shader receives GL_POINTS
if (type == PIPE_SHADER_FRAGMENT) {
- int fs_prim_mode = ctx->sub->prim_mode; // inherit draw-call's mode
+ int fs_prim_mode = sub_ctx->prim_mode; // inherit draw-call's mode
switch (prev_type) {
case PIPE_SHADER_TESS_EVAL:
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
fs_prim_mode = PIPE_PRIM_POINTS;
break;
case PIPE_SHADER_GEOMETRY:
- fs_prim_mode = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
+ fs_prim_mode = sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
break;
}
key->fs_prim_is_points = (fs_prim_mode == PIPE_PRIM_POINTS);
- key->coord_replace = ctx->sub->rs_state.point_quad_rasterization
+ key->coord_replace = sub_ctx->rs_state.point_quad_rasterization
&& key->fs_prim_is_points
- ? ctx->sub->rs_state.sprite_coord_enable
+ ? sub_ctx->rs_state.sprite_coord_enable
: 0x0;
}
@@ -3284,7 +3373,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
else if (key->gs_present)
next_type = PIPE_SHADER_GEOMETRY;
else if (key->tes_present) {
- if (!ctx->shader_cfg.use_gles)
+ if (!vrend_state.use_gles)
next_type = PIPE_SHADER_TESS_EVAL;
else
next_type = PIPE_SHADER_TESS_CTRL;
@@ -3306,17 +3395,17 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
break;
}
- if (next_type != -1 && ctx->sub->shaders[next_type]) {
- key->next_stage_pervertex_in = ctx->sub->shaders[next_type]->sinfo.has_pervertex_in;
- key->num_indirect_generic_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_generic_inputs;
- key->num_indirect_patch_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_patch_inputs;
- key->generic_outputs_expected_mask = ctx->sub->shaders[next_type]->sinfo.generic_inputs_emitted_mask;
+ if (next_type != -1 && sub_ctx->shaders[next_type]) {
+ key->next_stage_pervertex_in = sub_ctx->shaders[next_type]->sinfo.has_pervertex_in;
+ key->num_indirect_generic_outputs = sub_ctx->shaders[next_type]->sinfo.num_indirect_generic_inputs;
+ key->num_indirect_patch_outputs = sub_ctx->shaders[next_type]->sinfo.num_indirect_patch_inputs;
+ key->generic_outputs_expected_mask = sub_ctx->shaders[next_type]->sinfo.generic_inputs_emitted_mask;
}
if (type != PIPE_SHADER_FRAGMENT &&
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]) {
struct vrend_shader *fs =
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current;
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current;
key->compiled_fs_uid = fs->uid;
key->fs_info = &fs->sel->sinfo;
}
@@ -3363,7 +3452,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
if (1) {//shader->sel->type == PIPE_SHADER_FRAGMENT || shader->sel->type == PIPE_SHADER_GEOMETRY) {
bool ret;
- ret = vrend_compile_shader(ctx, shader);
+ ret = vrend_compile_shader(ctx->sub, shader);
if (ret == false) {
glDeleteShader(shader->id);
strarray_free(&shader->glsl_strings, true);
@@ -3373,7 +3462,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
return 0;
}
-static int vrend_shader_select(struct vrend_context *ctx,
+static int vrend_shader_select(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
bool *dirty)
{
@@ -3382,7 +3471,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
int r;
memset(&key, 0, sizeof(key));
- vrend_fill_shader_key(ctx, sel, &key);
+ vrend_fill_shader_key(sub_ctx, sel, &key);
if (sel->current && !memcmp(&sel->current->key, &key, sizeof(key)))
return 0;
@@ -3406,7 +3495,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- r = vrend_shader_create(ctx, shader, &key);
+ r = vrend_shader_create(sub_ctx->parent, shader, &key);
if (r) {
sel->current = NULL;
FREE(shader);
@@ -3422,8 +3511,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
return 0;
}
-static void *vrend_create_shader_state(UNUSED struct vrend_context *ctx,
- const struct pipe_stream_output_info *so_info,
+static void *vrend_create_shader_state(const struct pipe_stream_output_info *so_info,
uint32_t req_local_mem,
unsigned pipe_shader_type)
{
@@ -3448,7 +3536,7 @@ static int vrend_finish_shader(struct vrend_context *ctx,
sel->tokens = tgsi_dup_tokens(tokens);
- r = vrend_shader_select(ctx, sel, NULL);
+ r = vrend_shader_select(ctx->sub, sel, NULL);
if (r) {
return EINVAL;
}
@@ -3489,17 +3577,19 @@ int vrend_create_shader(struct vrend_context *ctx,
else if (((offlen + 3) / 4) > pkt_length)
long_shader = true;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
/* if we have an in progress one - don't allow a new shader
of that type or a different handle. */
- if (ctx->sub->long_shader_in_progress_handle[type]) {
+ if (sub_ctx->long_shader_in_progress_handle[type]) {
if (new_shader == true)
return EINVAL;
- if (handle != ctx->sub->long_shader_in_progress_handle[type])
+ if (handle != sub_ctx->long_shader_in_progress_handle[type])
return EINVAL;
}
if (new_shader) {
- sel = vrend_create_shader_state(ctx, so_info, req_local_mem, type);
+ sel = vrend_create_shader_state(so_info, req_local_mem, type);
if (sel == NULL)
return ENOMEM;
@@ -3512,11 +3602,11 @@ int vrend_create_shader(struct vrend_context *ctx,
}
memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
sel->buf_offset = pkt_length * 4;
- ctx->sub->long_shader_in_progress_handle[type] = handle;
+ sub_ctx->long_shader_in_progress_handle[type] = handle;
} else
finished = true;
} else {
- sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+ sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel) {
vrend_printf( "got continuation without original shader %d\n", handle);
ret = EINVAL;
@@ -3588,7 +3678,7 @@ int vrend_create_shader(struct vrend_context *ctx,
sel->tmp_buf = NULL;
}
free(tokens);
- ctx->sub->long_shader_in_progress_handle[type] = 0;
+ sub_ctx->long_shader_in_progress_handle[type] = 0;
}
if (new_shader) {
@@ -3618,31 +3708,33 @@ void vrend_bind_shader(struct vrend_context *ctx,
if (type > PIPE_SHADER_COMPUTE)
return;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
if (handle == 0) {
if (type == PIPE_SHADER_COMPUTE)
- ctx->sub->cs_shader_dirty = true;
+ sub_ctx->cs_shader_dirty = true;
else
- ctx->sub->shader_dirty = true;
- vrend_shader_state_reference(&ctx->sub->shaders[type], NULL);
+ sub_ctx->shader_dirty = true;
+ vrend_shader_state_reference(&sub_ctx->shaders[type], NULL);
return;
}
- sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+ sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel)
return;
if (sel->type != type)
return;
- if (ctx->sub->shaders[sel->type] != sel) {
+ if (sub_ctx->shaders[sel->type] != sel) {
if (type == PIPE_SHADER_COMPUTE)
- ctx->sub->cs_shader_dirty = true;
+ sub_ctx->cs_shader_dirty = true;
else
- ctx->sub->shader_dirty = true;
- ctx->sub->prog_ids[sel->type] = 0;
+ sub_ctx->shader_dirty = true;
+ sub_ctx->prog_ids[sel->type] = 0;
}
- vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
+ vrend_shader_state_reference(&sub_ctx->shaders[sel->type], sel);
}
void vrend_clear(struct vrend_context *ctx,
@@ -3651,6 +3743,7 @@ void vrend_clear(struct vrend_context *ctx,
double depth, unsigned stencil)
{
GLbitfield bits = 0;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
if (ctx->in_error)
return;
@@ -3658,20 +3751,20 @@ void vrend_clear(struct vrend_context *ctx,
if (ctx->ctx_switch_pending)
vrend_finish_context_switch(ctx);
- vrend_update_frontface_state(ctx);
- if (ctx->sub->stencil_state_dirty)
- vrend_update_stencil_state(ctx);
- if (ctx->sub->scissor_state_dirty)
- vrend_update_scissor_state(ctx);
- if (ctx->sub->viewport_state_dirty)
- vrend_update_viewport_state(ctx);
+ vrend_update_frontface_state(sub_ctx);
+ if (sub_ctx->stencil_state_dirty)
+ vrend_update_stencil_state(sub_ctx);
+ if (sub_ctx->scissor_state_dirty)
+ vrend_update_scissor_state(sub_ctx);
+ if (sub_ctx->viewport_state_dirty)
+ vrend_update_viewport_state(sub_ctx);
- vrend_use_program(ctx, 0);
+ vrend_use_program(sub_ctx, 0);
glDisable(GL_SCISSOR_TEST);
if (buffers & PIPE_CLEAR_COLOR) {
- if (ctx->sub->nr_cbufs && ctx->sub->surf[0] && vrend_format_is_emulated_alpha(ctx->sub->surf[0]->format)) {
+ if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_format_is_emulated_alpha(sub_ctx->surf[0]->format)) {
glClearColor(color->f[3], 0.0, 0.0, 0.0);
} else {
glClearColor(color->f[0], color->f[1], color->f[2], color->f[3]);
@@ -3680,7 +3773,7 @@ void vrend_clear(struct vrend_context *ctx,
/* This function implements Gallium's full clear callback (st->pipe->clear) on the host. This
callback requires no color component be masked. We must unmask all components before
calling glClear* and restore the previous colormask afterwards, as Gallium expects. */
- if (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (sub_ctx->hw_blend_state.independent_blend_enable &&
has_feature(feat_indep_blend)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
@@ -3708,24 +3801,24 @@ void vrend_clear(struct vrend_context *ctx,
glClearStencil(stencil);
}
- if (ctx->sub->hw_rs_state.rasterizer_discard)
+ if (sub_ctx->hw_rs_state.rasterizer_discard)
glDisable(GL_RASTERIZER_DISCARD);
if (buffers & PIPE_CLEAR_COLOR) {
uint32_t mask = 0;
int i;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i])
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i])
mask |= (1 << i);
}
if (mask != (buffers >> 2)) {
mask = buffers >> 2;
while (mask) {
i = u_bit_scan(&mask);
- if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_uint(ctx->sub->surf[i] && ctx->sub->surf[i]->format))
+ if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_uint(sub_ctx->surf[i]->format))
glClearBufferuiv(GL_COLOR,
i, (GLuint *)color);
- else if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_sint(ctx->sub->surf[i] && ctx->sub->surf[i]->format))
+ else if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_sint(sub_ctx->surf[i]->format))
glClearBufferiv(GL_COLOR,
i, (GLint *)color);
else
@@ -3748,40 +3841,40 @@ void vrend_clear(struct vrend_context *ctx,
* get here is because the guest cleared all those states but gallium
* didn't forward them before calling the clear command
*/
- if (ctx->sub->hw_rs_state.rasterizer_discard)
+ if (sub_ctx->hw_rs_state.rasterizer_discard)
glEnable(GL_RASTERIZER_DISCARD);
if (buffers & PIPE_CLEAR_DEPTH) {
- if (!ctx->sub->dsa_state.depth.writemask)
+ if (!sub_ctx->dsa_state.depth.writemask)
glDepthMask(GL_FALSE);
}
/* Restore previous stencil buffer write masks for both front and back faces */
if (buffers & PIPE_CLEAR_STENCIL) {
- glStencilMaskSeparate(GL_FRONT, ctx->sub->dsa_state.stencil[0].writemask);
- glStencilMaskSeparate(GL_BACK, ctx->sub->dsa_state.stencil[1].writemask);
+ glStencilMaskSeparate(GL_FRONT, sub_ctx->dsa_state.stencil[0].writemask);
+ glStencilMaskSeparate(GL_BACK, sub_ctx->dsa_state.stencil[1].writemask);
}
/* Restore previous colormask */
if (buffers & PIPE_CLEAR_COLOR) {
- if (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (sub_ctx->hw_blend_state.independent_blend_enable &&
has_feature(feat_indep_blend)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
- struct pipe_blend_state *blend = &ctx->sub->hw_blend_state;
+ struct pipe_blend_state *blend = &sub_ctx->hw_blend_state;
glColorMaskIndexedEXT(i, blend->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
} else {
- glColorMask(ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
+ glColorMask(sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
- if (ctx->sub->hw_rs_state.scissor)
+ if (sub_ctx->hw_rs_state.scissor)
glEnable(GL_SCISSOR_TEST);
else
glDisable(GL_SCISSOR_TEST);
@@ -3819,20 +3912,20 @@ void vrend_clear_texture(struct vrend_context* ctx,
}
}
-static void vrend_update_scissor_state(struct vrend_context *ctx)
+static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx)
{
struct pipe_scissor_state *ss;
GLint y;
GLuint idx;
- unsigned mask = ctx->sub->scissor_state_dirty;
+ unsigned mask = sub_ctx->scissor_state_dirty;
while (mask) {
idx = u_bit_scan(&mask);
if (idx >= PIPE_MAX_VIEWPORTS) {
- vrend_report_buffer_error(ctx, 0);
+ vrend_report_buffer_error(sub_ctx->parent, 0);
break;
}
- ss = &ctx->sub->ss[idx];
+ ss = &sub_ctx->ss[idx];
y = ss->miny;
if (idx > 0 && has_feature(feat_viewport_array))
@@ -3840,39 +3933,39 @@ static void vrend_update_scissor_state(struct vrend_context *ctx)
else
glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
}
- ctx->sub->scissor_state_dirty = 0;
+ sub_ctx->scissor_state_dirty = 0;
}
-static void vrend_update_viewport_state(struct vrend_context *ctx)
+static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx)
{
GLint cy;
- unsigned mask = ctx->sub->viewport_state_dirty;
+ unsigned mask = sub_ctx->viewport_state_dirty;
int idx;
while (mask) {
idx = u_bit_scan(&mask);
- if (ctx->sub->viewport_is_negative)
- cy = ctx->sub->vps[idx].cur_y - ctx->sub->vps[idx].height;
+ if (sub_ctx->viewport_is_negative)
+ cy = sub_ctx->vps[idx].cur_y - sub_ctx->vps[idx].height;
else
- cy = ctx->sub->vps[idx].cur_y;
+ cy = sub_ctx->vps[idx].cur_y;
if (idx > 0 && has_feature(feat_viewport_array))
- glViewportIndexedf(idx, ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
+ glViewportIndexedf(idx, sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height);
else
- glViewport(ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
+ glViewport(sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height);
if (idx && has_feature(feat_viewport_array))
if (vrend_state.use_gles) {
- glDepthRangeIndexedfOES(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangeIndexedfOES(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
} else
- glDepthRangeIndexed(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangeIndexed(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
else
if (vrend_state.use_gles)
- glDepthRangefOES(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangefOES(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
else
- glDepthRange(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRange(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
}
- ctx->sub->viewport_state_dirty = 0;
+ sub_ctx->viewport_state_dirty = 0;
}
static GLenum get_gs_xfb_mode(GLenum mode)
@@ -3947,7 +4040,7 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
/* XYZZY: debug this? */
break;
}
- res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].buffer;
+ res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].base.buffer;
if (!res) {
vrend_printf("cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
@@ -3978,10 +4071,12 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
glBindBuffer(GL_ARRAY_BUFFER, res->id);
- if (ctx->sub->vbo[vbo_index].stride == 0) {
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[vbo_index];
+
+ if (vbo->base.stride == 0) {
void *data;
/* for 0 stride we are kinda screwed */
- data = glMapBufferRange(GL_ARRAY_BUFFER, ctx->sub->vbo[vbo_index].buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
+ data = glMapBufferRange(GL_ARRAY_BUFFER, vbo->base.buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
switch (ve->nr_chan) {
case 1:
@@ -4003,9 +4098,9 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
} else {
enable_bitmask |= (1 << loc);
if (util_format_is_pure_integer(ve->base.src_format)) {
- glVertexAttribIPointer(loc, ve->nr_chan, ve->type, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
+ glVertexAttribIPointer(loc, ve->nr_chan, ve->type, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
} else {
- glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
+ glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
}
glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
}
@@ -4037,69 +4132,86 @@ static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
glBindVertexArray(va->id);
if (ctx->sub->vbo_dirty) {
- GLsizei count = 0;
- GLuint buffers[PIPE_MAX_ATTRIBS];
- GLintptr offsets[PIPE_MAX_ATTRIBS];
- GLsizei strides[PIPE_MAX_ATTRIBS];
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[0];
- for (i = 0; i < ctx->sub->num_vbos; i++) {
- struct vrend_resource *res = (struct vrend_resource *)ctx->sub->vbo[i].buffer;
- if (!res) {
- buffers[count] = 0;
- offsets[count] = 0;
- strides[count++] = 0;
- } else {
- buffers[count] = res->id;
- offsets[count] = ctx->sub->vbo[i].buffer_offset,
- strides[count++] = ctx->sub->vbo[i].stride;
+ if (has_feature(feat_bind_vertex_buffers)) {
+ GLsizei count = MAX2(ctx->sub->num_vbos, ctx->sub->old_num_vbos);
+
+ GLuint buffers[PIPE_MAX_ATTRIBS];
+ GLintptr offsets[PIPE_MAX_ATTRIBS];
+ GLsizei strides[PIPE_MAX_ATTRIBS];
+
+ for (i = 0; i < ctx->sub->num_vbos; i++) {
+ struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
+ if (res) {
+ buffers[i] = res->id;
+ offsets[i] = vbo[i].base.buffer_offset;
+ strides[i] = vbo[i].base.stride;
+ } else {
+ buffers[i] = 0;
+ offsets[i] = 0;
+ strides[i] = 0;
+ }
+ }
+
+ for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
+ buffers[i] = 0;
+ offsets[i] = 0;
+ strides[i] = 0;
}
- }
- for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
- buffers[count] = 0;
- offsets[count] = 0;
- strides[count++] = 0;
- }
- if (has_feature(feat_bind_vertex_buffers))
glBindVertexBuffers(0, count, buffers, offsets, strides);
- else {
- for (i = 0; i < count; ++i)
- glBindVertexBuffer(i, buffers[i], offsets[i], strides[i]);
+ } else {
+ for (i = 0; i < ctx->sub->num_vbos; i++) {
+ struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
+ if (res)
+ glBindVertexBuffer(i, res->id, vbo[i].base.buffer_offset, vbo[i].base.stride);
+ else
+ glBindVertexBuffer(i, 0, 0, 0);
+ }
+ for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++)
+ glBindVertexBuffer(i, 0, 0, 0);
}
ctx->sub->vbo_dirty = false;
}
}
-static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
+static int vrend_draw_bind_samplers_shader(struct vrend_sub_context *sub_ctx,
int shader_type,
int next_sampler_id)
{
int index = 0;
- uint32_t dirty = ctx->sub->sampler_views_dirty[shader_type];
+ uint32_t dirty = sub_ctx->sampler_views_dirty[shader_type];
+
+ uint32_t mask = sub_ctx->prog->samplers_used_mask[shader_type];
+
+ struct vrend_shader_view *sviews = &sub_ctx->views[shader_type];
- uint32_t mask = ctx->sub->prog->samplers_used_mask[shader_type];
while (mask) {
int i = u_bit_scan(&mask);
- struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
- if (dirty & (1 << i) && tview) {
- if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
- glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
- (tview->gl_swizzle_r == GL_ZERO || tview->gl_swizzle_r == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_g == GL_ZERO || tview->gl_swizzle_g == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_b == GL_ZERO || tview->gl_swizzle_b == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_a == GL_ZERO || tview->gl_swizzle_a == GL_ONE) ? 0.0 : 1.0);
- glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
- tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
+ if (!(dirty & (1 << i)))
+ continue;
+
+ struct vrend_sampler_view *tview = sviews->views[i];
+ if (tview) {
+ if (sub_ctx->prog->shadow_samp_mask[shader_type] & (1 << i)) {
+ glUniform4f(sub_ctx->prog->shadow_samp_mask_locs[shader_type][index],
+ (tview->gl_swizzle[0] == GL_ZERO || tview->gl_swizzle[0] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[1] == GL_ZERO || tview->gl_swizzle[1] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[2] == GL_ZERO || tview->gl_swizzle[2] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[3] == GL_ZERO || tview->gl_swizzle[3] == GL_ONE) ? 0.0 : 1.0);
+ glUniform4f(sub_ctx->prog->shadow_samp_add_locs[shader_type][index],
+ tview->gl_swizzle[0] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[1] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[2] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[3] == GL_ONE ? 1.0 : 0.0);
}
if (tview->texture) {
- GLuint id;
+ GLuint id = tview->id;
struct vrend_resource *texture = tview->texture;
GLenum target = tview->target;
@@ -4108,17 +4220,16 @@ static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
if (has_bit(tview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
id = texture->tbo_tex_id;
target = GL_TEXTURE_BUFFER;
- } else
- id = tview->id;
+ }
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
glBindTexture(target, id);
- if (ctx->sub->views[shader_type].old_ids[i] != id ||
- ctx->sub->sampler_views_dirty[shader_type] & (1 << i)) {
- vrend_apply_sampler_state(ctx, texture, shader_type, i,
+ if (sviews->old_ids[i] != id ||
+ sub_ctx->sampler_views_dirty[shader_type] & (1 << i)) {
+ vrend_apply_sampler_state(sub_ctx, texture, shader_type, i,
next_sampler_id, tview);
- ctx->sub->views[shader_type].old_ids[i] = id;
+ sviews->old_ids[i] = id;
}
dirty &= ~(1 << i);
}
@@ -4126,12 +4237,12 @@ static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
next_sampler_id++;
index++;
}
- ctx->sub->sampler_views_dirty[shader_type] = dirty;
+ sub_ctx->sampler_views_dirty[shader_type] = dirty;
return next_sampler_id;
}
-static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
+static int vrend_draw_bind_ubo_shader(struct vrend_sub_context *sub_ctx,
int shader_type, int next_ubo_id)
{
uint32_t mask, dirty, update;
@@ -4141,9 +4252,9 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
if (!has_feature(feat_ubo))
return next_ubo_id;
- mask = ctx->sub->prog->ubo_used_mask[shader_type];
- dirty = ctx->sub->const_bufs_dirty[shader_type];
- update = dirty & ctx->sub->const_bufs_used_mask[shader_type];
+ mask = sub_ctx->prog->ubo_used_mask[shader_type];
+ dirty = sub_ctx->const_bufs_dirty[shader_type];
+ update = dirty & sub_ctx->const_bufs_used_mask[shader_type];
if (!update)
return next_ubo_id + util_bitcount(mask);
@@ -4154,7 +4265,7 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
if (update & (1 << i)) {
/* The cbs array is indexed using the gallium uniform buffer index */
- cb = &ctx->sub->cbs[shader_type][i];
+ cb = &sub_ctx->cbs[shader_type][i];
res = (struct vrend_resource *)cb->buffer;
glBindBufferRange(GL_UNIFORM_BUFFER, next_ubo_id, res->id,
@@ -4163,26 +4274,26 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
}
next_ubo_id++;
}
- ctx->sub->const_bufs_dirty[shader_type] = dirty;
+ sub_ctx->const_bufs_dirty[shader_type] = dirty;
return next_ubo_id;
}
-static void vrend_draw_bind_const_shader(struct vrend_context *ctx,
+static void vrend_draw_bind_const_shader(struct vrend_sub_context *sub_ctx,
int shader_type, bool new_program)
{
- if (ctx->sub->consts[shader_type].consts &&
- ctx->sub->shaders[shader_type] &&
- (ctx->sub->prog->const_location[shader_type] != -1) &&
- (ctx->sub->const_dirty[shader_type] || new_program)) {
- glUniform4uiv(ctx->sub->prog->const_location[shader_type],
- ctx->sub->shaders[shader_type]->sinfo.num_consts,
- ctx->sub->consts[shader_type].consts);
- ctx->sub->const_dirty[shader_type] = false;
+ if (sub_ctx->consts[shader_type].consts &&
+ sub_ctx->shaders[shader_type] &&
+ (sub_ctx->prog->const_location[shader_type] != -1) &&
+ (sub_ctx->const_dirty[shader_type] || new_program)) {
+ glUniform4uiv(sub_ctx->prog->const_location[shader_type],
+ sub_ctx->shaders[shader_type]->sinfo.num_consts,
+ sub_ctx->consts[shader_type].consts);
+ sub_ctx->const_dirty[shader_type] = false;
}
}
-static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_type)
+static void vrend_draw_bind_ssbo_shader(struct vrend_sub_context *sub_ctx, int shader_type)
{
uint32_t mask;
struct vrend_ssbo *ssbo;
@@ -4192,30 +4303,30 @@ static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_ty
if (!has_feature(feat_ssbo))
return;
- if (!ctx->sub->prog->ssbo_locs[shader_type])
+ if (!sub_ctx->prog->ssbo_locs[shader_type])
return;
- if (!ctx->sub->ssbo_used_mask[shader_type])
+ if (!sub_ctx->ssbo_used_mask[shader_type])
return;
- mask = ctx->sub->ssbo_used_mask[shader_type];
+ mask = sub_ctx->ssbo_used_mask[shader_type];
while (mask) {
i = u_bit_scan(&mask);
- ssbo = &ctx->sub->ssbo[shader_type][i];
+ ssbo = &sub_ctx->ssbo[shader_type][i];
res = (struct vrend_resource *)ssbo->res;
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, i, res->id,
ssbo->buffer_offset, ssbo->buffer_size);
- if (ctx->sub->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
+ if (sub_ctx->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
if (!vrend_state.use_gles)
- glShaderStorageBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ssbo_locs[shader_type][i], i);
+ glShaderStorageBlockBinding(sub_ctx->prog->id, sub_ctx->prog->ssbo_locs[shader_type][i], i);
else
debug_printf("glShaderStorageBlockBinding not supported on gles \n");
}
}
}
-static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
+static void vrend_draw_bind_abo_shader(struct vrend_sub_context *sub_ctx)
{
uint32_t mask;
struct vrend_abo *abo;
@@ -4225,18 +4336,18 @@ static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
if (!has_feature(feat_atomic_counters))
return;
- mask = ctx->sub->abo_used_mask;
+ mask = sub_ctx->abo_used_mask;
while (mask) {
i = u_bit_scan(&mask);
- abo = &ctx->sub->abo[i];
+ abo = &sub_ctx->abo[i];
res = (struct vrend_resource *)abo->res;
glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, i, res->id,
abo->buffer_offset, abo->buffer_size);
}
}
-static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_type)
+static void vrend_draw_bind_images_shader(struct vrend_sub_context *sub_ctx, int shader_type)
{
GLenum access;
GLboolean layered;
@@ -4244,22 +4355,22 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
uint32_t mask, tex_id, level, first_layer;
- if (!ctx->sub->images_used_mask[shader_type])
+ if (!sub_ctx->images_used_mask[shader_type])
return;
- if (!ctx->sub->prog->img_locs[shader_type])
+ if (!sub_ctx->prog->img_locs[shader_type])
return;
if (!has_feature(feat_images))
return;
- mask = ctx->sub->images_used_mask[shader_type];
+ mask = sub_ctx->images_used_mask[shader_type];
while (mask) {
unsigned i = u_bit_scan(&mask);
- if (!(ctx->sub->prog->images_used_mask[shader_type] & (1 << i)))
+ if (!(sub_ctx->prog->images_used_mask[shader_type] & (1 << i)))
continue;
- iview = &ctx->sub->image_views[shader_type][i];
+ iview = &sub_ctx->image_views[shader_type][i];
tex_id = iview->texture->id;
if (has_bit(iview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
if (!iview->texture->tbo_tex_id)
@@ -4285,7 +4396,7 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
}
if (!vrend_state.use_gles)
- glUniform1i(ctx->sub->prog->img_locs[shader_type][i], i);
+ glUniform1i(sub_ctx->prog->img_locs[shader_type][i], i);
switch (iview->access) {
case PIPE_IMAGE_ACCESS_READ:
@@ -4306,61 +4417,162 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
}
}
-static void vrend_draw_bind_objects(struct vrend_context *ctx, bool new_program)
+static void vrend_draw_bind_objects(struct vrend_sub_context *sub_ctx, bool new_program)
{
int next_ubo_id = 0, next_sampler_id = 0;
- for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
- next_ubo_id = vrend_draw_bind_ubo_shader(ctx, shader_type, next_ubo_id);
- vrend_draw_bind_const_shader(ctx, shader_type, new_program);
- next_sampler_id = vrend_draw_bind_samplers_shader(ctx, shader_type,
+ for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= sub_ctx->last_shader_idx; shader_type++) {
+ next_ubo_id = vrend_draw_bind_ubo_shader(sub_ctx, shader_type, next_ubo_id);
+ vrend_draw_bind_const_shader(sub_ctx, shader_type, new_program);
+ next_sampler_id = vrend_draw_bind_samplers_shader(sub_ctx, shader_type,
next_sampler_id);
- vrend_draw_bind_images_shader(ctx, shader_type);
- vrend_draw_bind_ssbo_shader(ctx, shader_type);
+ vrend_draw_bind_images_shader(sub_ctx, shader_type);
+ vrend_draw_bind_ssbo_shader(sub_ctx, shader_type);
}
- vrend_draw_bind_abo_shader(ctx);
+ vrend_draw_bind_abo_shader(sub_ctx);
- if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
+ if (vrend_state.use_core_profile && sub_ctx->prog->fs_stipple_loc != -1) {
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
- glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
- glUniform1i(ctx->sub->prog->fs_stipple_loc, next_sampler_id);
+ glBindTexture(GL_TEXTURE_2D, sub_ctx->parent->pstipple_tex_id);
+ glUniform1i(sub_ctx->prog->fs_stipple_loc, next_sampler_id);
}
- if (vrend_state.use_core_profile && ctx->sub->prog->fs_alpha_ref_val_loc != -1) {
- glUniform1f(ctx->sub->prog->fs_alpha_ref_val_loc, ctx->sub->dsa_state.alpha.ref_value);
+ if (vrend_state.use_core_profile && sub_ctx->prog->fs_alpha_ref_val_loc != -1) {
+ glUniform1f(sub_ctx->prog->fs_alpha_ref_val_loc, sub_ctx->dsa_state.alpha.ref_value);
}
}
static
-void vrend_inject_tcs(struct vrend_context *ctx, int vertices_per_patch)
+void vrend_inject_tcs(struct vrend_sub_context *sub_ctx, int vertices_per_patch)
{
struct pipe_stream_output_info so_info;
memset(&so_info, 0, sizeof(so_info));
- struct vrend_shader_selector *sel = vrend_create_shader_state(ctx,
- &so_info,
+ struct vrend_shader_selector *sel = vrend_create_shader_state(&so_info,
false, PIPE_SHADER_TESS_CTRL);
struct vrend_shader *shader;
shader = CALLOC_STRUCT(vrend_shader);
- vrend_fill_shader_key(ctx, sel, &shader->key);
+ vrend_fill_shader_key(sub_ctx, sel, &shader->key);
shader->sel = sel;
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- vrend_shader_create_passthrough_tcs(ctx, &ctx->shader_cfg,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->tokens,
+ vrend_shader_create_passthrough_tcs(sub_ctx->parent, &sub_ctx->parent->shader_cfg,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->tokens,
&shader->key, vrend_state.tess_factors, &sel->sinfo,
&shader->glsl_strings, vertices_per_patch);
// Need to add inject the selected shader to the shader selector and then the code below
// can continue
sel->tokens = NULL;
sel->current = shader;
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] = sel;
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
+ sub_ctx->shaders[PIPE_SHADER_TESS_CTRL] = sel;
+ sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
shader->id = glCreateShader(conv_shader_type(shader->sel->type));
- vrend_compile_shader(ctx, shader);
+ vrend_compile_shader(sub_ctx, shader);
+}
+
+
+static bool
+vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_info *info)
+{
+ struct vrend_linked_shader_program *prog;
+ bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
+ bool new_program = false;
+
+ struct vrend_shader_selector **shaders = sub_ctx->shaders;
+
+ sub_ctx->shader_dirty = false;
+
+ if (!shaders[PIPE_SHADER_VERTEX] || !shaders[PIPE_SHADER_FRAGMENT]) {
+ vrend_printf("dropping rendering due to missing shaders: %s\n", sub_ctx->parent->debug_name);
+ return false;
+ }
+
+ // For some GPU, we'd like to use integer variable in generated GLSL if
+ // the input buffers are integer formats. But we actually don't know the
+ // buffer formats when the shader is created, we only know it here.
+ // Set it to true so the underlying code knows to use the buffer formats
+ // now.
+ sub_ctx->drawing = true;
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+ sub_ctx->drawing = false;
+
+ if (shaders[PIPE_SHADER_TESS_CTRL] && shaders[PIPE_SHADER_TESS_CTRL]->tokens)
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
+ else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) {
+ VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n");
+ vrend_inject_tcs(sub_ctx, info->vertices_per_patch);
+
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+ }
+
+ if (shaders[PIPE_SHADER_TESS_EVAL])
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
+ if (shaders[PIPE_SHADER_GEOMETRY])
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
+
+ if (!shaders[PIPE_SHADER_VERTEX]->current ||
+ !shaders[PIPE_SHADER_FRAGMENT]->current ||
+ (shaders[PIPE_SHADER_GEOMETRY] && !shaders[PIPE_SHADER_GEOMETRY]->current) ||
+ (shaders[PIPE_SHADER_TESS_CTRL] && !shaders[PIPE_SHADER_TESS_CTRL]->current) ||
+ (shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_TESS_EVAL]->current)) {
+ vrend_printf( "failure to compile shader variants: %s\n", sub_ctx->parent->debug_name);
+ return false;
+ }
+
+ GLuint vs_id = shaders[PIPE_SHADER_VERTEX]->current->id;
+ GLuint fs_id = shaders[PIPE_SHADER_FRAGMENT]->current->id;
+ GLuint gs_id = shaders[PIPE_SHADER_GEOMETRY] ? shaders[PIPE_SHADER_GEOMETRY]->current->id : 0;
+ GLuint tcs_id = shaders[PIPE_SHADER_TESS_CTRL] ? shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0;
+ GLuint tes_id = shaders[PIPE_SHADER_TESS_EVAL] ? shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0;
+
+ bool same_prog = sub_ctx->prog &&
+ vs_id == sub_ctx->prog_ids[PIPE_SHADER_VERTEX] &&
+ fs_id == sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] &&
+ gs_id == sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] &&
+ tcs_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] &&
+ tes_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] &&
+ sub_ctx->prog->dual_src_linked == dual_src;
+
+ if (!same_prog) {
+ prog = lookup_shader_program(sub_ctx, vs_id, fs_id, gs_id, tcs_id, tes_id, dual_src);
+ if (!prog) {
+ prog = add_shader_program(sub_ctx,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->current,
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current,
+ gs_id ? sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
+ tcs_id ? sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
+ tes_id ? sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
+ if (!prog)
+ return false;
+ }
+
+ sub_ctx->last_shader_idx = sub_ctx->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (sub_ctx->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
+ } else
+ prog = sub_ctx->prog;
+ if (sub_ctx->prog != prog) {
+ new_program = true;
+ sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = vs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] = fs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] = gs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] = tcs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] = tes_id;
+ sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = 0;
+ sub_ctx->prog = prog;
+
+ /* mark all constbufs and sampler views as dirty */
+ for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) {
+ sub_ctx->const_bufs_dirty[stage] = ~0;
+ sub_ctx->sampler_views_dirty[stage] = ~0;
+ }
+
+ prog->ref_context = sub_ctx;
+ }
+ return new_program;
}
int vrend_draw_vbo(struct vrend_context *ctx,
@@ -4372,6 +4584,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
bool new_program = false;
struct vrend_resource *indirect_res = NULL;
struct vrend_resource *indirect_params_res = NULL;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
if (ctx->in_error)
return 0;
@@ -4410,173 +4623,63 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (ctx->ctx_switch_pending)
vrend_finish_context_switch(ctx);
- vrend_update_frontface_state(ctx);
+ vrend_update_frontface_state(sub_ctx);
if (ctx->sub->stencil_state_dirty)
- vrend_update_stencil_state(ctx);
+ vrend_update_stencil_state(sub_ctx);
if (ctx->sub->scissor_state_dirty)
- vrend_update_scissor_state(ctx);
+ vrend_update_scissor_state(sub_ctx);
if (ctx->sub->viewport_state_dirty)
- vrend_update_viewport_state(ctx);
+ vrend_update_viewport_state(sub_ctx);
if (ctx->sub->blend_state_dirty)
- vrend_patch_blend_state(ctx);
+ vrend_patch_blend_state(sub_ctx);
// enable primitive-mode-dependent shader variants
- if (ctx->sub->prim_mode != (int)info->mode) {
+ if (sub_ctx->prim_mode != (int)info->mode) {
// Only refresh shader program when switching in/out of GL_POINTS primitive mode
- if (ctx->sub->prim_mode == PIPE_PRIM_POINTS
+ if (sub_ctx->prim_mode == PIPE_PRIM_POINTS
|| (int)info->mode == PIPE_PRIM_POINTS)
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
- ctx->sub->prim_mode = (int)info->mode;
+ sub_ctx->prim_mode = (int)info->mode;
}
- if (ctx->sub->shader_dirty || ctx->sub->swizzle_output_rgb_to_bgr) {
- struct vrend_linked_shader_program *prog;
- bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
- bool same_prog;
-
- ctx->sub->shader_dirty = false;
-
- if (!ctx->sub->shaders[PIPE_SHADER_VERTEX] || !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
- vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
- return 0;
- }
-
- // For some GPU, we'd like to use integer variable in generated GLSL if
- // the input buffers are integer formats. But we actually don't know the
- // buffer formats when the shader is created, we only know it here.
- // Set it to true so the underlying code knows to use the buffer formats
- // now.
- ctx->drawing = true;
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
- ctx->drawing = false;
-
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->tokens)
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
- else if (vrend_state.use_gles && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]) {
- VREND_DEBUG(dbg_shader, ctx, "Need to inject a TCS\n");
- vrend_inject_tcs(ctx, info->vertices_per_patch);
-
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
- }
-
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
-
- if (!ctx->sub->shaders[PIPE_SHADER_VERTEX]->current ||
- !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current ||
- (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && !ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current) ||
- (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && !ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current) ||
- (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && !ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current)) {
- vrend_printf( "failure to compile shader variants: %s\n", ctx->debug_name);
- return 0;
- }
- same_prog = true;
- if (ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_VERTEX])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY])
- same_prog = false;
- if (ctx->sub->prog && ctx->sub->prog->dual_src_linked != dual_src)
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL])
- same_prog = false;
-
- if (!same_prog) {
- prog = lookup_shader_program(ctx,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id,
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id,
- ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id : 0,
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0,
- dual_src);
- if (!prog) {
- prog = add_shader_program(ctx,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->current,
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current,
- ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
- if (!prog)
- return 0;
- }
-
- ctx->sub->last_shader_idx = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
- } else
- prog = ctx->sub->prog;
- if (ctx->sub->prog != prog) {
- new_program = true;
- ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id;
- ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT] = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY] = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
- ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL] = ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL] = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id;
- ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = -1;
- ctx->sub->prog = prog;
-
- /* mark all constbufs and sampler views as dirty */
- for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) {
- ctx->sub->const_bufs_dirty[stage] = ~0;
- ctx->sub->sampler_views_dirty[stage] = ~0;
- }
+ if (sub_ctx->shader_dirty || sub_ctx->swizzle_output_rgb_to_bgr)
+ new_program = vrend_select_program(sub_ctx, info);
- prog->ref_context = ctx->sub;
- }
- }
- if (!ctx->sub->prog) {
+ if (!sub_ctx->prog) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return 0;
}
- vrend_use_program(ctx, ctx->sub->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog->id);
- vrend_draw_bind_objects(ctx, new_program);
+ vrend_draw_bind_objects(sub_ctx, new_program);
- if (!ctx->sub->ve) {
+ if (!sub_ctx->ve) {
vrend_printf("illegal VE setup - skipping renderering\n");
return 0;
}
- float viewport_neg_val = ctx->sub->viewport_is_negative ? -1.0 : 1.0;
- if (ctx->sub->prog->viewport_neg_val != viewport_neg_val) {
- glUniform1f(ctx->sub->prog->vs_ws_adjust_loc, viewport_neg_val);
- ctx->sub->prog->viewport_neg_val = viewport_neg_val;
+ float viewport_neg_val = sub_ctx->viewport_is_negative ? -1.0 : 1.0;
+ if (sub_ctx->prog->viewport_neg_val != viewport_neg_val) {
+ glUniform1f(sub_ctx->prog->vs_ws_adjust_loc, viewport_neg_val);
+ sub_ctx->prog->viewport_neg_val = viewport_neg_val;
}
- if (ctx->sub->rs_state.clip_plane_enable) {
+ if (sub_ctx->rs_state.clip_plane_enable) {
for (i = 0 ; i < 8; i++) {
- glUniform4fv(ctx->sub->prog->clip_locs[i], 1, (const GLfloat *)&ctx->sub->ucp_state.ucp[i]);
+ glUniform4fv(sub_ctx->prog->clip_locs[i], 1, (const GLfloat *)&sub_ctx->ucp_state.ucp[i]);
}
}
if (has_feature(feat_gles31_vertex_attrib_binding))
- vrend_draw_bind_vertex_binding(ctx, ctx->sub->ve);
+ vrend_draw_bind_vertex_binding(ctx, sub_ctx->ve);
else
- vrend_draw_bind_vertex_legacy(ctx, ctx->sub->ve);
-
- for (i = 0 ; i < ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs; i++) {
- struct vrend_vertex_element_array *va = ctx->sub->ve;
- struct vrend_vertex_element *ve = &va->elements[i];
- int vbo_index = ve->base.vertex_buffer_index;
- if (!ctx->sub->vbo[vbo_index].buffer) {
- vrend_printf( "VBO missing vertex buffer\n");
- return 0;
- }
- }
+ vrend_draw_bind_vertex_legacy(ctx, sub_ctx->ve);
if (info->indexed) {
- struct vrend_resource *res = (struct vrend_resource *)ctx->sub->ib.buffer;
+ struct vrend_resource *res = (struct vrend_resource *)sub_ctx->ib.buffer;
if (!res) {
vrend_printf( "VBO missing indexed array buffer\n");
return 0;
@@ -4585,19 +4688,19 @@ int vrend_draw_vbo(struct vrend_context *ctx,
} else
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
- if (ctx->sub->current_so) {
- if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- glBeginTransformFeedback(get_gs_xfb_mode(ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
- else if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- glBeginTransformFeedback(get_tess_xfb_mode(ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
+ if (sub_ctx->current_so) {
+ if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
+ if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY])
+ glBeginTransformFeedback(get_gs_xfb_mode(sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
+ else if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL])
+ glBeginTransformFeedback(get_tess_xfb_mode(sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
+ sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
else
glBeginTransformFeedback(get_xfb_mode(info->mode));
- ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
- } else if (ctx->sub->current_so->xfb_state == XFB_STATE_PAUSED) {
+ sub_ctx->current_so->xfb_state = XFB_STATE_STARTED;
+ } else if (sub_ctx->current_so->xfb_state == XFB_STATE_PAUSED) {
glResumeTransformFeedback();
- ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
+ sub_ctx->current_so->xfb_state = XFB_STATE_STARTED;
}
}
@@ -4615,16 +4718,16 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (has_feature(feat_indirect_draw)) {
GLint buf = indirect_res ? indirect_res->id : 0;
- if (ctx->sub->draw_indirect_buffer != buf) {
+ if (sub_ctx->draw_indirect_buffer != buf) {
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buf);
- ctx->sub->draw_indirect_buffer = buf;
+ sub_ctx->draw_indirect_buffer = buf;
}
if (has_feature(feat_indirect_params)) {
GLint buf = indirect_params_res ? indirect_params_res->id : 0;
- if (ctx->sub->draw_indirect_params_buffer != buf) {
+ if (sub_ctx->draw_indirect_params_buffer != buf) {
glBindBuffer(GL_PARAMETER_BUFFER_ARB, buf);
- ctx->sub->draw_indirect_params_buffer = buf;
+ sub_ctx->draw_indirect_params_buffer = buf;
}
}
}
@@ -4637,9 +4740,9 @@ int vrend_draw_vbo(struct vrend_context *ctx,
* accept those blend equations.
* When we transmit the blend mode through alpha_src_factor, alpha_dst_factor is always 0.
*/
- uint32_t blend_mask_shader = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced;
- uint32_t blend_mode = ctx->sub->blend_state.rt[0].alpha_src_factor;
- uint32_t alpha_dst_factor = ctx->sub->blend_state.rt[0].alpha_dst_factor;
+ uint32_t blend_mask_shader = sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced;
+ uint32_t blend_mode = sub_ctx->blend_state.rt[0].alpha_src_factor;
+ uint32_t alpha_dst_factor = sub_ctx->blend_state.rt[0].alpha_dst_factor;
bool use_advanced_blending = !has_feature(feat_framebuffer_fetch) &&
has_feature(feat_blend_equation_advanced) &&
blend_mask_shader != 0 &&
@@ -4674,7 +4777,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
} else {
GLenum elsz;
GLenum mode = info->mode;
- switch (ctx->sub->ib.index_size) {
+ switch (sub_ctx->ib.index_size) {
case 1:
elsz = GL_UNSIGNED_BYTE;
break;
@@ -4697,17 +4800,17 @@ int vrend_draw_vbo(struct vrend_context *ctx,
glDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset);
} else if (info->index_bias) {
if (info->instance_count > 1)
- glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count, info->index_bias);
+ glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count, info->index_bias);
else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
+ glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
else
- glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
+ glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
} else if (info->instance_count > 1) {
- glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count);
+ glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count);
} else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
+ glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
else
- glDrawElements(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
+ glDrawElements(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
}
if (info->primitive_restart) {
@@ -4720,10 +4823,10 @@ int vrend_draw_vbo(struct vrend_context *ctx,
}
}
- if (ctx->sub->current_so && has_feature(feat_transform_feedback2)) {
- if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED) {
+ if (sub_ctx->current_so && has_feature(feat_transform_feedback2)) {
+ if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED) {
glPauseTransformFeedback();
- ctx->sub->current_so->xfb_state = XFB_STATE_PAUSED;
+ sub_ctx->current_so->xfb_state = XFB_STATE_PAUSED;
}
}
return 0;
@@ -4741,56 +4844,58 @@ void vrend_launch_grid(struct vrend_context *ctx,
if (!has_feature(feat_compute_shader))
return;
- if (ctx->sub->cs_shader_dirty) {
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
+ if (sub_ctx->cs_shader_dirty) {
struct vrend_linked_shader_program *prog;
bool cs_dirty;
- ctx->sub->cs_shader_dirty = false;
+ sub_ctx->cs_shader_dirty = false;
- if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]) {
+ if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return;
}
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
- if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current) {
+ vrend_shader_select(sub_ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
+ if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current) {
vrend_printf( "failure to compile shader variants: %s\n", ctx->debug_name);
return;
}
- if (ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_COMPUTE]) {
- prog = lookup_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id);
+ if (sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)sub_ctx->prog_ids[PIPE_SHADER_COMPUTE]) {
+ prog = lookup_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id);
if (!prog) {
- prog = add_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current);
+ prog = add_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current);
if (!prog)
return;
}
} else
- prog = ctx->sub->prog;
+ prog = sub_ctx->prog;
- if (ctx->sub->prog != prog) {
+ if (sub_ctx->prog != prog) {
new_program = true;
- ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = -1;
- ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id;
- ctx->sub->prog = prog;
- prog->ref_context = ctx->sub;
+ sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = 0;
+ sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id;
+ sub_ctx->prog = prog;
+ prog->ref_context = sub_ctx;
}
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
}
- if (!ctx->sub->prog) {
+ if (!sub_ctx->prog) {
vrend_printf("%s: Skipping compute shader execution due to missing shaders: %s\n",
__func__, ctx->debug_name);
return;
}
- vrend_use_program(ctx, ctx->sub->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog->id);
- vrend_draw_bind_ubo_shader(ctx, PIPE_SHADER_COMPUTE, 0);
- vrend_draw_bind_const_shader(ctx, PIPE_SHADER_COMPUTE, new_program);
- vrend_draw_bind_samplers_shader(ctx, PIPE_SHADER_COMPUTE, 0);
- vrend_draw_bind_images_shader(ctx, PIPE_SHADER_COMPUTE);
- vrend_draw_bind_ssbo_shader(ctx, PIPE_SHADER_COMPUTE);
- vrend_draw_bind_abo_shader(ctx);
+ vrend_draw_bind_ubo_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
+ vrend_draw_bind_const_shader(sub_ctx, PIPE_SHADER_COMPUTE, new_program);
+ vrend_draw_bind_samplers_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
+ vrend_draw_bind_images_shader(sub_ctx, PIPE_SHADER_COMPUTE);
+ vrend_draw_bind_ssbo_shader(sub_ctx, PIPE_SHADER_COMPUTE);
+ vrend_draw_bind_abo_shader(sub_ctx);
if (indirect_handle) {
indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
@@ -4943,15 +5048,15 @@ static inline bool is_const_blend(int blend_factor)
blend_factor == PIPE_BLENDFACTOR_INV_CONST_ALPHA);
}
-static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_state *state)
+static void vrend_hw_emit_blend(struct vrend_sub_context *sub_ctx, struct pipe_blend_state *state)
{
- if (state->logicop_enable != ctx->sub->hw_blend_state.logicop_enable) {
- ctx->sub->hw_blend_state.logicop_enable = state->logicop_enable;
+ if (state->logicop_enable != sub_ctx->hw_blend_state.logicop_enable) {
+ sub_ctx->hw_blend_state.logicop_enable = state->logicop_enable;
if (vrend_state.use_gles) {
if (can_emulate_logicop(state->logicop_func))
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
else
- report_gles_warn(ctx, GLES_WARN_LOGIC_OP);
+ report_gles_warn(sub_ctx->parent, GLES_WARN_LOGIC_OP);
} else if (state->logicop_enable) {
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(translate_logicop(state->logicop_func));
@@ -4969,7 +5074,7 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
if (state->rt[i].blend_enable) {
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, i);
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, i);
if (dual_src && !has_feature(feat_dual_src_blend)) {
vrend_printf( "dual src blend requested but not supported for rt %d\n", i);
continue;
@@ -4985,8 +5090,8 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
} else
glDisableIndexedEXT(GL_BLEND, i);
- if (state->rt[i].colormask != ctx->sub->hw_blend_state.rt[i].colormask) {
- ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
+ if (state->rt[i].colormask != sub_ctx->hw_blend_state.rt[i].colormask) {
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
@@ -4995,7 +5100,7 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
}
} else {
if (state->rt[0].blend_enable) {
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
if (dual_src && !has_feature(feat_dual_src_blend)) {
vrend_printf( "dual src blend requested but not supported for rt 0\n");
}
@@ -5010,19 +5115,19 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
else
glDisable(GL_BLEND);
- if (state->rt[0].colormask != ctx->sub->hw_blend_state.rt[0].colormask ||
- (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (state->rt[0].colormask != sub_ctx->hw_blend_state.rt[0].colormask ||
+ (sub_ctx->hw_blend_state.independent_blend_enable &&
!state->independent_blend_enable)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
- ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
- ctx->sub->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
+ sub_ctx->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
if (has_feature(feat_multisample)) {
if (state->alpha_to_coverage)
@@ -5049,22 +5154,22 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
b) patching colormask/blendcolor/blendfactors for A8/A16 format
emulation using GL_R8/GL_R16.
*/
-static void vrend_patch_blend_state(struct vrend_context *ctx)
+static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_blend_state new_state = ctx->sub->blend_state;
- struct pipe_blend_state *state = &ctx->sub->blend_state;
+ struct pipe_blend_state new_state = sub_ctx->blend_state;
+ struct pipe_blend_state *state = &sub_ctx->blend_state;
bool swizzle_blend_color = false;
- struct pipe_blend_color blend_color = ctx->sub->blend_color;
+ struct pipe_blend_color blend_color = sub_ctx->blend_color;
int i;
- if (ctx->sub->nr_cbufs == 0) {
- ctx->sub->blend_state_dirty = false;
+ if (sub_ctx->nr_cbufs == 0) {
+ sub_ctx->blend_state_dirty = false;
return;
}
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
- if (i < ctx->sub->nr_cbufs && ctx->sub->surf[i]) {
- if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format)) {
+ if (i < sub_ctx->nr_cbufs && sub_ctx->surf[i]) {
+ if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format)) {
if (state->rt[i].blend_enable) {
new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor);
@@ -5078,7 +5183,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
is_const_blend(new_state.rt[i].rgb_dst_factor)) {
swizzle_blend_color = true;
}
- } else if (!util_format_has_alpha(ctx->sub->surf[i]->format)) {
+ } else if (!util_format_has_alpha(sub_ctx->surf[i]->format)) {
if (!(is_dst_blend(state->rt[i].rgb_src_factor) ||
is_dst_blend(state->rt[i].rgb_dst_factor) ||
is_dst_blend(state->rt[i].alpha_src_factor) ||
@@ -5092,7 +5197,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
}
}
- vrend_hw_emit_blend(ctx, &new_state);
+ vrend_hw_emit_blend(sub_ctx, &new_state);
if (swizzle_blend_color) {
blend_color.color[0] = blend_color.color[3];
@@ -5106,7 +5211,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
blend_color.color[2],
blend_color.color[3]);
- ctx->sub->blend_state_dirty = false;
+ sub_ctx->blend_state_dirty = false;
}
void vrend_object_bind_blend(struct vrend_context *ctx,
@@ -5184,41 +5289,41 @@ void vrend_object_bind_dsa(struct vrend_context *ctx,
vrend_hw_emit_dsa(ctx);
}
-static void vrend_update_frontface_state(struct vrend_context *ctx)
+static void vrend_update_frontface_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
+ struct pipe_rasterizer_state *state = &sub_ctx->rs_state;
int front_ccw = state->front_ccw;
- front_ccw ^= (ctx->sub->inverted_fbo_content ? 0 : 1);
+ front_ccw ^= (sub_ctx->inverted_fbo_content ? 0 : 1);
if (front_ccw)
glFrontFace(GL_CCW);
else
glFrontFace(GL_CW);
}
-void vrend_update_stencil_state(struct vrend_context *ctx)
+void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_depth_stencil_alpha_state *state = ctx->sub->dsa;
+ struct pipe_depth_stencil_alpha_state *state = sub_ctx->dsa;
int i;
if (!state)
return;
if (!state->stencil[1].enabled) {
if (state->stencil[0].enabled) {
- vrend_stencil_test_enable(ctx, true);
+ vrend_stencil_test_enable(sub_ctx, true);
glStencilOp(translate_stencil_op(state->stencil[0].fail_op),
translate_stencil_op(state->stencil[0].zfail_op),
translate_stencil_op(state->stencil[0].zpass_op));
glStencilFunc(GL_NEVER + state->stencil[0].func,
- ctx->sub->stencil_refs[0],
+ sub_ctx->stencil_refs[0],
state->stencil[0].valuemask);
glStencilMask(state->stencil[0].writemask);
} else
- vrend_stencil_test_enable(ctx, false);
+ vrend_stencil_test_enable(sub_ctx, false);
} else {
- vrend_stencil_test_enable(ctx, true);
+ vrend_stencil_test_enable(sub_ctx, true);
for (i = 0; i < 2; i++) {
GLenum face = (i == 1) ? GL_BACK : GL_FRONT;
@@ -5228,12 +5333,12 @@ void vrend_update_stencil_state(struct vrend_context *ctx)
translate_stencil_op(state->stencil[i].zpass_op));
glStencilFuncSeparate(face, GL_NEVER + state->stencil[i].func,
- ctx->sub->stencil_refs[i],
+ sub_ctx->stencil_refs[i],
state->stencil[i].valuemask);
glStencilMaskSeparate(face, state->stencil[i].writemask);
}
}
- ctx->sub->stencil_state_dirty = false;
+ sub_ctx->stencil_state_dirty = false;
}
static inline GLenum translate_fill(uint32_t mode)
@@ -5595,7 +5700,7 @@ static bool get_swizzled_border_color(enum virgl_formats fmt,
return false;
}
-static void vrend_apply_sampler_state(struct vrend_context *ctx,
+static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id,
@@ -5603,7 +5708,7 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
struct vrend_sampler_view *tview)
{
struct vrend_texture *tex = (struct vrend_texture *)res;
- struct vrend_sampler_state *vstate = ctx->sub->sampler_state[shader_type][id];
+ struct vrend_sampler_state *vstate = sub_ctx->sampler_state[shader_type][id];
struct pipe_sampler_state *state = &vstate->base;
bool set_all = false;
GLenum target = tex->base.target;
@@ -5635,11 +5740,11 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
border_color = state->border_color;
border_color.ui[0] = border_color.ui[3];
border_color.ui[3] = 0;
- glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, border_color.ui);
+ apply_sampler_border_color(sampler, border_color.ui);
} else {
union pipe_color_union border_color;
if (get_swizzled_border_color(tview->format, &state->border_color, &border_color))
- glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, border_color.ui);
+ apply_sampler_border_color(sampler, border_color.ui);
}
glBindSampler(sampler_id, sampler);
@@ -5668,7 +5773,7 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
if (tex->state.lod_bias != state->lod_bias || set_all) {
if (vrend_state.use_gles) {
if (state->lod_bias)
- report_gles_warn(ctx, GLES_WARN_LOD_BIAS);
+ report_gles_warn(sub_ctx->parent, GLES_WARN_LOD_BIAS);
} else {
glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias);
}
@@ -5785,6 +5890,33 @@ static void vrend_free_fences(void)
free_fence_locked(fence);
}
+static void vrend_free_fences_for_context(struct vrend_context *ctx)
+{
+ struct vrend_fence *fence, *stor;
+
+ if (vrend_state.sync_thread) {
+ pipe_mutex_lock(vrend_state.fence_mutex);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ if (vrend_state.fence_waiting) {
+ /* mark the fence invalid as the sync thread is still waiting on it */
+ vrend_state.fence_waiting->ctx = NULL;
+ }
+ pipe_mutex_unlock(vrend_state.fence_mutex);
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ }
+}
+
static bool do_wait(struct vrend_fence *fence, bool can_block)
{
bool done = false;
@@ -5816,6 +5948,7 @@ static void wait_sync(struct vrend_fence *fence)
pipe_mutex_lock(vrend_state.fence_mutex);
list_addtail(&fence->fences, &vrend_state.fence_list);
+ vrend_state.fence_waiting = NULL;
pipe_mutex_unlock(vrend_state.fence_mutex);
if (write_eventfd(vrend_state.eventfd, 1)) {
@@ -5843,6 +5976,7 @@ static int thread_sync(UNUSED void *arg)
if (vrend_state.stop_sync_thread)
break;
list_del(&fence->fences);
+ vrend_state.fence_waiting = fence;
pipe_mutex_unlock(vrend_state.fence_mutex);
wait_sync(fence);
pipe_mutex_lock(vrend_state.fence_mutex);
@@ -5977,6 +6111,8 @@ static bool use_integer() {
return true;
const char * a = (const char *) glGetString(GL_VENDOR);
+ if (!a)
+ return false;
if (strcmp(a, "ARM") == 0)
return true;
return false;
@@ -6203,6 +6339,7 @@ void vrend_destroy_context(struct vrend_context *ctx)
bool switch_0 = (ctx == vrend_state.current_ctx);
struct vrend_context *cur = vrend_state.current_ctx;
struct vrend_sub_context *sub, *tmp;
+ struct vrend_untyped_resource *untyped_res, *untyped_res_tmp;
if (switch_0) {
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
@@ -6234,6 +6371,10 @@ void vrend_destroy_context(struct vrend_context *ctx)
if(ctx->ctx_id)
vrend_renderer_force_ctx_0();
+ vrend_free_fences_for_context(ctx);
+
+ LIST_FOR_EACH_ENTRY_SAFE(untyped_res, untyped_res_tmp, &ctx->untyped_resources, head)
+ free(untyped_res);
vrend_ctx_resource_fini_table(ctx->res_hash);
FREE(ctx);
@@ -6265,6 +6406,7 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
list_inithead(&grctx->active_nontimer_query_list);
grctx->res_hash = vrend_ctx_resource_init_table();
+ list_inithead(&grctx->untyped_resources);
grctx->shader_cfg.use_gles = vrend_state.use_gles;
grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
@@ -6282,10 +6424,13 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
vrender_get_glsl_version(&grctx->shader_cfg.glsl_version);
+ if (!grctx->ctx_id)
+ grctx->fence_retire = vrend_clicbs->ctx0_fence_retire;
+
return grctx;
}
-static int check_resource_valid(struct vrend_renderer_resource_create_args *args,
+static int check_resource_valid(const struct vrend_renderer_resource_create_args *args,
char errmsg[256])
{
/* limit the target */
@@ -6320,6 +6465,10 @@ static int check_resource_valid(struct vrend_renderer_resource_create_args *args
snprintf(errmsg, 256, "Multisample textures don't support mipmaps");
return -1;
}
+ if (!format_can_texture_storage && vrend_state.use_gles) {
+ snprintf(errmsg, 256, "Unsupported multisample texture format %d", args->format);
+ return -1;
+ }
}
if (args->last_level > 0) {
@@ -6386,8 +6535,8 @@ static int check_resource_valid(struct vrend_renderer_resource_create_args *args
}
}
- if (format_can_texture_storage && !args->width) {
- snprintf(errmsg, 256, "Texture storage texture width must be >0");
+ if (args->target != PIPE_BUFFER && !args->width) {
+ snprintf(errmsg, 256, "Texture width must be >0");
return -1;
}
@@ -6589,8 +6738,67 @@ static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint3
glBindBufferARB(gr->target, 0);
}
+static int
+vrend_resource_alloc_buffer(struct vrend_resource *gr, uint32_t flags)
+{
+ const uint32_t bind = gr->base.bind;
+ const uint32_t size = gr->base.width0;
+
+ if (bind == VIRGL_BIND_CUSTOM) {
+ /* use iovec directly when attached */
+ gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
+ gr->ptr = malloc(size);
+ if (!gr->ptr)
+ return -ENOMEM;
+ } else if (bind == VIRGL_BIND_STAGING) {
+ /* staging buffers only use guest memory -- nothing to do. */
+ } else if (bind == VIRGL_BIND_INDEX_BUFFER) {
+ gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_STREAM_OUTPUT) {
+ gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_VERTEX_BUFFER) {
+ gr->target = GL_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_CONSTANT_BUFFER) {
+ gr->target = GL_UNIFORM_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_QUERY_BUFFER) {
+ gr->target = GL_QUERY_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_COMMAND_ARGS) {
+ gr->target = GL_DRAW_INDIRECT_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == 0 || bind == VIRGL_BIND_SHADER_BUFFER) {
+ gr->target = GL_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind & VIRGL_BIND_SAMPLER_VIEW) {
+ /*
+ * On Desktop we use GL_ARB_texture_buffer_object on GLES we use
+ * GL_EXT_texture_buffer (it is in the ANDROID extension pack).
+ */
+#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
+#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
+#endif
+
+ /* need to check GL version here */
+ if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
+ gr->target = GL_TEXTURE_BUFFER;
+ } else {
+ gr->target = GL_PIXEL_PACK_BUFFER_ARB;
+ }
+ vrend_create_buffer(gr, size, flags);
+ } else {
+ vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, bind);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static inline void
-vrend_renderer_resource_copy_args(struct vrend_renderer_resource_create_args *args,
+vrend_renderer_resource_copy_args(const struct vrend_renderer_resource_create_args *args,
struct vrend_resource *gr)
{
assert(gr);
@@ -6647,7 +6855,7 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
if (!virgl_gbm_gpu_import_required(gr->base.bind))
return;
- gr->egl_image = virgl_egl_image_from_dmabuf(egl, bo);
+ gr->egl_image = virgl_egl_image_from_gbm_bo(egl, bo);
if (!gr->egl_image) {
gr->gbm_bo = NULL;
gbm_bo_destroy(bo);
@@ -6661,19 +6869,12 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
#endif
}
-static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
- void *image_oes)
+static enum virgl_formats vrend_resource_fixup_emulated_bgra(struct vrend_resource *gr,
+ bool imported)
{
- uint level;
- GLenum internalformat, glformat, gltype;
- enum virgl_formats format = gr->base.format;
- struct vrend_texture *gt = (struct vrend_texture *)gr;
- struct pipe_resource *pr = &gr->base;
-
- if (pr->width0 == 0)
- return EINVAL;
-
- bool format_can_texture_storage = has_feature(feat_texture_storage) &&
+ const struct pipe_resource *pr = &gr->base;
+ const enum virgl_formats format = pr->format;
+ const bool format_can_texture_storage = has_feature(feat_texture_storage) &&
(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE);
/* On GLES there is no support for glTexImage*DMultisample and
@@ -6685,7 +6886,7 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
gr->base.bind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
}
- if (image_oes && !has_feature(feat_egl_image_storage))
+ if (imported && !has_feature(feat_egl_image_storage))
gr->base.bind &= ~VIRGL_BIND_PREFER_EMULATED_BGRA;
#ifdef ENABLE_MINIGBM_ALLOCATION
@@ -6694,8 +6895,19 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
gr->base.bind &= ~VIRGL_BIND_PREFER_EMULATED_BGRA;
#endif
- format = vrend_format_replace_emulated(gr->base.bind, gr->base.format);
- format_can_texture_storage = has_feature(feat_texture_storage) &&
+ return vrend_format_replace_emulated(gr->base.bind, format);
+}
+
+static int vrend_resource_alloc_texture(struct vrend_resource *gr,
+ enum virgl_formats format,
+ void *image_oes)
+{
+ uint level;
+ GLenum internalformat, glformat, gltype;
+ struct vrend_texture *gt = (struct vrend_texture *)gr;
+ struct pipe_resource *pr = &gr->base;
+
+ const bool format_can_texture_storage = has_feature(feat_texture_storage) &&
(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE);
if (format_can_texture_storage)
@@ -6855,21 +7067,21 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
!vrend_format_can_texture_view(gr->base.format)) {
for (int i = 0; i < gbm_bo_get_plane_count(gr->gbm_bo); i++) {
gr->aux_plane_egl_image[i] =
- virgl_egl_aux_plane_image_from_dmabuf(egl, gr->gbm_bo, i);
+ virgl_egl_aux_plane_image_from_gbm_bo(egl, gr->gbm_bo, i);
}
}
#endif
}
gt->state.max_lod = -1;
- gt->cur_swizzle_r = gt->cur_swizzle_g = gt->cur_swizzle_b = gt->cur_swizzle_a = -1;
+ gt->cur_swizzle[0] = gt->cur_swizzle[1] = gt->cur_swizzle[2] = gt->cur_swizzle[3] = -1;
gt->cur_base = -1;
gt->cur_max = 10000;
return 0;
}
-struct pipe_resource *
-vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, void *image_oes)
+static struct vrend_resource *
+vrend_resource_create(const struct vrend_renderer_resource_create_args *args)
{
struct vrend_resource *gr;
int ret;
@@ -6893,65 +7105,31 @@ vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args,
pipe_reference_init(&gr->base.reference, 1);
- if (args->target == PIPE_BUFFER) {
- if (args->bind == VIRGL_BIND_CUSTOM) {
- /* use iovec directly when attached */
- gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
- gr->ptr = malloc(args->width);
- if (!gr->ptr) {
- FREE(gr);
- return NULL;
- }
- } else if (args->bind == VIRGL_BIND_STAGING) {
- /* staging buffers only use guest memory -- nothing to do. */
- } else if (args->bind == VIRGL_BIND_INDEX_BUFFER) {
- gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_STREAM_OUTPUT) {
- gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_VERTEX_BUFFER) {
- gr->target = GL_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_CONSTANT_BUFFER) {
- gr->target = GL_UNIFORM_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_QUERY_BUFFER) {
- gr->target = GL_QUERY_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_COMMAND_ARGS) {
- gr->target = GL_DRAW_INDIRECT_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == 0 || args->bind == VIRGL_BIND_SHADER_BUFFER) {
- gr->target = GL_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind & VIRGL_BIND_SAMPLER_VIEW) {
- /*
- * On Desktop we use GL_ARB_texture_buffer_object on GLES we use
- * GL_EXT_texture_buffer (it is in the ANDRIOD extension pack).
- */
-#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
-#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
-#endif
+ return gr;
+}
- /* need to check GL version here */
- if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
- gr->target = GL_TEXTURE_BUFFER;
- } else {
- gr->target = GL_PIXEL_PACK_BUFFER_ARB;
- }
- vrend_create_buffer(gr, args->width, args->flags);
- } else {
- vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, args->bind);
- FREE(gr);
- return NULL;
- }
+struct pipe_resource *
+vrend_renderer_resource_create(const struct vrend_renderer_resource_create_args *args,
+ void *image_oes)
+{
+ struct vrend_resource *gr;
+ int ret;
+
+ gr = vrend_resource_create(args);
+ if (!gr)
+ return NULL;
+
+ if (args->target == PIPE_BUFFER) {
+ ret = vrend_resource_alloc_buffer(gr, args->flags);
} else {
- int r = vrend_renderer_resource_allocate_texture(gr, image_oes);
- if (r) {
- FREE(gr);
- return NULL;
- }
+ const enum virgl_formats format =
+ vrend_resource_fixup_emulated_bgra(gr, image_oes);
+ ret = vrend_resource_alloc_texture(gr, format, image_oes);
+ }
+
+ if (ret) {
+ FREE(gr);
+ return NULL;
}
return &gr->base;
@@ -6959,9 +7137,6 @@ vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args,
void vrend_renderer_resource_destroy(struct vrend_resource *res)
{
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
if (has_bit(res->storage_bits, VREND_STORAGE_GL_TEXTURE)) {
glDeleteTextures(1, &res->id);
} else if (has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER)) {
@@ -6976,7 +7151,7 @@ void vrend_renderer_resource_destroy(struct vrend_resource *res)
glDeleteMemoryObjectsEXT(1, &res->memobj);
}
-#ifdef ENABLE_MINIGBM_ALLOCATION
+#ifdef HAVE_EPOXY_EGL_H
if (res->egl_image) {
virgl_egl_image_destroy(egl, res->egl_image);
for (unsigned i = 0; i < ARRAY_SIZE(res->aux_plane_egl_image); i++) {
@@ -6985,6 +7160,8 @@ void vrend_renderer_resource_destroy(struct vrend_resource *res)
}
}
}
+#endif
+#ifdef ENABLE_MINIGBM_ALLOCATION
if (res->gbm_bo)
gbm_bo_destroy(res->gbm_bo);
#endif
@@ -7274,8 +7451,8 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
{
void *data;
- if (is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) ||
- (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY) && res->iov)) {
+ if ((is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) ||
+ has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) && res->iov) {
return vrend_copy_iovec(iov, num_iovs, info->offset,
res->iov, res->num_iovs, info->box->x,
info->box->width, res->ptr);
@@ -7321,7 +7498,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
uint32_t layer_stride = info->layer_stride;
if (ctx)
- vrend_use_program(ctx, 0);
+ vrend_use_program(ctx->sub, 0);
else
glUseProgram(0);
@@ -7393,21 +7570,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if ((!vrend_state.use_core_profile) && (res->y_0_top)) {
GLuint buffers;
+ GLuint fb_id;
- if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level) {
- GLuint fb_id;
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
- vrend_fb_bind_texture(res, 0, info->level, 0);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = info->level;
- } else {
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
- }
+ glGenFramebuffers(1, &fb_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
+ vrend_fb_bind_texture(res, 0, info->level, 0);
buffers = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, &buffers);
@@ -7415,7 +7582,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if (ctx) {
vrend_depth_test_enable(ctx, false);
vrend_alpha_test_enable(ctx, false);
- vrend_stencil_test_enable(ctx, false);
+ vrend_stencil_test_enable(ctx->sub, false);
} else {
glDisable(GL_DEPTH_TEST);
glDisable(GL_ALPHA_TEST);
@@ -7425,6 +7592,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
glWindowPos2i(info->box->x, res->y_0_top ? (int)res->base.height0 - info->box->y : info->box->y);
glDrawPixels(info->box->width, info->box->height, glformat, gltype,
data);
+ glDeleteFramebuffers(1, &fb_id);
} else {
uint32_t comp_size;
GLint old_tex = 0;
@@ -7624,17 +7792,28 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
return 0;
}
-static void do_readpixels(GLint x, GLint y,
+static void do_readpixels(struct vrend_resource *res,
+ int idx, uint32_t level, uint32_t layer,
+ GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
GLsizei bufSize, void *data)
{
+ GLuint fb_id;
+
+ glGenFramebuffers(1, &fb_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
+
+ vrend_fb_bind_texture(res, idx, level, layer);
+
if (has_feature(feat_arb_robustness))
glReadnPixelsARB(x, y, width, height, format, type, bufSize, data);
else if (has_feature(feat_gles_khr_robustness))
glReadnPixelsKHR(x, y, width, height, format, type, bufSize, data);
else
glReadPixels(x, y, width, height, format, type, data);
+
+ glDeleteFramebuffers(1, &fb_id);
}
static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
@@ -7644,7 +7823,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
{
char *myptr = (char*)iov[0].iov_base + info->offset;
int need_temp = 0;
- GLuint fb_id;
char *data;
bool actually_invert, separate_invert = false;
GLenum format, type;
@@ -7657,7 +7835,7 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
GLint old_fbo;
if (ctx)
- vrend_use_program(ctx, 0);
+ vrend_use_program(ctx->sub, 0);
else
glUseProgram(0);
@@ -7705,22 +7883,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &old_fbo);
- if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level ||
- (int)res->readback_fb_z != info->box->z) {
-
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
-
- vrend_fb_bind_texture(res, 0, info->level, info->box->z);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = info->level;
- res->readback_fb_z = info->box->z;
- } else
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
if (actually_invert)
y1 = h - info->box->y - info->box->height;
else
@@ -7728,8 +7890,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
if (has_feature(feat_mesa_invert) && actually_invert)
glPixelStorei(GL_PACK_INVERT_MESA, 1);
- if (!vrend_format_is_ds(res->base.format))
- glReadBuffer(GL_COLOR_ATTACHMENT0);
if (!need_temp && row_stride)
glPixelStorei(GL_PACK_ROW_LENGTH, row_stride);
@@ -7787,7 +7947,8 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
}
}
- do_readpixels(info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data);
+ do_readpixels(res, 0, info->level, info->box->z, info->box->x, y1,
+ info->box->width, info->box->height, format, type, send_size, data);
if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) {
if (!vrend_state.use_core_profile)
@@ -8812,7 +8973,8 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
args.array_size = src_res->base.array_size;
intermediate_copy = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
vrend_renderer_resource_copy_args(&args, intermediate_copy);
- MAYBE_UNUSED int r = vrend_renderer_resource_allocate_texture(intermediate_copy, NULL);
+ /* this is PIPE_MASK_ZS and bgra fixup is not needed */
+ MAYBE_UNUSED int r = vrend_resource_alloc_texture(intermediate_copy, args.format, NULL);
assert(!r);
glGenFramebuffers(1, &intermediate_fbo);
@@ -9003,16 +9165,32 @@ void vrend_renderer_blit(struct vrend_context *ctx,
vrend_pause_render_condition(ctx, false);
}
-int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
+void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
+ vrend_context_fence_retire retire,
+ void *retire_data)
+{
+ assert(ctx->ctx_id);
+ ctx->fence_retire = retire;
+ ctx->fence_retire_data = retire_data;
+}
+
+int vrend_renderer_create_fence(struct vrend_context *ctx,
+ uint32_t flags,
+ void *fence_cookie)
{
struct vrend_fence *fence;
+ if (!ctx)
+ return EINVAL;
+
fence = malloc(sizeof(struct vrend_fence));
if (!fence)
return ENOMEM;
- fence->ctx_id = ctx_id;
- fence->fence_id = client_fence_id;
+ fence->ctx = ctx;
+ fence->flags = flags;
+ fence->fence_cookie = fence_cookie;
+
#ifdef HAVE_EPOXY_EGL_H
if (vrend_state.use_egl_fence) {
fence->eglsyncobj = virgl_egl_fence_create(egl);
@@ -9043,18 +9221,51 @@ int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
static void vrend_renderer_check_queries(void);
+static bool need_fence_retire_signal_locked(struct vrend_fence *fence)
+{
+ struct vrend_fence *next;
+
+ /* last fence */
+ if (fence->fences.next == &vrend_state.fence_list)
+ return true;
+
+ /* next fence belongs to a different context */
+ next = LIST_ENTRY(struct vrend_fence, fence->fences.next, fences);
+ if (next->ctx != fence->ctx)
+ return true;
+
+ /* not mergeable */
+ if (!(fence->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
+ return true;
+
+ return false;
+}
+
void vrend_renderer_check_fences(void)
{
+ struct list_head retired_fences;
struct vrend_fence *fence, *stor;
- uint32_t latest_id = 0;
+
+ list_inithead(&retired_fences);
if (vrend_state.sync_thread) {
flush_eventfd(vrend_state.eventfd);
pipe_mutex_lock(vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
- if (fence->fence_id > latest_id)
- latest_id = fence->fence_id;
- free_fence_locked(fence);
+ /* vrend_free_fences_for_context might have marked the fence invalid
+ * by setting fence->ctx to NULL
+ */
+ if (!fence->ctx) {
+ free_fence_locked(fence);
+ continue;
+ }
+
+ if (need_fence_retire_signal_locked(fence)) {
+ list_del(&fence->fences);
+ list_addtail(&fence->fences, &retired_fences);
+ } else {
+ free_fence_locked(fence);
+ }
}
pipe_mutex_unlock(vrend_state.fence_mutex);
} else {
@@ -9062,8 +9273,12 @@ void vrend_renderer_check_fences(void)
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
if (do_wait(fence, /* can_block */ false)) {
- latest_id = fence->fence_id;
- free_fence_locked(fence);
+ if (need_fence_retire_signal_locked(fence)) {
+ list_del(&fence->fences);
+ list_addtail(&fence->fences, &retired_fences);
+ } else {
+ free_fence_locked(fence);
+ }
} else {
/* don't bother checking any subsequent ones */
break;
@@ -9071,12 +9286,17 @@ void vrend_renderer_check_fences(void)
}
}
- if (latest_id == 0)
+ if (LIST_IS_EMPTY(&retired_fences))
return;
vrend_renderer_check_queries();
- vrend_clicbs->write_fence(latest_id);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) {
+ struct vrend_context *ctx = fence->ctx;
+ ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data);
+
+ free_fence_locked(fence);
+ }
}
static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result)
@@ -9387,7 +9607,11 @@ void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
if (buf) memcpy(buf, &value, size); \
glUnmapBuffer(GL_QUERY_BUFFER);
-#define BUFFER_OFFSET(i) ((void *)((char *)NULL + i))
+static inline void *buffer_offset(intptr_t i)
+{
+ return (void *)i;
+}
+
void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle,
uint32_t qbo_handle,
uint32_t wait, uint32_t result_type, uint32_t offset,
@@ -9422,16 +9646,16 @@ void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle,
glBindBuffer(GL_QUERY_BUFFER, res->id);
switch ((enum pipe_query_value_type)result_type) {
case PIPE_QUERY_TYPE_I32:
- glGetQueryObjectiv(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectiv(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_U32:
- glGetQueryObjectuiv(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectuiv(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_I64:
- glGetQueryObjecti64v(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjecti64v(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_U64:
- glGetQueryObjectui64v(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectui64v(q->id, qtype, buffer_offset(offset));
break;
}
} else {
@@ -9877,6 +10101,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
{
GLint max;
GLfloat range[2];
+ uint32_t video_memory;
/* Count this up when you add a feature flag that is used to set a CAP in
* the guest that was set unconditionally before. Then check that flag and
@@ -9990,7 +10215,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
else
caps->v2.max_vertex_attrib_stride = 2048;
- if (has_feature(feat_compute_shader)) {
+ if (has_feature(feat_compute_shader) && (vrend_state.use_gles || gl_ver >= 33)) {
glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, (GLint*)&caps->v2.max_compute_work_group_invocations);
glGetIntegerv(GL_MAX_COMPUTE_SHARED_MEMORY_SIZE, (GLint*)&caps->v2.max_compute_shared_memory_size);
glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0, (GLint*)&caps->v2.max_compute_grid_size[0]);
@@ -10177,6 +10402,22 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (has_feature(feat_blend_equation_advanced))
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_BLEND_EQUATION;
+
+#ifdef HAVE_EPOXY_EGL_H
+ if (egl)
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_UNTYPED_RESOURCE;
+#endif
+
+ video_memory = vrend_winsys_query_video_memory();
+ if (video_memory) {
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_VIDEO_MEMORY;
+ caps->v2.max_video_memory = video_memory;
+ }
+
+ if (has_feature(feat_ati_meminfo) || has_feature(feat_nvx_gpu_memory_info)) {
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_MEMINFO;
+ }
+
}
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
@@ -10279,26 +10520,7 @@ void *vrend_renderer_get_cursor_contents(struct pipe_resource *pres,
glBindTexture(res->target, res->id);
glGetnTexImageARB(res->target, 0, format, type, size, data);
} else if (vrend_state.use_gles) {
- GLuint fb_id;
-
- if (res->readback_fb_id == 0 || res->readback_fb_level != 0 || res->readback_fb_z != 0) {
-
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
-
- vrend_fb_bind_texture(res, 0, 0, 0);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = 0;
- res->readback_fb_z = 0;
- } else {
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
- }
-
- do_readpixels(0, 0, *width, *height, format, type, size, data);
+ do_readpixels(res, 0, 0, 0, 0, 0, *width, *height, format, type, size, data);
} else {
glBindTexture(res->target, res->id);
glGetTexImage(res->target, 0, format, type, data);
@@ -10355,17 +10577,52 @@ void vrend_renderer_get_rect(struct pipe_resource *pres,
}
void vrend_renderer_attach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id,
- struct pipe_resource *pres)
-{
- struct vrend_resource *res = (struct vrend_resource *)pres;
- vrend_ctx_resource_insert(ctx->res_hash, res_id, res);
+ struct virgl_resource *res)
+{
+ if (!res->pipe_resource) {
+ /* move the last untyped resource from cache to list */
+ if (unlikely(ctx->untyped_resource_cache)) {
+ struct virgl_resource *last = ctx->untyped_resource_cache;
+ struct vrend_untyped_resource *wrapper = malloc(sizeof(*wrapper));
+ if (wrapper) {
+ wrapper->resource = last;
+ list_add(&wrapper->head, &ctx->untyped_resources);
+ } else {
+ vrend_printf("dropping attached resource %d due to OOM\n", last->res_id);
+ }
+ }
+
+ ctx->untyped_resource_cache = res;
+ /* defer to vrend_renderer_pipe_resource_set_type */
+ return;
+ }
+
+ vrend_ctx_resource_insert(ctx->res_hash,
+ res->res_id,
+ (struct vrend_resource *)res->pipe_resource);
}
void vrend_renderer_detach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id)
+ struct virgl_resource *res)
{
- vrend_ctx_resource_remove(ctx->res_hash, res_id);
+ if (!res->pipe_resource) {
+ if (ctx->untyped_resource_cache == res) {
+ ctx->untyped_resource_cache = NULL;
+ } else {
+ struct vrend_untyped_resource *iter;
+ LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) {
+ if (iter->resource == res) {
+ list_del(&iter->head);
+ free(iter);
+ break;
+ }
+ }
+ }
+
+ return;
+ }
+
+ vrend_ctx_resource_remove(ctx->res_hash, res->res_id);
}
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
@@ -10438,6 +10695,7 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
+ sub->parent = ctx;
vrend_clicbs->make_current(sub->gl_context);
/* enable if vrend_renderer_init function has done it as well */
@@ -10463,7 +10721,9 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
glBindFramebuffer(GL_FRAMEBUFFER, sub->fb_id);
glGenFramebuffers(2, sub->blit_fb_ids);
- list_inithead(&sub->programs);
+ for (int i = 0; i < VREND_PROGRAM_NQUEUES; ++i)
+ list_inithead(&sub->gl_programs[i]);
+ list_inithead(&sub->cs_programs);
list_inithead(&sub->streamout_list);
sub->object_hash = vrend_object_init_ctx_table();
@@ -10571,6 +10831,7 @@ int vrend_renderer_export_query(struct pipe_resource *pres,
*/
export_query->out_num_fds = 0;
export_query->out_fourcc = 0;
+ export_query->out_modifier = DRM_FORMAT_MOD_INVALID;
if (export_query->in_export_fds)
return -EINVAL;
@@ -10578,7 +10839,7 @@ int vrend_renderer_export_query(struct pipe_resource *pres,
}
int vrend_renderer_pipe_resource_create(struct vrend_context *ctx, uint32_t blob_id,
- struct vrend_renderer_resource_create_args *args)
+ const struct vrend_renderer_resource_create_args *args)
{
struct vrend_resource *res;
res = (struct vrend_resource *)vrend_renderer_resource_create(args, NULL);
@@ -10608,16 +10869,128 @@ struct pipe_resource *vrend_get_blob_pipe(struct vrend_context *ctx, uint64_t bl
return NULL;
}
-int vrend_renderer_resource_get_map_info(struct pipe_resource *pres, uint32_t *map_info)
+int
+vrend_renderer_pipe_resource_set_type(struct vrend_context *ctx,
+ uint32_t res_id,
+ const struct vrend_renderer_resource_set_type_args *args)
{
- struct vrend_resource *res = (struct vrend_resource *)pres;
- if (!res->map_info)
- return -EINVAL;
+ struct virgl_resource *res = NULL;
+
+ /* look up the untyped resource */
+ if (ctx->untyped_resource_cache &&
+ ctx->untyped_resource_cache->res_id == res_id) {
+ res = ctx->untyped_resource_cache;
+ ctx->untyped_resource_cache = NULL;
+ } else {
+ /* cache miss */
+ struct vrend_untyped_resource *iter;
+ LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) {
+ if (iter->resource->res_id == res_id) {
+ res = iter->resource;
+ list_del(&iter->head);
+ free(iter);
+ break;
+ }
+ }
+ }
+
+ /* either a bad res_id or the resource is already typed */
+ if (!res) {
+ if (vrend_renderer_ctx_res_lookup(ctx, res_id))
+ return 0;
+
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_id);
+ return EINVAL;
+ }
+
+ /* resource is still untyped */
+ if (!res->pipe_resource) {
+#ifdef HAVE_EPOXY_EGL_H
+ const struct vrend_renderer_resource_create_args create_args = {
+ .target = PIPE_TEXTURE_2D,
+ .format = args->format,
+ .bind = args->bind,
+ .width = args->width,
+ .height = args->height,
+ .depth = 1,
+ .array_size = 1,
+ .last_level = 0,
+ .nr_samples = 0,
+ .flags = 0,
+ };
+ int plane_fds[VIRGL_GBM_MAX_PLANES];
+ struct vrend_resource *gr;
+ uint32_t virgl_format;
+ uint32_t drm_format;
+ int ret;
+
+ if (res->fd_type != VIRGL_RESOURCE_FD_DMABUF)
+ return EINVAL;
+
+ for (uint32_t i = 0; i < args->plane_count; i++)
+ plane_fds[i] = res->fd;
+
+ gr = vrend_resource_create(&create_args);
+ if (!gr)
+ return ENOMEM;
+
+ virgl_format = vrend_resource_fixup_emulated_bgra(gr, true);
+ drm_format = 0;
+ if (virgl_gbm_convert_format(&virgl_format, &drm_format)) {
+ vrend_printf("%s: unsupported format %d\n", __func__, virgl_format);
+ FREE(gr);
+ return EINVAL;
+ }
+
+ gr->egl_image = virgl_egl_image_from_dmabuf(egl,
+ args->width,
+ args->height,
+ drm_format,
+ args->modifier,
+ args->plane_count,
+ plane_fds,
+ args->plane_strides,
+ args->plane_offsets);
+ if (!gr->egl_image) {
+ vrend_printf("%s: failed to create egl image\n", __func__);
+ FREE(gr);
+ return EINVAL;
+ }
+
+ gr->storage_bits |= VREND_STORAGE_EGL_IMAGE;
+
+ ret = vrend_resource_alloc_texture(gr, virgl_format, gr->egl_image);
+ if (ret) {
+ virgl_egl_image_destroy(egl, gr->egl_image);
+ FREE(gr);
+ return ret;
+ }
+
+ /* "promote" the fd to pipe_resource */
+ close(res->fd);
+ res->fd = -1;
+ res->fd_type = VIRGL_RESOURCE_FD_INVALID;
+ res->pipe_resource = &gr->base;
+#else /* HAVE_EPOXY_EGL_H */
+ (void)args;
+ vrend_printf("%s: no EGL support \n", __func__);
+ return EINVAL;
+#endif /* HAVE_EPOXY_EGL_H */
+ }
+
+ vrend_ctx_resource_insert(ctx->res_hash,
+ res->res_id,
+ (struct vrend_resource *)res->pipe_resource);
- *map_info = res->map_info;
return 0;
}
+uint32_t vrend_renderer_resource_get_map_info(struct pipe_resource *pres)
+{
+ struct vrend_resource *res = (struct vrend_resource *)pres;
+ return res->map_info;
+}
+
int vrend_renderer_resource_map(struct pipe_resource *pres, void **map, uint64_t *out_size)
{
struct vrend_resource *res = (struct vrend_resource *)pres;
@@ -10646,7 +11019,41 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres)
return 0;
}
-int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
+int vrend_renderer_create_ctx0_fence(uint32_t fence_id)
+{
+ void *fence_cookie = (void *)(uintptr_t)fence_id;
+ return vrend_renderer_create_fence(vrend_state.ctx0,
+ VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_cookie);
+}
+
+static bool find_ctx0_fence_locked(struct list_head *fence_list,
+ void *fence_cookie,
+ bool *seen_first,
+ struct vrend_fence **fence)
+{
+ struct vrend_fence *iter;
+
+ LIST_FOR_EACH_ENTRY(iter, fence_list, fences) {
+ /* only consider ctx0 fences */
+ if (iter->ctx != vrend_state.ctx0)
+ continue;
+
+ if (iter->fence_cookie == fence_cookie) {
+ *fence = iter;
+ return true;
+ }
+
+ if (!*seen_first) {
+ if (fence_cookie < iter->fence_cookie)
+ return true;
+ *seen_first = true;
+ }
+ }
+
+ return false;
+}
+
+int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd) {
#ifdef HAVE_EPOXY_EGL_H
if (!vrend_state.use_egl_fence) {
return -EINVAL;
@@ -10655,45 +11062,61 @@ int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
if (vrend_state.sync_thread)
pipe_mutex_lock(vrend_state.fence_mutex);
+ void *fence_cookie = (void *)(uintptr_t)fence_id;
+ bool seen_first = false;
struct vrend_fence *fence = NULL;
- struct vrend_fence *iter;
- uint32_t min_fence_id = UINT_MAX;
-
- if (!LIST_IS_EMPTY(&vrend_state.fence_list)) {
- min_fence_id = LIST_ENTRY(struct vrend_fence, vrend_state.fence_list.next, fences)->fence_id;
- } else if (!LIST_IS_EMPTY(&vrend_state.fence_wait_list)) {
- min_fence_id =
- LIST_ENTRY(struct vrend_fence, vrend_state.fence_wait_list.next, fences)->fence_id;
- }
-
- if (fence_id < min_fence_id) {
- if (vrend_state.sync_thread)
- pipe_mutex_unlock(vrend_state.fence_mutex);
- return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
- }
-
- LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_list, fences) {
- if (iter->fence_id == fence_id) {
- fence = iter;
- break;
- }
- }
-
- if (!fence) {
- LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_wait_list, fences) {
- if (iter->fence_id == fence_id) {
- fence = iter;
- break;
- }
- }
+ bool found = find_ctx0_fence_locked(&vrend_state.fence_list,
+ fence_cookie,
+ &seen_first,
+ &fence);
+ if (!found) {
+ found = find_ctx0_fence_locked(&vrend_state.fence_wait_list,
+ fence_cookie,
+ &seen_first,
+ &fence);
+ /* consider signaled when no active ctx0 fence at all */
+ if (!found && !seen_first)
+ found = true;
}
if (vrend_state.sync_thread)
pipe_mutex_unlock(vrend_state.fence_mutex);
- if (fence && virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd)) {
- return 0;
+ if (found) {
+ if (fence)
+ return virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd) ? 0 : -EINVAL;
+ else
+ return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
}
#endif
return -EINVAL;
}
+
+void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle)
+{
+ struct vrend_resource *res;
+ struct virgl_memory_info *info;
+
+ res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
+
+ info = (struct virgl_memory_info *)res->iov->iov_base;
+
+ if (has_feature(feat_nvx_gpu_memory_info)) {
+ int i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &i);
+ info->total_device_memory = i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &i);
+ info->total_staging_memory = i - info->total_device_memory;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX, &i);
+ info->nr_device_memory_evictions = i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX, &i);
+ info->device_memory_evicted = i;
+ }
+
+ if (has_feature(feat_ati_meminfo)) {
+ int i[4];
+ glGetIntegerv(GL_VBO_FREE_MEMORY_ATI, i);
+ info->avail_device_memory = i[0];
+ info->avail_staging_memory = i[2];
+ }
+}