author     android-build-team Robot <android-build-team-robot@google.com>  2021-03-07 00:06:56 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2021-03-07 00:06:56 +0000
commit     bbc1ba7625a2adeb71ac4a8bc3359e1679200bf3 (patch)
tree       1418aa452ff3c020803ac1248aedd190ff7228ec
parent     55faa7931ff8b239badf8d83399e5a5bb412ad46 (diff)
parent     6ff9940a7463feb3c3f4e4a21e6c84ae4961f9ec (diff)
download   virglrenderer-android12L-gsi.tar.gz
Change-Id: Ia803c43aeb55f6478d4a2377f4c7f94827e67a26
-rw-r--r--  ci/.gitlab-ci.yml | 2
-rw-r--r--  ci/build-container.sh | 88
-rw-r--r--  ci/previous_results/es_host_softpipe/deqp_gl30/results.txt | 15
-rw-r--r--  ci/previous_results/es_host_softpipe/deqp_gl31/results.txt | 15
-rw-r--r--  ci/previous_results/es_host_softpipe/deqp_gl32/results.txt | 23
-rw-r--r--  ci/previous_results/es_host_softpipe/deqp_gles2/results.txt | 42
-rw-r--r--  ci/previous_results/es_host_softpipe/deqp_gles3/results.txt | 66
-rw-r--r--  ci/previous_results/es_host_softpipe/deqp_gles31/results.txt | 3
-rw-r--r--  ci/previous_results/es_host_softpipe/piglit_gles2/results.txt | 3
-rw-r--r--  ci/previous_results/es_host_softpipe/piglit_gles3/results.txt | 3
-rw-r--r--  ci/previous_results/gl_host_softpipe/deqp_gl30/results.txt | 15
-rw-r--r--  ci/previous_results/gl_host_softpipe/deqp_gl31/results.txt | 15
-rw-r--r--  ci/previous_results/gl_host_softpipe/deqp_gl32/results.txt | 23
-rw-r--r--  ci/previous_results/gl_host_softpipe/deqp_gles2/results.txt | 42
-rw-r--r--  ci/previous_results/gl_host_softpipe/deqp_gles3/results.txt | 66
-rw-r--r--  ci/previous_results/gl_host_softpipe/deqp_gles31/results.txt | 7
-rw-r--r--  ci/previous_results/gl_host_softpipe/piglit_gles2/results.txt | 3
-rw-r--r--  ci/previous_results/gl_host_softpipe/piglit_gles3/results.txt | 3
-rwxr-xr-x  ci/run_tests.sh | 8
-rw-r--r--  meson.build | 10
-rw-r--r--  meson_options.txt | 2
-rw-r--r--  perf-testing/Docker/Dockerfile | 299
-rwxr-xr-x  perf-testing/Docker/init.sh | 18
-rwxr-xr-x  perf-testing/Docker/merge_traces.py | 154
-rw-r--r--  perf-testing/Docker/perfetto-guest.cfg | 49
-rw-r--r--  perf-testing/Docker/perfetto-host.cfg | 55
-rwxr-xr-x  perf-testing/Docker/run.sh | 236
-rwxr-xr-x  perf-testing/Docker/run_perfetto_ui.sh | 13
-rw-r--r--  perf-testing/Docker/run_traces.sh | 119
-rw-r--r--  perf-testing/Docker/x86_64.config | 37
-rw-r--r--  perf-testing/README.md | 71
-rwxr-xr-x  perf-testing/build-dockerimage.sh | 18
-rwxr-xr-x  perf-testing/perfetto-ui.sh | 25
-rwxr-xr-x  perf-testing/run-trace-in-container.sh | 200
-rw-r--r--  prebuilt-intermediates/src/u_format_table.c | 784
-rw-r--r--  src/gallium/auxiliary/cso_cache/cso_cache.c | 4
-rw-r--r--  src/gallium/auxiliary/util/u_format.csv | 30
-rw-r--r--  src/gallium/auxiliary/util/u_format.h | 14
-rw-r--r--  src/meson.build | 4
-rw-r--r--  src/virgl_context.h | 36
-rw-r--r--  src/virgl_hw.h | 33
-rw-r--r--  src/virgl_protocol.h | 25
-rw-r--r--  src/virgl_resource.c | 18
-rw-r--r--  src/virgl_resource.h | 18
-rw-r--r--  src/virgl_util.c | 41
-rw-r--r--  src/virgl_util.h | 41
-rw-r--r--  src/virglrenderer.c | 164
-rw-r--r--  src/virglrenderer.h | 23
-rw-r--r--  src/virglrenderer_hw.h | 2
-rw-r--r--  src/vrend_debug.c | 1
-rw-r--r--  src/vrend_debug.h | 2
-rw-r--r--  src/vrend_decode.c | 113
-rw-r--r--  src/vrend_formats.c | 84
-rw-r--r--  src/vrend_renderer.c | 2113
-rw-r--r--  src/vrend_renderer.h | 54
-rw-r--r--  src/vrend_winsys.c | 9
-rw-r--r--  src/vrend_winsys.h | 6
-rw-r--r--  src/vrend_winsys_egl.c | 176
-rw-r--r--  src/vrend_winsys_egl.h | 14
-rw-r--r--  src/vrend_winsys_gbm.c | 3
-rw-r--r--  src/vrend_winsys_glx.c | 12
-rw-r--r--  src/vrend_winsys_glx.h | 1
-rw-r--r--  tests/meson.build | 1
-rw-r--r--  tests/test_virgl_cmd.c | 2
-rw-r--r--  tests/test_virgl_fence.c | 311
-rw-r--r--  tests/test_virgl_init.c | 2
-rw-r--r--  tests/test_virgl_resource.c | 2
-rw-r--r--  tests/test_virgl_transfer.c | 2
-rw-r--r--  vtest/vtest.h | 6
-rw-r--r--  vtest/vtest_fuzzer.c | 3
-rw-r--r--  vtest/vtest_protocol.h | 25
-rw-r--r--  vtest/vtest_renderer.c | 164
-rw-r--r--  vtest/vtest_server.c | 60
73 files changed, 5027 insertions(+), 1124 deletions(-)
diff --git a/ci/.gitlab-ci.yml b/ci/.gitlab-ci.yml
index 401cc3a3..8721e155 100644
--- a/ci/.gitlab-ci.yml
+++ b/ci/.gitlab-ci.yml
@@ -1,5 +1,5 @@
variables:
- FDO_DISTRIBUTION_TAG: "2020-03-30"
+ FDO_DISTRIBUTION_TAG: "2021-02-04"
FDO_DISTRIBUTION_VERSION: buster
FDO_UPSTREAM_REPO: "virgl/virglrenderer"
TEST_IMAGE: "$CI_REGISTRY_IMAGE/debian/$FDO_DISTRIBUTION_VERSION:$FDO_DISTRIBUTION_TAG"
diff --git a/ci/build-container.sh b/ci/build-container.sh
index 489b0c2f..a15629df 100644
--- a/ci/build-container.sh
+++ b/ci/build-container.sh
@@ -13,7 +13,7 @@ export CC="gcc-8"
export CXX="g++-8"
export CFLAGS="-g3"
export CXXFLAGS="-g3"
-export GIT_DATE="2020-11-11"
+export GIT_DATE="2020-02-02"
export MESA_DEBUG=1
echo 'path-exclude=/usr/share/doc/*' > /etc/dpkg/dpkg.cfg.d/99-exclude-cruft
@@ -42,7 +42,6 @@ apt-get -y install --no-install-recommends \
golang-go \
kbd \
libcurl4-openssl-dev \
- libepoxy-dev \
libgbm-dev \
libnss-systemd \
libpng-dev \
@@ -80,15 +79,62 @@ apt-get -y install --no-install-recommends \
xterm \
xvfb \
zlib1g-dev
+
apt-get -y build-dep --no-install-recommends \
- libepoxy-dev \
libdrm \
mesa \
piglit \
- virglrenderer
-apt-get -y remove valgrind
+ check
+
+apt-get -y remove valgrind libdrm-dev
rm -rf /var/lib/apt/lists/*
+export KNOWN_GOOD_DRM=libdrm-2.4.104
+mkdir /drm
+pushd /drm
+git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/drm.git . && \
+ git checkout ${KNOWN_GOOD_DRM} && \
+ git log --oneline -n 1 && \
+ mkdir -p build && \
+ meson build/ && \
+ meson configure build/ -Dprefix=/usr/local -Dlibdir=lib && \
+ ninja -C build/ install >/dev/null && \
+ rm -rf /drm
+ [ "$?" = "0" ] || exit 1
+popd
+
+export KNOWN_GOOD_MESA=${KNOWN_GOOD_MESA:-30a393f4581079ced1ac05d6b74c7408fbe26f83}
+echo $KNOWN_GOOD_MESA
+export MESA_REPO=https://gitlab.freedesktop.org/mesa/mesa.git
+echo $MESA_REPO
+mkdir /mesa
+pushd /mesa
+git clone --shallow-since="$GIT_DATE" ${MESA_REPO} . && \
+ git checkout ${KNOWN_GOOD_MESA} && \
+ git log --oneline -n 1 && \
+ mkdir -p build && \
+ meson build/ && \
+ meson configure build/ -Dprefix=/usr/local -Dplatforms=drm,x11,wayland,surfaceless -Ddri-drivers=i965 -Dgallium-drivers=swrast,virgl,radeonsi -Dbuildtype=debugoptimized -Dllvm=true -Dglx=dri -Dgallium-vdpau=false -Dgallium-va=false -Dvulkan-drivers=[] -Dlibdir=lib && \
+ ninja -C build/ install >/dev/null && \
+ rm -rf /mesa
+ [ "$?" = "0" ] || exit 1
+popd
+
+export KNOWN_GOOD_EPOXY=${KNOWN_GOOD_EPOXY:-1.5.4}
+mkdir /epoxy
+pushd /epoxy
+git clone --shallow-since="$GIT_DATE" https://github.com/anholt/libepoxy.git . && \
+ git fetch --tags &&
+ git checkout ${KNOWN_GOOD_EPOXY} && \
+ git log --oneline -n 1 && \
+ mkdir -p build && \
+ meson build/ && \
+ meson configure build/ -Dprefix=/usr/local -Dlibdir=lib && \
+ ninja -C build/ install >/dev/null && \
+ rm -rf /epoxy
+ [ "$?" == "0" ] || exit 1
+popd
+
export BATTERY_VERSION=0.1.23
mkdir /battery
pushd /battery
@@ -96,6 +142,7 @@ wget "https://github.com/VoltLang/Battery/releases/download/v${BATTERY_VERSION}/
tar xzvf battery-${BATTERY_VERSION}-x86_64-linux.tar.gz && \
rm battery-${BATTERY_VERSION}-x86_64-linux.tar.gz && \
mv battery /usr/local/bin
+ [ "$?" = "0" ] || exit 1
popd
mkdir /volt
@@ -109,6 +156,7 @@ git clone --depth=1 https://github.com/VoltLang/Watt.git && \
battery build && \
cp dEQP/deqp /usr/local/bin && \
rm -rf /volt
+ [ "$?" = "0" ] || exit 1
popd
# To avoid this error:
@@ -128,6 +176,7 @@ git clone --shallow-since="$GIT_DATE" https://github.com/KhronosGroup/VK-GL-CTS.
make -j$(nproc) && \
find . -name CMakeFiles | xargs rm -rf && \
find . -name lib\*.a | xargs rm -rf
+ [ "$?" = "0" ] || exit 1
popd
export KNOWN_GOOD_PIGLIT=${KNOWN_GOOD_PIGLIT:-08a92f4094c927276a20f608d7b3c5de2a72e9e7}
@@ -141,34 +190,7 @@ git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/piglit
rm -rf /usr/local/lib/piglit/generated_tests/spec/arb_vertex_attrib_64bit && \
rm -rf /usr/local/lib/piglit/generated_tests/spec/glsl-4.20 && \
rm -rf /piglit
+ [ "$?" = "0" ] || exit 1
popd
-export KNOWN_GOOD_DRM=libdrm-2.4.103
-mkdir /drm
-pushd /drm
-git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/drm.git . && \
- git checkout ${KNOWN_GOOD_DRM} && \
- git log --oneline -n 1 && \
- mkdir -p build && \
- meson build/ && \
- meson configure build/ -Dprefix=/usr/local -Dlibdir=lib && \
- ninja -C build/ install >/dev/null && \
- rm -rf /drm
-popd
-
-export KNOWN_GOOD_MESA=${KNOWN_GOOD_MESA:-1c17223c02b68679d67a4e4a6be8b9b7a80fa2e9}
-echo $KNOWN_GOOD_MESA
-export MESA_REPO=https://gitlab.freedesktop.org/mesa/mesa.git
-echo $MESA_REPO
-mkdir /mesa
-pushd /mesa
-git clone --shallow-since="$GIT_DATE" ${MESA_REPO} . && \
- git checkout ${KNOWN_GOOD_MESA} && \
- git log --oneline -n 1 && \
- mkdir -p build && \
- meson build/ && \
- meson configure build/ -Dprefix=/usr/local -Dplatforms=drm,x11,wayland,surfaceless -Ddri-drivers=i965 -Dgallium-drivers=swrast,virgl,radeonsi -Dbuildtype=debugoptimized -Dllvm=true -Dglx=dri -Dgallium-vdpau=false -Dgallium-va=false -Dvulkan-drivers=[] -Dlibdir=lib && \
- ninja -C build/ install >/dev/null && \
- rm -rf /mesa
-popd
diff --git a/ci/previous_results/es_host_softpipe/deqp_gl30/results.txt b/ci/previous_results/es_host_softpipe/deqp_gl30/results.txt
index cf24d0f2..9464606b 100644
--- a/ci/previous_results/es_host_softpipe/deqp_gl30/results.txt
+++ b/ci/previous_results/es_host_softpipe/deqp_gl30/results.txt
@@ -1,6 +1,21 @@
KHR-GL30.clip_distance.coverage Pass
KHR-GL30.clip_distance.functional Fail
KHR-GL30.clip_distance.negative Pass
+KHR-GL30.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.samplercubearrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.samplercubeshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.samplercubeshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.samplercubearrayshadow_bias_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.samplercubearrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.samplercubearrayshadow_vertex NotSupported
KHR-GL30.glsl_noperspective.functionaltest Fail
KHR-GL30.info.extensions Pass
KHR-GL30.info.renderer Pass
diff --git a/ci/previous_results/es_host_softpipe/deqp_gl31/results.txt b/ci/previous_results/es_host_softpipe/deqp_gl31/results.txt
index 0ae1bceb..b0fee2ef 100644
--- a/ci/previous_results/es_host_softpipe/deqp_gl31/results.txt
+++ b/ci/previous_results/es_host_softpipe/deqp_gl31/results.txt
@@ -9,6 +9,21 @@ KHR-GL31.CommonBugs.CommonBug_SparseBuffersWithCopyOps Pass
KHR-GL31.clip_distance.coverage Pass
KHR-GL31.clip_distance.functional Fail
KHR-GL31.clip_distance.negative Pass
+KHR-GL31.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.samplercubearrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.samplercubeshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.samplercubeshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.samplercubearrayshadow_bias_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.samplercubearrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.samplercubearrayshadow_vertex NotSupported
KHR-GL31.glsl_noperspective.functionaltest Fail
KHR-GL31.info.extensions Pass
KHR-GL31.info.renderer Pass
diff --git a/ci/previous_results/es_host_softpipe/deqp_gl32/results.txt b/ci/previous_results/es_host_softpipe/deqp_gl32/results.txt
index 2847e9cb..4aeffd58 100644
--- a/ci/previous_results/es_host_softpipe/deqp_gl32/results.txt
+++ b/ci/previous_results/es_host_softpipe/deqp_gl32/results.txt
@@ -9,10 +9,25 @@ KHR-GL32.CommonBugs.CommonBug_SparseBuffersWithCopyOps Pass
KHR-GL32.clip_distance.coverage Pass
KHR-GL32.clip_distance.functional Fail
KHR-GL32.clip_distance.negative Pass
+KHR-GL32.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.samplercubearrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.samplercubeshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.samplercubeshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.samplercubearrayshadow_bias_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.samplercubearrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.samplercubearrayshadow_vertex NotSupported
KHR-GL32.glsl_noperspective.functionaltest Fail
-KHR-GL32.gpu_shader5_gl.float_encoding Fail
-KHR-GL32.gpu_shader5_gl.function_overloading Fail
-KHR-GL32.gpu_shader5_gl.implicit_conversions Fail
+KHR-GL32.gpu_shader5_gl.float_encoding NotSupported
+KHR-GL32.gpu_shader5_gl.function_overloading NotSupported
+KHR-GL32.gpu_shader5_gl.implicit_conversions NotSupported
KHR-GL32.info.extensions Pass
KHR-GL32.info.renderer Pass
KHR-GL32.info.render_target Pass
@@ -459,7 +474,7 @@ KHR-GL32.texture_repeat_mode.rgba32ui_49x23_2_clamp_to_edge Pass
KHR-GL32.texture_repeat_mode.rgba32ui_49x23_2_mirrored_repeat Pass
KHR-GL32.texture_repeat_mode.rgba32ui_49x23_2_repeat Pass
KHR-GL32.texture_size_promotion.functional InternalError
-KHR-GL32.transform_feedback.api_errors_test Fail
+KHR-GL32.transform_feedback.api_errors_test Pass
KHR-GL32.transform_feedback.capture_geometry_interleaved_test Fail
KHR-GL32.transform_feedback.capture_geometry_separate_test Fail
KHR-GL32.transform_feedback.capture_special_interleaved_test Pass
diff --git a/ci/previous_results/es_host_softpipe/deqp_gles2/results.txt b/ci/previous_results/es_host_softpipe/deqp_gles2/results.txt
index 2020341d..3af7b918 100644
--- a/ci/previous_results/es_host_softpipe/deqp_gles2/results.txt
+++ b/ci/previous_results/es_host_softpipe/deqp_gles2/results.txt
@@ -590,6 +590,14 @@ dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos
dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos_x_neg_y_pos_z Pass
dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos_x_pos_y_neg_z Pass
dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos_x_pos_y_pos_z Pass
+dEQP-GLES2.functional.clip_control.depth_mode_one_to_one Pass
+dEQP-GLES2.functional.clip_control.depth_mode_zero_to_one Pass
+dEQP-GLES2.functional.clip_control.errors Pass
+dEQP-GLES2.functional.clip_control.face_culling Pass
+dEQP-GLES2.functional.clip_control.initial Pass
+dEQP-GLES2.functional.clip_control.modify_get Pass
+dEQP-GLES2.functional.clip_control.origin Pass
+dEQP-GLES2.functional.clip_control.viewport_bounds Pass
dEQP-GLES2.functional.color_clear.complex_rgba Pass
dEQP-GLES2.functional.color_clear.complex_rgb Pass
dEQP-GLES2.functional.color_clear.long_masked_rgba Pass
@@ -2998,7 +3006,7 @@ dEQP-GLES2.functional.rasterization.limits.points Fail
dEQP-GLES2.functional.rasterization.primitives.lines Pass
dEQP-GLES2.functional.rasterization.primitives.line_loop Pass
dEQP-GLES2.functional.rasterization.primitives.line_strip Pass
-dEQP-GLES2.functional.rasterization.primitives.points Fail
+dEQP-GLES2.functional.rasterization.primitives.points Pass
dEQP-GLES2.functional.rasterization.primitives.triangles Pass
dEQP-GLES2.functional.rasterization.primitives.triangle_fan Pass
dEQP-GLES2.functional.rasterization.primitives.triangle_strip Pass
@@ -4097,6 +4105,14 @@ dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static
dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_dynamic_loop_write_dynamic_read_fragment Pass
@@ -4129,6 +4145,14 @@ dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_loop_
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_dynamic_loop_write_dynamic_read_fragment Pass
@@ -4161,6 +4185,14 @@ dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_loop_r
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_dynamic_loop_write_dynamic_read_fragment Pass
@@ -4225,6 +4257,14 @@ dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_static_write_static_loop_r
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.uniform_array.float_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.uniform_array.float_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.uniform_array.float_dynamic_read_fragment Pass
diff --git a/ci/previous_results/es_host_softpipe/deqp_gles3/results.txt b/ci/previous_results/es_host_softpipe/deqp_gles3/results.txt
index 15049701..bc9ad24d 100644
--- a/ci/previous_results/es_host_softpipe/deqp_gles3/results.txt
+++ b/ci/previous_results/es_host_softpipe/deqp_gles3/results.txt
@@ -9370,7 +9370,7 @@ dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.interpolation.lines Pas
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.interpolation.triangles Pass
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.lines Pass
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.lines_wide Pass
-dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.points Fail
+dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.points Pass
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.triangles Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.fill_rules.basic_quad Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.fill_rules.basic_quad_reverse Pass
@@ -9381,7 +9381,7 @@ dEQP-GLES3.functional.rasterization.fbo.texture_2d.interpolation.lines Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.interpolation.triangles Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.lines Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.lines_wide Pass
-dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.points Fail
+dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.points Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.triangles Pass
dEQP-GLES3.functional.rasterization.fill_rules.basic_quad Pass
dEQP-GLES3.functional.rasterization.fill_rules.basic_quad_reverse Pass
@@ -9415,7 +9415,7 @@ dEQP-GLES3.functional.rasterization.primitives.line_loop Pass
dEQP-GLES3.functional.rasterization.primitives.line_loop_wide Pass
dEQP-GLES3.functional.rasterization.primitives.line_strip Pass
dEQP-GLES3.functional.rasterization.primitives.line_strip_wide Pass
-dEQP-GLES3.functional.rasterization.primitives.points Fail
+dEQP-GLES3.functional.rasterization.primitives.points Pass
dEQP-GLES3.functional.rasterization.primitives.triangles Pass
dEQP-GLES3.functional.rasterization.primitives.triangle_fan Pass
dEQP-GLES3.functional.rasterization.primitives.triangle_strip Pass
@@ -37286,6 +37286,66 @@ dEQP-GLES3.functional.transform_feedback.random.separate.triangles.7 Pass
dEQP-GLES3.functional.transform_feedback.random.separate.triangles.8 Pass
dEQP-GLES3.functional.transform_feedback.random.separate.triangles.9 Pass
dEQP-GLES3.functional.transform_feedback.random.separate.triangles.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.10 Pass
dEQP-GLES3.functional.ubo.instance_array_basic_type.packed.bool_fragment Pass
dEQP-GLES3.functional.ubo.instance_array_basic_type.packed.bool_vertex Pass
dEQP-GLES3.functional.ubo.instance_array_basic_type.packed.bvec2_fragment Pass
diff --git a/ci/previous_results/es_host_softpipe/deqp_gles31/results.txt b/ci/previous_results/es_host_softpipe/deqp_gles31/results.txt
index dcd2abe2..5f764bf3 100644
--- a/ci/previous_results/es_host_softpipe/deqp_gles31/results.txt
+++ b/ci/previous_results/es_host_softpipe/deqp_gles31/results.txt
@@ -15059,6 +15059,9 @@ dEQP-GLES31.functional.draw_indirect.random.55 Pass
dEQP-GLES31.functional.draw_indirect.random.56 Pass
dEQP-GLES31.functional.draw_indirect.random.57 Pass
dEQP-GLES31.functional.draw_indirect.random.61 Pass
+dEQP-GLES31.functional.fbo.color.tex2d.r16 Pass
+dEQP-GLES31.functional.fbo.color.tex2d.rg16 Pass
+dEQP-GLES31.functional.fbo.color.tex2d.rgba16 Pass
dEQP-GLES31.functional.fbo.color.texcubearray.r8i NotSupported
dEQP-GLES31.functional.fbo.color.texcubearray.r8ui NotSupported
dEQP-GLES31.functional.fbo.color.texcubearray.r8 NotSupported
diff --git a/ci/previous_results/es_host_softpipe/piglit_gles2/results.txt b/ci/previous_results/es_host_softpipe/piglit_gles2/results.txt
index d89cf5db..0942ab9a 100644
--- a/ci/previous_results/es_host_softpipe/piglit_gles2/results.txt
+++ b/ci/previous_results/es_host_softpipe/piglit_gles2/results.txt
@@ -21,9 +21,10 @@ spec/ext_shader_framebuffer_fetch_non_coherent/execution/gles2/nonuniform-ss-red
spec/ext_shader_framebuffer_fetch_non_coherent/execution/gles2/simple-ss: skip
spec/ext_texture_compression_bptc/bptc-api_gles2: pass
spec/ext_texture_compression_rgtc/rgtc-api_gles2: pass
-spec/ext_texture_compression_s3tc/s3tc-errors_gles2: fail
+spec/ext_texture_compression_s3tc/s3tc-errors_gles2: pass
spec/ext_texture_compression_s3tc/s3tc-teximage_gles2: pass
spec/ext_texture_compression_s3tc/s3tc-texsubimage_gles2: pass
+spec/intel_blackhole_render/intel_blackhole-blit_gles2: skip
spec/intel_blackhole_render/intel_blackhole-draw_gles2: skip
spec/khr_debug/object-label_gles2: pass
spec/khr_debug/push-pop-group_gles2: pass
diff --git a/ci/previous_results/es_host_softpipe/piglit_gles3/results.txt b/ci/previous_results/es_host_softpipe/piglit_gles3/results.txt
index 614788b2..cb04779d 100644
--- a/ci/previous_results/es_host_softpipe/piglit_gles3/results.txt
+++ b/ci/previous_results/es_host_softpipe/piglit_gles3/results.txt
@@ -11,6 +11,7 @@ spec/!opengl es 3.0/oes_compressed_etc2_texture-miptree_gles3 srgb8-punchthrough
spec/amd_framebuffer_multisample_advanced/api-gles3: skip
spec/arb_blend_func_extended/arb_blend_func_extended-bindfragdataindexed-invalid-parameters_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-dual-src-blending-discard-without-src1_gles3: pass
+spec/arb_blend_func_extended/arb_blend_func_extended-dual-src-blending-issue-1917_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-fbo-extended-blend-explicit_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-fbo-extended-blend-pattern_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-fbo-extended-blend_gles3: pass
@@ -76,6 +77,7 @@ spec/ext_transform_feedback/structs_gles3 basic-struct run-no-fs: pass
spec/ext_window_rectangles/errors_gles3: skip
spec/ext_window_rectangles/render_gles3: skip
spec/glsl-es-3.00/execution/varying-struct-centroid_gles3: fail
+spec/intel_blackhole_render/intel_blackhole-blit_gles3: skip
spec/intel_blackhole_render/intel_blackhole-draw_gles3: skip
spec/intel_conservative_rasterization/intel_conservative_rasterization-depthcoverage_gles3: skip
spec/intel_conservative_rasterization/intel_conservative_rasterization-innercoverage_gles3: skip
@@ -110,3 +112,4 @@ spec/nv_image_formats/nv_image_formats-gles3/copy-rgb10_a2ui: pass
spec/nv_image_formats/nv_image_formats-gles3/copy-rgba16: pass
spec/nv_image_formats/nv_image_formats-gles3/copy-rgba16_snorm: pass
spec/nv_read_depth/read_depth_gles3: fail
+spec/nv_viewport_swizzle/nv_viewport_swizzle-errors_gles3: skip
diff --git a/ci/previous_results/gl_host_softpipe/deqp_gl30/results.txt b/ci/previous_results/gl_host_softpipe/deqp_gl30/results.txt
index c0b10e56..68c5df92 100644
--- a/ci/previous_results/gl_host_softpipe/deqp_gl30/results.txt
+++ b/ci/previous_results/gl_host_softpipe/deqp_gl30/results.txt
@@ -1,6 +1,21 @@
KHR-GL30.clip_distance.coverage Pass
KHR-GL30.clip_distance.functional Fail
KHR-GL30.clip_distance.negative Pass
+KHR-GL30.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.samplercubearrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.samplercubeshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texturelod.samplercubeshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.sampler2darrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.samplercubearrayshadow_bias_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.samplercubearrayshadow_fragment NotSupported
+KHR-GL30.ext_texture_shadow_lod.texture.samplercubearrayshadow_vertex NotSupported
KHR-GL30.glsl_noperspective.functionaltest Pass
KHR-GL30.info.extensions Pass
KHR-GL30.info.renderer Pass
diff --git a/ci/previous_results/gl_host_softpipe/deqp_gl31/results.txt b/ci/previous_results/gl_host_softpipe/deqp_gl31/results.txt
index 3775be1c..46de9baa 100644
--- a/ci/previous_results/gl_host_softpipe/deqp_gl31/results.txt
+++ b/ci/previous_results/gl_host_softpipe/deqp_gl31/results.txt
@@ -9,6 +9,21 @@ KHR-GL31.CommonBugs.CommonBug_SparseBuffersWithCopyOps Pass
KHR-GL31.clip_distance.coverage Pass
KHR-GL31.clip_distance.functional Fail
KHR-GL31.clip_distance.negative Pass
+KHR-GL31.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.samplercubearrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.samplercubeshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texturelod.samplercubeshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.sampler2darrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.samplercubearrayshadow_bias_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.samplercubearrayshadow_fragment NotSupported
+KHR-GL31.ext_texture_shadow_lod.texture.samplercubearrayshadow_vertex NotSupported
KHR-GL31.glsl_noperspective.functionaltest Pass
KHR-GL31.info.extensions Pass
KHR-GL31.info.renderer Pass
diff --git a/ci/previous_results/gl_host_softpipe/deqp_gl32/results.txt b/ci/previous_results/gl_host_softpipe/deqp_gl32/results.txt
index 4f83ca24..549918b9 100644
--- a/ci/previous_results/gl_host_softpipe/deqp_gl32/results.txt
+++ b/ci/previous_results/gl_host_softpipe/deqp_gl32/results.txt
@@ -9,10 +9,25 @@ KHR-GL32.CommonBugs.CommonBug_SparseBuffersWithCopyOps Pass
KHR-GL32.clip_distance.coverage Pass
KHR-GL32.clip_distance.functional Fail
KHR-GL32.clip_distance.negative Pass
+KHR-GL32.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelodoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.sampler2darrayshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.samplercubearrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.samplercubeshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texturelod.samplercubeshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.textureoffset.sampler2darrayshadow_vertex NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.sampler2darrayshadow_bias_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.sampler2darrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.samplercubearrayshadow_bias_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.samplercubearrayshadow_fragment NotSupported
+KHR-GL32.ext_texture_shadow_lod.texture.samplercubearrayshadow_vertex NotSupported
KHR-GL32.glsl_noperspective.functionaltest Pass
-KHR-GL32.gpu_shader5_gl.float_encoding Fail
-KHR-GL32.gpu_shader5_gl.function_overloading Fail
-KHR-GL32.gpu_shader5_gl.implicit_conversions Fail
+KHR-GL32.gpu_shader5_gl.float_encoding NotSupported
+KHR-GL32.gpu_shader5_gl.function_overloading NotSupported
+KHR-GL32.gpu_shader5_gl.implicit_conversions NotSupported
KHR-GL32.info.extensions Pass
KHR-GL32.info.renderer Pass
KHR-GL32.info.render_target Pass
@@ -459,7 +474,7 @@ KHR-GL32.texture_repeat_mode.rgba32ui_49x23_2_clamp_to_edge Pass
KHR-GL32.texture_repeat_mode.rgba32ui_49x23_2_mirrored_repeat Pass
KHR-GL32.texture_repeat_mode.rgba32ui_49x23_2_repeat Pass
KHR-GL32.texture_size_promotion.functional InternalError
-KHR-GL32.transform_feedback.api_errors_test Fail
+KHR-GL32.transform_feedback.api_errors_test Pass
KHR-GL32.transform_feedback.capture_geometry_interleaved_test Fail
KHR-GL32.transform_feedback.capture_geometry_separate_test Fail
KHR-GL32.transform_feedback.capture_special_interleaved_test Pass
diff --git a/ci/previous_results/gl_host_softpipe/deqp_gles2/results.txt b/ci/previous_results/gl_host_softpipe/deqp_gles2/results.txt
index 1350896e..6e6fff1c 100644
--- a/ci/previous_results/gl_host_softpipe/deqp_gles2/results.txt
+++ b/ci/previous_results/gl_host_softpipe/deqp_gles2/results.txt
@@ -590,6 +590,14 @@ dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos
dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos_x_neg_y_pos_z Pass
dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos_x_pos_y_neg_z Pass
dEQP-GLES2.functional.clipping.triangle_vertex.clip_two.clip_pos_y_pos_z_and_pos_x_pos_y_pos_z Pass
+dEQP-GLES2.functional.clip_control.depth_mode_one_to_one Pass
+dEQP-GLES2.functional.clip_control.depth_mode_zero_to_one Pass
+dEQP-GLES2.functional.clip_control.errors Pass
+dEQP-GLES2.functional.clip_control.face_culling Pass
+dEQP-GLES2.functional.clip_control.initial Pass
+dEQP-GLES2.functional.clip_control.modify_get Pass
+dEQP-GLES2.functional.clip_control.origin Pass
+dEQP-GLES2.functional.clip_control.viewport_bounds Pass
dEQP-GLES2.functional.color_clear.complex_rgba Pass
dEQP-GLES2.functional.color_clear.complex_rgb Pass
dEQP-GLES2.functional.color_clear.long_masked_rgba Pass
@@ -2998,7 +3006,7 @@ dEQP-GLES2.functional.rasterization.limits.points Fail
dEQP-GLES2.functional.rasterization.primitives.lines Pass
dEQP-GLES2.functional.rasterization.primitives.line_loop Pass
dEQP-GLES2.functional.rasterization.primitives.line_strip Pass
-dEQP-GLES2.functional.rasterization.primitives.points Fail
+dEQP-GLES2.functional.rasterization.primitives.points Pass
dEQP-GLES2.functional.rasterization.primitives.triangles Pass
dEQP-GLES2.functional.rasterization.primitives.triangle_fan Pass
dEQP-GLES2.functional.rasterization.primitives.triangle_strip Pass
@@ -4097,6 +4105,14 @@ dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static
dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.matrix_subscript.mat4_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.float_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_dynamic_loop_write_dynamic_read_fragment Pass
@@ -4129,6 +4145,14 @@ dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_loop_
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.float_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_dynamic_loop_write_dynamic_read_fragment Pass
@@ -4161,6 +4185,14 @@ dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_loop_r
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec2_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_dynamic_loop_write_dynamic_read_fragment Pass
@@ -4193,6 +4225,14 @@ dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_static_write_static_loop_r
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_static_write_static_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_static_write_static_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec3_static_write_static_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_dynamic_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_loop_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_loop_read_vertex Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_read_fragment Pass
+dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_const_write_static_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_dynamic_loop_write_dynamic_loop_read_fragment Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_dynamic_loop_write_dynamic_loop_read_vertex Pass
dEQP-GLES2.functional.shaders.indexing.tmp_array.vec4_dynamic_loop_write_dynamic_read_fragment Pass
diff --git a/ci/previous_results/gl_host_softpipe/deqp_gles3/results.txt b/ci/previous_results/gl_host_softpipe/deqp_gles3/results.txt
index 12ffed4d..65dcef10 100644
--- a/ci/previous_results/gl_host_softpipe/deqp_gles3/results.txt
+++ b/ci/previous_results/gl_host_softpipe/deqp_gles3/results.txt
@@ -9370,7 +9370,7 @@ dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.interpolation.lines Pas
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.interpolation.triangles Pass
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.lines Pass
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.lines_wide Pass
-dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.points Fail
+dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.points Pass
dEQP-GLES3.functional.rasterization.fbo.rbo_singlesample.primitives.triangles Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.fill_rules.basic_quad Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.fill_rules.basic_quad_reverse Pass
@@ -9381,7 +9381,7 @@ dEQP-GLES3.functional.rasterization.fbo.texture_2d.interpolation.lines Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.interpolation.triangles Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.lines Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.lines_wide Pass
-dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.points Fail
+dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.points Pass
dEQP-GLES3.functional.rasterization.fbo.texture_2d.primitives.triangles Pass
dEQP-GLES3.functional.rasterization.fill_rules.basic_quad Pass
dEQP-GLES3.functional.rasterization.fill_rules.basic_quad_reverse Pass
@@ -9415,7 +9415,7 @@ dEQP-GLES3.functional.rasterization.primitives.line_loop Pass
dEQP-GLES3.functional.rasterization.primitives.line_loop_wide Pass
dEQP-GLES3.functional.rasterization.primitives.line_strip Pass
dEQP-GLES3.functional.rasterization.primitives.line_strip_wide Pass
-dEQP-GLES3.functional.rasterization.primitives.points Fail
+dEQP-GLES3.functional.rasterization.primitives.points Pass
dEQP-GLES3.functional.rasterization.primitives.triangles Pass
dEQP-GLES3.functional.rasterization.primitives.triangle_fan Pass
dEQP-GLES3.functional.rasterization.primitives.triangle_strip Pass
@@ -37286,6 +37286,66 @@ dEQP-GLES3.functional.transform_feedback.random.separate.triangles.7 Pass
dEQP-GLES3.functional.transform_feedback.random.separate.triangles.8 Pass
dEQP-GLES3.functional.transform_feedback.random.separate.triangles.9 Pass
dEQP-GLES3.functional.transform_feedback.random.separate.triangles.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.lines.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.points.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.interleaved.triangles.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.lines.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.points.10 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.1 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.2 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.3 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.4 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.5 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.6 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.7 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.8 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.9 Pass
+dEQP-GLES3.functional.transform_feedback.random_full_array_capture.separate.triangles.10 Pass
dEQP-GLES3.functional.ubo.instance_array_basic_type.packed.bool_fragment Pass
dEQP-GLES3.functional.ubo.instance_array_basic_type.packed.bool_vertex Pass
dEQP-GLES3.functional.ubo.instance_array_basic_type.packed.bvec2_fragment Pass
diff --git a/ci/previous_results/gl_host_softpipe/deqp_gles31/results.txt b/ci/previous_results/gl_host_softpipe/deqp_gles31/results.txt
index 276091a2..96e5b5e1 100644
--- a/ci/previous_results/gl_host_softpipe/deqp_gles31/results.txt
+++ b/ci/previous_results/gl_host_softpipe/deqp_gles31/results.txt
@@ -15059,6 +15059,9 @@ dEQP-GLES31.functional.draw_indirect.random.55 Pass
dEQP-GLES31.functional.draw_indirect.random.56 Pass
dEQP-GLES31.functional.draw_indirect.random.57 Pass
dEQP-GLES31.functional.draw_indirect.random.61 Pass
+dEQP-GLES31.functional.fbo.color.tex2d.r16 Pass
+dEQP-GLES31.functional.fbo.color.tex2d.rg16 Pass
+dEQP-GLES31.functional.fbo.color.tex2d.rgba16 Pass
dEQP-GLES31.functional.fbo.color.texcubearray.r8i NotSupported
dEQP-GLES31.functional.fbo.color.texcubearray.r8ui NotSupported
dEQP-GLES31.functional.fbo.color.texcubearray.r8 NotSupported
@@ -33534,14 +33537,14 @@ dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomi
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomic_multiple_unrelated_write_read_ordered Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomic_multiple_write_read Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomic_overwrite Pass
-dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomic_read_write Pass
+dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomic_read_write Fail
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_atomic_write_read Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_multiple_interleaved_write_read Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_multiple_unrelated_write_read_non_ordered Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_multiple_unrelated_write_read_ordered Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_multiple_write_read Pass
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_overwrite Pass
-dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_read_write Pass
+dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_read_write Fail
dEQP-GLES31.functional.synchronization.inter_call.with_memory_barrier.ssbo_write_read Pass
dEQP-GLES31.functional.synchronization.inter_invocation.image_alias_overwrite Pass
dEQP-GLES31.functional.synchronization.inter_invocation.image_alias_write Pass
diff --git a/ci/previous_results/gl_host_softpipe/piglit_gles2/results.txt b/ci/previous_results/gl_host_softpipe/piglit_gles2/results.txt
index 2ccc4443..bf098e04 100644
--- a/ci/previous_results/gl_host_softpipe/piglit_gles2/results.txt
+++ b/ci/previous_results/gl_host_softpipe/piglit_gles2/results.txt
@@ -21,9 +21,10 @@ spec/ext_shader_framebuffer_fetch_non_coherent/execution/gles2/nonuniform-ss-red
spec/ext_shader_framebuffer_fetch_non_coherent/execution/gles2/simple-ss: skip
spec/ext_texture_compression_bptc/bptc-api_gles2: pass
spec/ext_texture_compression_rgtc/rgtc-api_gles2: pass
-spec/ext_texture_compression_s3tc/s3tc-errors_gles2: fail
+spec/ext_texture_compression_s3tc/s3tc-errors_gles2: pass
spec/ext_texture_compression_s3tc/s3tc-teximage_gles2: pass
spec/ext_texture_compression_s3tc/s3tc-texsubimage_gles2: pass
+spec/intel_blackhole_render/intel_blackhole-blit_gles2: skip
spec/intel_blackhole_render/intel_blackhole-draw_gles2: skip
spec/khr_debug/object-label_gles2: pass
spec/khr_debug/push-pop-group_gles2: pass
diff --git a/ci/previous_results/gl_host_softpipe/piglit_gles3/results.txt b/ci/previous_results/gl_host_softpipe/piglit_gles3/results.txt
index 614788b2..cb04779d 100644
--- a/ci/previous_results/gl_host_softpipe/piglit_gles3/results.txt
+++ b/ci/previous_results/gl_host_softpipe/piglit_gles3/results.txt
@@ -11,6 +11,7 @@ spec/!opengl es 3.0/oes_compressed_etc2_texture-miptree_gles3 srgb8-punchthrough
spec/amd_framebuffer_multisample_advanced/api-gles3: skip
spec/arb_blend_func_extended/arb_blend_func_extended-bindfragdataindexed-invalid-parameters_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-dual-src-blending-discard-without-src1_gles3: pass
+spec/arb_blend_func_extended/arb_blend_func_extended-dual-src-blending-issue-1917_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-fbo-extended-blend-explicit_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-fbo-extended-blend-pattern_gles3: pass
spec/arb_blend_func_extended/arb_blend_func_extended-fbo-extended-blend_gles3: pass
@@ -76,6 +77,7 @@ spec/ext_transform_feedback/structs_gles3 basic-struct run-no-fs: pass
spec/ext_window_rectangles/errors_gles3: skip
spec/ext_window_rectangles/render_gles3: skip
spec/glsl-es-3.00/execution/varying-struct-centroid_gles3: fail
+spec/intel_blackhole_render/intel_blackhole-blit_gles3: skip
spec/intel_blackhole_render/intel_blackhole-draw_gles3: skip
spec/intel_conservative_rasterization/intel_conservative_rasterization-depthcoverage_gles3: skip
spec/intel_conservative_rasterization/intel_conservative_rasterization-innercoverage_gles3: skip
@@ -110,3 +112,4 @@ spec/nv_image_formats/nv_image_formats-gles3/copy-rgb10_a2ui: pass
spec/nv_image_formats/nv_image_formats-gles3/copy-rgba16: pass
spec/nv_image_formats/nv_image_formats-gles3/copy-rgba16_snorm: pass
spec/nv_read_depth/read_depth_gles3: fail
+spec/nv_viewport_swizzle/nv_viewport_swizzle-errors_gles3: skip
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index f5b81c1f..8f5cec7a 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -111,6 +111,7 @@ run_make_check_trace_stderr()
run_deqp()
{
+ local retval=0
run_setup meson
OGL_BACKEND="$1"
SUITE="$2"
@@ -148,13 +149,15 @@ run_deqp()
./run_test_suite.sh --deqp ${TEST_SUITE} \
--host-${OGL_BACKEND} \
${BACKENDS}
+ retval=$?
popd
- return $?
+ return $retval
}
run_piglit()
{
+ local retval=0
run_setup meson
OGL_BACKEND="$1"
@@ -172,9 +175,10 @@ run_piglit()
./run_test_suite.sh --piglit --gles2 --gles3 \
--host-${OGL_BACKEND} \
${BACKENDS}
+ retval=$?
popd
- return $?
+ return $retval
}
parse_input()
diff --git a/meson.build b/meson.build
index fc9dea74..333dd74a 100644
--- a/meson.build
+++ b/meson.build
@@ -85,12 +85,20 @@ conf_data.set('VERSION', '0.8.1')
with_tracing = get_option('tracing')
if with_tracing != 'none'
- if not cc.compiles('void f(void* v){}; int main () { void *dummy __attribute__((cleanup (f))) = 0;}')
+ if not cc.compiles('void f(void* v){} int main () { void *dummy __attribute__((cleanup (f))) = 0;}')
error('Tracing requires compiler support for __attribute__((cleanup))')
endif
endif
+if with_tracing == 'percetto'
+ # percetto uses C++ internally, so we need to link with C++.
+ # TODO: remove -lstdc++ when percetto is a shared library.
+ add_project_link_arguments('-lstdc++', language : 'c')
+ percetto_dep = dependency('percetto', version : '>=0.0.8')
+ conf_data.set('ENABLE_TRACING', 'TRACE_WITH_PERCETTO')
+endif
+
if with_tracing == 'perfetto'
vperfetto_min_dep = dependency('vperfetto_min')
conf_data.set('ENABLE_TRACING', 'TRACE_WITH_PERFETTO')
diff --git a/meson_options.txt b/meson_options.txt
index ce988a63..0a57b6fd 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -63,6 +63,6 @@ option(
'tracing',
type : 'combo',
value : 'none',
- choices : [ 'perfetto', 'stderr', 'none' ],
+ choices : [ 'percetto', 'perfetto', 'stderr', 'none' ],
description : 'enable emitting traces using the selected backend'
)
diff --git a/perf-testing/Docker/Dockerfile b/perf-testing/Docker/Dockerfile
new file mode 100644
index 00000000..3c49beaa
--- /dev/null
+++ b/perf-testing/Docker/Dockerfile
@@ -0,0 +1,299 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+FROM debian:bullseye
+LABEL description="Test crosvm using a command like the following: \
+docker run --privileged -v /dev/log:/dev/log -v <path to crosvm>:/platform/crosvm:ro <crosvm base image>"
+
+# should be set to the USER_ID/GROUP_ID of the user running the docker image
+ARG USER_ID
+ARG GROUP_ID
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends ca-certificates gnupg wget && \
+ echo 'deb-src https://deb.debian.org/debian bullseye main' >> /etc/apt/sources.list && \
+ cat /etc/apt/sources.list && \
+ apt-get update && \
+ apt-get build-dep -y mesa && \
+ apt-get install -y --no-install-recommends ca-certificates \
+ python3-setuptools \
+ llvm-dev \
+ libxcb-shm0-dev \
+ libelf-dev \
+ cmake \
+ chrony \
+ bc \
+ flex \
+ bison \
+ debootstrap \
+ cpio \
+ xz-utils \
+ libegl1-mesa-dev \
+ autoconf \
+ automake \
+ curl \
+ g++ \
+ gcc \
+ gdb \
+ git \
+ kmod \
+ libcap-dev \
+ libdbus-1-dev \
+ libegl1-mesa-dev \
+ libepoxy-dev \
+ libfdt-dev \
+ libgl1-mesa-dev \
+ libgles2-mesa-dev \
+ libpciaccess-dev \
+ libssl-dev \
+ libtool \
+ libusb-1.0-0-dev \
+ libwayland-dev \
+ make \
+ nasm \
+ ninja-build \
+ pkg-config \
+ protobuf-compiler \
+ python \
+ libtinfo5 \
+ python3-protobuf \
+ clang \
+ iptables \
+ libunwind-dev \
+ libprotobuf-dev \
+ protobuf-compiler \
+ libprotoc-dev \
+ libdw-dev \
+ libprotobuf-dev \
+ libdocopt-dev \
+ && \
+ apt-get -y build-dep intel-gpu-tools
+
+ENV RUSTUP_HOME=/usr/local/rustup \
+ CARGO_HOME=/usr/local/cargo \
+ PATH=/usr/local/cargo/bin:$PATH \
+ RUST_VERSION=1.45.2 \
+ RUSTFLAGS='--cfg hermetic'
+
+# Debian usually has an old rust version in the repository. Instead of using that, we use rustup to
+# pull in a toolchain version of our choosing.
+RUN curl -LO "https://static.rust-lang.org/rustup/archive/1.22.1/x86_64-unknown-linux-gnu/rustup-init" \
+ && echo "49c96f3f74be82f4752b8bffcf81961dea5e6e94ce1ccba94435f12e871c3bdb *rustup-init" | sha256sum -c - \
+ && chmod +x rustup-init \
+ && ./rustup-init -y --no-modify-path --default-toolchain $RUST_VERSION \
+ && rm rustup-init \
+ && chmod -R a+w $RUSTUP_HOME $CARGO_HOME \
+ && rustup --version \
+ && cargo --version \
+ && rustc --version
+
+# Set the default toolchain to 'stable' to match the one that bin/smoke_test
+# uses. This allows kokoro runs to avoid re-downloading the toolchain as long
+# as the version matches RUST_VERSION.
+RUN rustup default stable
+
+# Warms up the cargo registry cache for future cargo runs. Cargo will still update the cache using a
+# git pull, but it only needs to download files that were changed since this image was built.
+RUN cargo install thisiznotarealpackage -q || true
+
+# Use /scratch for building dependencies which are too new or don't exist on Debian stretch.
+WORKDIR /scratch
+
+# Suppress warnings about detached HEAD, which will happen a lot and is meaningless in this context.
+RUN git config --global advice.detachedHead false
+
+# New libepoxy and libdrm-dev require a newer meson than is in Debian stretch.
+ARG MESON_COMMIT=master
+RUN git clone https://github.com/mesonbuild/meson /meson \
+ && cd /meson \
+ && git checkout $MESON_COMMIT \
+ && rm -f /usr/bin/meson \
+ && ln -s $PWD/meson.py /usr/bin/meson
+
+# The libdrm-dev in distro can be too old to build minigbm,
+# so we build it from upstream.
+ARG DRM_COMMIT=master
+RUN git clone https://gitlab.freedesktop.org/mesa/drm.git/ \
+ && cd drm \
+ && git checkout $DRM_COMMIT \
+ && meson build -Dlibdir=lib \
+ && ninja -C build/ install
+
+# The gbm used by upstream linux distros is not compatible with crosvm, which must use Chrome OS's
+# minigbm.
+RUN git clone https://chromium.googlesource.com/chromiumos/platform/minigbm \
+ && cd minigbm \
+ && sed 's/-Wall/-Wno-maybe-uninitialized/g' -i Makefile \
+ && make CPPFLAGS="-DDRV_I915" DRV_I915=1 install -j$(nproc)
+
+RUN git clone https://gitlab.freedesktop.org/virgl/virglrenderer.git \
+ && cd virglrenderer \
+ && mkdir -p build \
+ && meson build/ -Dprefix=/usr/local -Dlibdir=lib \
+ && ninja -C build/ install
+
+# Install libtpm2 so that tpm2-sys/build.rs does not try to build it in place in
+# the read-only source directory.
+ARG TPM2_COMMIT=073dc25aa4dda42475a7a5a140399fc5db61b20f
+RUN git clone https://chromium.googlesource.com/chromiumos/third_party/tpm2 \
+ && cd tpm2 \
+ && git checkout $TPM2_COMMIT \
+ && make -j$(nproc) \
+ && cp build/libtpm2.a /lib
+
+# Pull down the platform2 repository and install librendernodehost.
+# Note that we clone the repository outside of /scratch so that it is not removed,
+# because crosvm depends on libvda.
+ENV PLATFORM2_ROOT=/platform2
+ARG PLATFORM2_COMMIT=2dce812fc9091e41a33094929610199468ee322b
+RUN git clone https://chromium.googlesource.com/chromiumos/platform2 $PLATFORM2_ROOT \
+ && cd $PLATFORM2_ROOT \
+ && git checkout $PLATFORM2_COMMIT
+
+# Set up sysroot from which system_api proto files are built.
+ENV SYSROOT=/sysroot
+RUN mkdir -p $SYSROOT/usr/include/chromeos/dbus/trunks \
+ && cp $PLATFORM2_ROOT/trunks/interface.proto \
+ $SYSROOT/usr/include/chromeos/dbus/trunks
+# Copy it under rustc's sysroot as well for cargo clippy.
+# The export must happen in the same RUN as the copy, since it does not persist across layers.
+RUN export RUST_SYSROOT=$(rustc --print sysroot) && echo $RUST_SYSROOT \
+    && mkdir -p $RUST_SYSROOT/usr/include/chromeos/dbus/trunks \
+    && cp $PLATFORM2_ROOT/trunks/interface.proto \
+       $RUST_SYSROOT/usr/include/chromeos/dbus/trunks
+
+# Reduces image size and prevents accidentally using /scratch files
+RUN rm -r /scratch
+WORKDIR /
+
+# The manual installation of shared objects requires an ld.so.cache refresh.
+RUN ldconfig
+
+# Pull down repositories that crosvm depends on to cros checkout-like locations.
+ENV CROS_ROOT=/
+ENV THIRD_PARTY_ROOT=$CROS_ROOT/third_party
+RUN mkdir -p $THIRD_PARTY_ROOT
+ENV PLATFORM_ROOT=$CROS_ROOT/platform
+RUN mkdir -p $PLATFORM_ROOT
+ENV AOSP_EXTERNAL_ROOT=$CROS_ROOT/aosp/external
+RUN mkdir -p $AOSP_EXTERNAL_ROOT
+
+# minijail does not exist in upstream linux distros.
+ARG MINIJAIL_COMMIT=5f9e3001c61626d2863dad91248ba8496c3ef511
+RUN git clone https://android.googlesource.com/platform/external/minijail $AOSP_EXTERNAL_ROOT/minijail \
+ && cd $AOSP_EXTERNAL_ROOT/minijail \
+ && git checkout $MINIJAIL_COMMIT \
+ && make -j$(nproc) \
+ && cp libminijail.so /usr/lib/x86_64-linux-gnu/
+
+# Pull the cras library for audio access.
+ARG ADHD_COMMIT=5068bdd18b51de8f2d5bcff754cdecda80de8f44
+RUN git clone https://chromium.googlesource.com/chromiumos/third_party/adhd $THIRD_PARTY_ROOT/adhd \
+ && cd $THIRD_PARTY_ROOT/adhd \
+ && git checkout $ADHD_COMMIT
+
+ARG VPERFETTO_COMMIT=3ce4813ae114e5f2e6e0b3f29517a88246c00363
+RUN git clone https://github.com/741g/vperfetto.git && \
+ cd vperfetto && \
+ git checkout $VPERFETTO_COMMIT && \
+ cmake -G Ninja -B_build -DOPTION_BUILD_TESTS=FALSE && \
+ ninja -C _build install
+
+ARG CROSVM_COMMIT=3f9373f474a295df0f8a38592472ae59adc98e29
+RUN mkdir -p /platform/ \
+ && cd /platform \
+ && git clone --single-branch -b perfetto https://gitlab.freedesktop.org/tomeu/crosvm.git \
+ && cd crosvm \
+ && cargo install --locked --debug --features 'default-no-sandbox wl-dmabuf gpu x virtio-gpu-next' --path . --root /usr/local
+
+RUN export uid=$USER_ID gid=$GROUP_ID && \
+ mkdir -p /home/chronos && \
+ echo "chronos:x:${uid}:${gid}:Developer,,,:/home/chronos:/bin/bash" >> /etc/passwd && \
+ echo "chronos:x:${uid}:" >> /etc/group && \
+ chown ${uid}:${gid} -R /home/chronos
+
+
+# chrony didn't install cleanly when using --include, so we add it by using an extra apt install
+
+ENV EXTRA_PACKAGES="sudo,strace,libxcb-dri2-0,libxcb-dri3-0,libx11-xcb1,libxcb-xfixes0,libxcb-present0,libxcb-sync1,libxshmfence1,libx11-6,sysvinit-core,libwayland-client0,libwayland-server0,time,inetutils-ping,dnsutils,libpng16-16,libprocps8,valgrind,libsensors5,python3,wget,gnupg,ca-certificates,llvm-dev,chrony"
+
+RUN ulimit -n 1024 && \
+ debootstrap --variant=minbase --components main,contrib,non-free --include=$EXTRA_PACKAGES bullseye /rootfs http://deb.debian.org/debian && \
+ chroot /rootfs /bin/bash -c "apt install -yy gdb" && \
+ chroot /rootfs /bin/bash -c "dpkg-query -Wf '\${Installed-Size}\t\${Package}\n' | sort -n " && \
+    chroot /rootfs /bin/bash -c "useradd -u 1001 -r -d / -s /sbin/nologin -c 'crosvm image user' perfetto"
+
+COPY perf-testing/Docker/init.sh /rootfs/.
+
+RUN cd /rootfs && \
+ find -H | cpio -H newc -o | xz --check=crc32 -T4 - > /rootfs.cpio.gz
+
+COPY perf-testing/Docker/x86_64.config /tmp/.
+RUN mkdir -p kernel && \
+ wget -O- https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.16.tar.xz | tar -xJ --strip-components=1 -C kernel && \
+ cd kernel && \
+ ./scripts/kconfig/merge_config.sh arch/x86/configs/x86_64_defconfig /tmp/x86_64.config && \
+ make -j12 vmlinux && \
+ cp vmlinux /. && \
+ cd .. && \
+ rm -rf kernel
+
+# Need an unreleased version of Waffle for surfaceless support in apitrace
+# Replace this build with the Debian package once that's possible
+ENV WAFFLE_VERSION="b6d94483694b6889a8567b6bd748f7baad527df4"
+RUN git clone https://gitlab.freedesktop.org/mesa/waffle.git --single-branch --no-checkout /waffle && \
+ cd /waffle && \
+ git checkout "$WAFFLE_VERSION" && \
+ cmake -B_build -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_BUILD_TYPE=Debug -Dwaffle_has_surfaceless_egl=1 . && \
+ make -j12 -C _build install && \
+ mkdir -p build/lib build/bin && \
+ cp _build/lib/libwaffle-1.so build/lib/libwaffle-1.so.0 && \
+ cp _build/bin/wflinfo build/bin/wflinfo
+
+ENV APITRACE_VERSION="perfetto"
+RUN git clone https://gitlab.freedesktop.org/tomeu/apitrace.git --single-branch -b perfetto --no-checkout /apitrace && \
+ cd /apitrace && \
+ git checkout "$APITRACE_VERSION" && \
+ cmake -G Ninja -B_build -H. -DCMAKE_BUILD_TYPE=Debug -DENABLE_GUI=False -DENABLE_WAFFLE=on -DWaffle_DIR=/usr/local/lib/cmake/Waffle/ && \
+ ninja -C _build && \
+ mkdir build && \
+ cp _build/apitrace build && \
+ cp _build/eglretrace build
+
+ENV GN_ARGS="is_debug=false use_custom_libcxx=false"
+ENV CFG=linux_trusty-gcc7-x86_64-release
+RUN git clone https://android.googlesource.com/platform/external/perfetto && \
+ cd perfetto && \
+ git checkout v12.1 && \
+ python3 tools/install-build-deps && \
+ python3 tools/install-build-deps --ui && \
+ tools/gn gen out/dist --args="${GN_ARGS}" --check && \
+ tools/ninja -C out/dist traced traced_probes perfetto trace_to_text ui trace_processor_shell && \
+ mkdir -p /usr/local/lib/python3.7/site-packages && \
+ protoc --python_out=/usr/local/lib/python3.7/site-packages protos/perfetto/trace/perfetto_trace.proto && \
+ tools/gen_amalgamated --gn_args 'target_os="linux" is_debug=false'
+
+RUN mkdir -p /traces-db && chown chronos:chronos /traces-db && mkdir -p /wd && chown -R chronos:chronos /wd
+
+ENV IGT_GPU_TOOLS_VERSION="igt-gpu-tools-1.25"
+RUN git clone --single-branch -b master https://gitlab.freedesktop.org/drm/igt-gpu-tools.git && \
+ cd igt-gpu-tools && \
+ git checkout "$IGT_GPU_TOOLS_VERSION" && \
+ meson build -Doverlay=disabled -Dchamelium=disabled -Dvalgrind=disabled -Dman=disabled -Ddocs=disabled -Dtests=disabled -Drunner=disabled && \
+ ninja -C build install
+
+ENV GFX_PPS_VERSION="v0.3.0"
+RUN git clone --single-branch -b master https://gitlab.freedesktop.org/Fahien/gfx-pps.git && \
+ cd gfx-pps && \
+ git checkout "$GFX_PPS_VERSION" && \
+ meson build -Dtest=false -Dbuildtype=debugoptimized && \
+ ninja -C build
+
+COPY perf-testing/Docker/run_traces.sh /usr/local/.
+COPY perf-testing/Docker/run_perfetto_ui.sh /usr/local/.
+COPY perf-testing/Docker/run.sh /usr/local/.
+COPY perf-testing/Docker/perfetto-guest.cfg /usr/local/.
+COPY perf-testing/Docker/perfetto-host.cfg /usr/local/.
+COPY perf-testing/Docker/merge_traces.py /usr/local/.
+
+ENTRYPOINT ["/usr/local/run.sh"]
diff --git a/perf-testing/Docker/init.sh b/perf-testing/Docker/init.sh
new file mode 100755
index 00000000..0a01482e
--- /dev/null
+++ b/perf-testing/Docker/init.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set +xe
+
+mount -t proc none /proc
+mount -t sysfs none /sys
+mount -t devtmpfs none /dev || echo possibly already mounted
+mkdir -p /dev/pts
+mount -t devpts devpts /dev/pts
+mount -t virtiofs local /usr/local
+mount -t debugfs none /sys/kernel/debug
+echo "nameserver 8.8.8.8" > /etc/resolv.conf
+#for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done
+
+bash /usr/local/run_traces.sh
+
+sync
+sleep 1
diff --git a/perf-testing/Docker/merge_traces.py b/perf-testing/Docker/merge_traces.py
new file mode 100755
index 00000000..0d2f7a3a
--- /dev/null
+++ b/perf-testing/Docker/merge_traces.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2020 Collabora Ltd
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+from google import protobuf
+import protos.perfetto.trace.perfetto_trace_pb2
+from protos.perfetto.trace.perfetto_trace_pb2 import BUILTIN_CLOCK_BOOTTIME
+from protos.perfetto.trace.perfetto_trace_pb2 import BUILTIN_CLOCK_REALTIME
+import math
+import sys
+import operator
+import time
+
+def add_ftrace_event(out_message, in_packet, in_event, max_host_sequence_id = 0):
+ out_packet = out_message.packet.add()
+ out_packet.ftrace_events.cpu = in_packet.ftrace_events.cpu
+ out_packet.trusted_uid = in_packet.trusted_uid
+ out_packet.trusted_packet_sequence_id += max_host_sequence_id
+ out_packet.ftrace_events.event.add().CopyFrom(in_event)
+
+virtio_gpu_pids = set()
+
+print('%d Loading host trace' % time.time())
+
+in_message = protos.perfetto.trace.perfetto_trace_pb2.Trace()
+in_message.ParseFromString(open(sys.argv[1], 'rb').read())
+
+print('%d Copying host trace' % time.time())
+
+out_message = protos.perfetto.trace.perfetto_trace_pb2.Trace()
+max_host_sequence_id = 0
+first_host_virtio_gpu_cmd = math.inf
+host_boot_ts = -1
+for in_packet in in_message.packet:
+ max_host_sequence_id = max(max_host_sequence_id,
+ in_packet.trusted_packet_sequence_id)
+
+ if in_packet.HasField('ftrace_events'):
+ for event in in_packet.ftrace_events.event:
+ if event.HasField('sched_switch'):
+ if 'virtio_gpu' == event.sched_switch.prev_comm:
+ virtio_gpu_pids.add(event.sched_switch.prev_pid)
+ if 'virtio_gpu' == event.sched_switch.next_comm:
+ virtio_gpu_pids.add(event.sched_switch.next_pid)
+
+ if event.sched_switch.prev_pid in virtio_gpu_pids or \
+ event.sched_switch.next_pid in virtio_gpu_pids:
+ add_ftrace_event(out_message, in_packet, event)
+ elif event.HasField('sched_wakeup'):
+ if 'virtio_gpu' == event.sched_wakeup.comm:
+ virtio_gpu_pids.add(event.sched_wakeup.pid)
+
+ if event.sched_wakeup.pid in virtio_gpu_pids:
+ add_ftrace_event(out_message, in_packet, event)
+ elif event.HasField('print'):
+ event_type, guest_pid, label, cookie = event.print.buf.split('|')
+
+ # Replace host PID with the guest PID
+ event.pid = int(guest_pid)
+ add_ftrace_event(out_message, in_packet, event)
+ else:
+ if in_packet.HasField('track_descriptor'):
+ if in_packet.track_descriptor.HasField('name'):
+ in_packet.track_descriptor.name += ' (Host)'
+ elif in_packet.HasField('track_event'):
+ if in_packet.track_event.type == in_packet.track_event.TYPE_SLICE_BEGIN and \
+ in_packet.track_event.name == 'GetCapset':
+ first_host_virtio_gpu_cmd = min(first_host_virtio_gpu_cmd, in_packet.timestamp)
+ elif host_boot_ts == -1 and in_packet.HasField('clock_snapshot'):
+ for clock in in_packet.clock_snapshot.clocks:
+ if clock.clock_id == BUILTIN_CLOCK_BOOTTIME:
+ host_boottime = clock.timestamp
+ elif clock.clock_id == BUILTIN_CLOCK_REALTIME:
+ host_realtime = clock.timestamp
+ host_boot_ts = host_realtime - host_boottime
+ out_packet = out_message.packet.add()
+ out_packet.CopyFrom(in_packet)
+
+print('%d Loading guest trace' % time.time())
+in_message.ParseFromString(open(sys.argv[2], 'rb').read())
+
+#print('%d Writing guest trace txt' % time.time())
+#open('../traces-db/perfetto-guest.txt', 'w').write(str(in_message))
+
+first_guest_virtio_gpu_cmd = math.inf
+guest_boot_ts = -1
+for in_packet in in_message.packet:
+ if guest_boot_ts == -1 and in_packet.HasField('clock_snapshot'):
+ for clock in in_packet.clock_snapshot.clocks:
+ if clock.clock_id == BUILTIN_CLOCK_BOOTTIME:
+ guest_boottime = clock.timestamp
+ elif clock.clock_id == BUILTIN_CLOCK_REALTIME:
+ guest_realtime = clock.timestamp
+ guest_boot_ts = guest_realtime - guest_boottime
+ elif in_packet.HasField('track_event'):
+ if in_packet.track_event.type == in_packet.track_event.TYPE_SLICE_BEGIN and \
+ in_packet.track_event.name == 'DRM_IOCTL_VIRTGPU_GET_CAPS':
+ first_guest_virtio_gpu_cmd = min(first_guest_virtio_gpu_cmd, in_packet.timestamp)
+
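+# Both traces include BOOTTIME/REALTIME clock snapshots. The difference between
+# the guest and host boot times (in wall-clock terms) is the offset that maps
+# guest timestamps onto the host timeline; cmd_delta then reports the remaining
+# offset between the first virtio-gpu command seen in the guest and on the host.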
+delta = guest_boot_ts - host_boot_ts
+cmd_delta = first_host_virtio_gpu_cmd - first_guest_virtio_gpu_cmd - delta
+print("boottime delta %ds." % (delta / 1000 / 1000 / 1000))
+print("cmd delta %dus." % (cmd_delta / 1000))
+
+for in_packet in in_message.packet:
+ if in_packet.HasField('process_tree') or \
+ in_packet.HasField('service_event') or \
+ in_packet.HasField('track_event') or \
+ in_packet.HasField('trace_packet_defaults') or \
+ in_packet.HasField('track_descriptor'):
+ out_packet = out_message.packet.add()
+ out_packet.CopyFrom(in_packet)
+ out_packet.trusted_packet_sequence_id += max_host_sequence_id
+ out_packet.timestamp += delta
+ if out_packet.HasField('track_descriptor'):
+ if out_packet.track_descriptor.HasField('name'):
+ out_packet.track_descriptor.name += ' (Guest)'
+ elif in_packet.HasField('ftrace_events'):
+ for event in in_packet.ftrace_events.event:
+ event.timestamp += delta
+ add_ftrace_event(out_message, in_packet, event, max_host_sequence_id)
+
+def get_timestamp(packet):
+ if packet.HasField('timestamp'):
+ return packet.timestamp
+ elif packet.HasField('ftrace_events') and \
+ packet.ftrace_events.event:
+ return packet.ftrace_events.event[0].timestamp
+ return 0
+
+out_message.packet.sort(key=get_timestamp)
+print('%d Writing merged trace' % time.time())
+open(sys.argv[3], 'wb').write(out_message.SerializeToString())
+
+#print('%d Writing merged trace txt' % time.time())
+#open('../traces-db/perfetto.txt', 'w').write(str(out_message))
diff --git a/perf-testing/Docker/perfetto-guest.cfg b/perf-testing/Docker/perfetto-guest.cfg
new file mode 100644
index 00000000..960325a6
--- /dev/null
+++ b/perf-testing/Docker/perfetto-guest.cfg
@@ -0,0 +1,49 @@
+buffers {
+ size_kb: 655360
+ fill_policy: RING_BUFFER
+}
+
+data_sources {
+ config {
+ name: "linux.ftrace"
+ target_buffer: 0
+ ftrace_config {
+ ftrace_events: "virtio_gpu/virtio_gpu_cmd_queue"
+ ftrace_events: "virtio_gpu/virtio_gpu_cmd_response"
+
+ ftrace_events: "sched_switch"
+ ftrace_events: "sched_wakeup"
+
+ atrace_apps: "*"
+
+ compact_sched {
+ enabled: true
+ }
+ }
+ }
+}
+
+data_sources {
+ config {
+ name: "track_event"
+ track_event_config {
+ disabled_categories: "*"
+ enabled_categories: "Driver"
+# enabled_categories: "EGL"
+ enabled_categories: "Gallium"
+ enabled_categories: "OpenGL"
+ }
+ }
+}
+
+# Resolve process commandlines and parent/child relationships, to better
+# interpret the ftrace events, which are in terms of pids.
+data_sources {
+ config {
+ name: "linux.process_stats"
+ target_buffer: 0
+ }
+}
+
+write_into_file: true
+flush_period_ms: 10000
diff --git a/perf-testing/Docker/perfetto-host.cfg b/perf-testing/Docker/perfetto-host.cfg
new file mode 100644
index 00000000..dd179d33
--- /dev/null
+++ b/perf-testing/Docker/perfetto-host.cfg
@@ -0,0 +1,55 @@
+buffers {
+ size_kb: 655360
+ fill_policy: RING_BUFFER
+}
+
+data_sources {
+ config {
+ name: "linux.ftrace"
+ target_buffer: 0
+ ftrace_config {
+ ftrace_events: "sched_switch"
+ ftrace_events: "sched_wakeup"
+
+ atrace_apps: "*"
+
+ compact_sched {
+ enabled: true
+ }
+ }
+ }
+}
+
+data_sources {
+ config {
+ name: "track_event"
+ track_event_config {
+ disabled_categories: "*"
+ enabled_categories: "Driver"
+ enabled_categories: "EGL"
+ enabled_categories: "OpenGL"
+ enabled_categories: "VMM"
+ }
+ }
+}
+
+# Resolve process commandlines and parent/child relationships, to better
+# interpret the ftrace events, which are in terms of pids.
+data_sources {
+ config {
+ name: "linux.process_stats"
+ target_buffer: 0
+ }
+}
+
+data_sources {
+ config {
+ name: "gpu.metrics"
+ gpu_counter_config {
+ counter_period_ns: 1000000
+ }
+ }
+}
+
+write_into_file: true
+flush_period_ms: 250
diff --git a/perf-testing/Docker/run.sh b/perf-testing/Docker/run.sh
new file mode 100755
index 00000000..4cf1b013
--- /dev/null
+++ b/perf-testing/Docker/run.sh
@@ -0,0 +1,236 @@
+#!/bin/bash
+
+# This script is to be run on the KVM host, inside the container
+
+set -ex
+
+export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
+export PYTHONPATH=/usr/local/lib/python3.7/site-packages
+
+benchmark_loops=0
+perfetto_loops=10
+wait_after_frame=
+
+debug=no
+trace=
+command=""
+prep_snapshot=
+while [ -n "$1" ] ; do
+ case "$1" in
+
+ --trace|-t)
+ trace="$2"
+ shift
+ ;;
+
+ --benchmark|-b)
+ command="$command benchmark=$2"
+ benchmark_loops=$2
+ shift
+ ;;
+
+ --perfetto|-p)
+ command="$command perfetto=$2"
+ perfetto_loops=$2
+ shift
+ ;;
+
+ --wait-after-frame|-w)
+ command="$command wait-after-frame=1"
+ wait_after_frame="--wait-after-frame"
+ ;;
+
+ --snapshot|-s)
+ command="$command record-frame=1"
+ prep_snapshot=yes
+ ;;
+
+ --debug)
+ debug=yes
+ ;;
+ *)
+ echo "Unknown option '$1' given, run with option --help to see supported options"
+ exit
+ ;;
+ esac
+ shift
+done
+
+if [ "x$trace" = "x" ]; then
+    echo "No trace given in run script, you must pass it as a free parameter to the docker call"
+    exit 1
+fi
+
+pushd /mesa
+mkdir -p build
+
+if [ ! -f build/build.ninja ]; then
+ meson build/ \
+ -Dprefix=/usr/local \
+ -Ddri-drivers=i965 \
+ -Dgallium-drivers=swrast,virgl,radeonsi,iris \
+ -Dbuildtype=debugoptimized \
+ -Dllvm=true \
+ -Dglx=dri \
+ -Degl=true \
+ -Dgbm=false \
+ -Dgallium-vdpau=false \
+ -Dgallium-va=false \
+ -Dvulkan-drivers=[] \
+ -Dvalgrind=false \
+ -Dtracing=perfetto \
+ -Dlibdir=lib
+else
+ pushd build
+ meson configure \
+ -Dprefix=/usr/local \
+ -Ddri-drivers=i965 \
+ -Dgallium-drivers=swrast,virgl,radeonsi,iris \
+ -Dbuildtype=debugoptimized \
+ -Dllvm=true \
+ -Dglx=dri \
+ -Degl=true \
+ -Dgbm=false \
+ -Dgallium-vdpau=false \
+ -Dgallium-va=false \
+ -Dvulkan-drivers=[] \
+ -Dvalgrind=false \
+ -Dtracing=perfetto \
+ -Dlibdir=lib
+ popd
+fi
+ninja -C build/ install
+popd
+
+pushd /virglrenderer
+mkdir -p build
+
+if [ ! -f build/build.ninja ]; then
+ meson build/ \
+ -Dprefix=/usr/local \
+ -Dlibdir=lib \
+ -Dplatforms=egl \
+ -Dminigbm_allocation=true \
+ -Dtracing=perfetto \
+ -Dbuildtype=debugoptimized
+else
+ pushd build
+ meson configure \
+ -Dprefix=/usr/local \
+ -Dlibdir=lib \
+ -Dplatforms=egl \
+ -Dminigbm_allocation=true \
+ -Dtracing=perfetto \
+ -Dbuildtype=debugoptimized
+ popd
+fi
+ninja -C build/ install
+popd
+
+# Crosvm needs to link with minigbm, due to incompatible ABI
+export LD_PRELOAD=/usr/lib/libminigbm.so.1.0.0
+
+export PATH="/apitrace/build:$PATH"
+export PATH="/waffle/build/bin:$PATH"
+export LD_LIBRARY_PATH="/waffle/build/lib:$LD_LIBRARY_PATH"
+export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
+export LD_LIBRARY_PATH="/usr/local/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH"
+
+trace_no_ext=${trace%.*}
+datadir="/traces-db/${trace_no_ext}-out"
+
+echo "Host:"
+wflinfo --platform surfaceless_egl --api gles2
+
+export EGL_PLATFORM="surfaceless"
+export WAFFLE_PLATFORM="surfaceless_egl"
+export DISPLAY=
+
+if [ "x$benchmark_loops" != "x0" ]; then
+ echo "Measuring rendering times:"
+ eglretrace --benchmark --loop=$benchmark_loops --headless "/traces-db/${trace}"
+fi
+
+# To keep Perfetto happy
+echo 0 > /sys/kernel/debug/tracing/tracing_on
+echo nop > /sys/kernel/debug/tracing/current_tracer
+
+if [ "x$perfetto_loops" = "x" ] ; then
+  echo "perfetto_loops parameter not given"
+fi
+
+if [ "x$perfetto_loops" != "x0" ]; then
+ /perfetto/out/dist/traced &
+ /perfetto/out/dist/traced_probes &
+ sleep 1
+ /gfx-pps/build/src/gpu/producer-gpu &
+ sleep 1
+ /perfetto/out/dist/perfetto --txt -c /usr/local/perfetto-host.cfg -o /tmp/perfetto-host.trace --detach=mykey
+ sleep 1
+
+ echo "Replaying for Perfetto:"
+ eglretrace --benchmark --singlethread --loop=$perfetto_loops $wait_after_frame --headless "/traces-db/${trace}"
+fi
+
+
+iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+echo 1 > /proc/sys/net/ipv4/ip_forward
+
+
+# store name of trace to be replayed so the guest can obtain the name
+echo $trace_no_ext > /traces-db/current_trace
+echo $command > /traces-db/command
+
+trace_base=$(basename $trace_no_ext)
+guest_perf="$datadir/${trace_base}-guest.perfetto"
+host_perf="$datadir/${trace_base}-host.perfetto"
+summary_perf="$datadir/${trace_base}-summary.perfetto"
+
+mkdir -p $datadir
+
+# work around Crosvm crashing because of errors in context
+# handling, could be a problem with the kernel and/or with virglrenderer
+export MESA_EXTENSION_OVERRIDE="-GL_ARB_buffer_storage -GL_EXT_buffer_storage"
+
+if [ "x$debug" = "xyes" ]; then
+ export EGL_DEBUG=debug
+fi
+
+
+if [ -e /wd/crosvm-debug.cmd ]; then
+ gdb -x /wd/crosvm-debug.cmd
+else
+ crosvm run \
+ --gpu gles=false\
+ -m 4096 \
+ -c 4 \
+ -i /rootfs.cpio.gz \
+ --shared-dir "/usr/local:local:type=fs" \
+ --shared-dir "/waffle:waffle-tag:type=fs" \
+ --shared-dir "/apitrace:apitrace-tag:type=fs" \
+ --shared-dir "/traces-db:traces-db-tag:type=fs" \
+ --shared-dir "/perfetto:perfetto-tag:type=fs" \
+ --host_ip 192.168.200.1 --netmask 255.255.255.0 --mac AA:BB:CC:00:00:12 \
+ -p "root=/dev/ram0 rdinit=/init.sh ip=192.168.200.2::192.168.200.1:255.255.255.0:crosvm:eth0 nohz=off clocksource=kvm-clock" \
+ /vmlinux
+fi
+
+rm -f /traces-db/current_trace
+rm -f /traces-db/command
+
+if [ "x$perfetto_loops" != "x0" ]; then
+ /perfetto/out/dist/perfetto --attach=mykey --stop
+
+ mv /tmp/perfetto-host.trace "$host_perf"
+ chmod a+rw "$host_perf"
+
+    # sometimes one of these processes seems to crash or exit early, so
+    # don't abort if a kill fails because the process is already gone
+    kill `pidof producer-gpu` || echo "producer-gpu was not running (anymore)"
+    kill `pidof traced_probes` || echo "traced_probes was not running (anymore)"
+    kill `pidof traced` || echo "traced was not running (anymore)"
+
+ /usr/local/merge_traces.py "$host_perf" "$guest_perf" "$summary_perf"
+fi
+
+sleep 1
diff --git a/perf-testing/Docker/run_perfetto_ui.sh b/perf-testing/Docker/run_perfetto_ui.sh
new file mode 100755
index 00000000..60f5e0d9
--- /dev/null
+++ b/perf-testing/Docker/run_perfetto_ui.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+root=""
+
+if [ "x$1" != "x" ]; then
+ root="$1"
+fi
+cd "$root/perfetto"
+GN_ARGS="is_debug=false use_custom_libcxx=false"
+tools/install-build-deps --ui
+tools/gn gen out/dist --args="${GN_ARGS}" --check
+tools/ninja -C out/dist traced traced_probes perfetto trace_to_text ui trace_processor_shell
+ui/run-dev-server out/dist/
diff --git a/perf-testing/Docker/run_traces.sh b/perf-testing/Docker/run_traces.sh
new file mode 100644
index 00000000..653565fd
--- /dev/null
+++ b/perf-testing/Docker/run_traces.sh
@@ -0,0 +1,119 @@
+# This script is to be run on the KVM guest
+
+set -ex
+
+mkdir /waffle
+mount -t virtiofs waffle-tag /waffle
+
+mkdir /apitrace
+mount -t virtiofs apitrace-tag /apitrace
+
+mkdir /traces-db
+mount -t virtiofs traces-db-tag /traces-db
+
+mkdir /perfetto
+mount -t virtiofs perfetto-tag /perfetto
+
+echo 3 > /proc/sys/kernel/printk
+
+export PATH="/apitrace/build:$PATH"
+export PATH="/waffle/build/bin:$PATH"
+export LD_LIBRARY_PATH="/waffle/build/lib:$LD_LIBRARY_PATH"
+export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
+export EGL_PLATFORM="surfaceless"
+export WAFFLE_PLATFORM="surfaceless_egl"
+export MESA_GL_VERSION_OVERRIDE="4.5"
+export DISPLAY=
+
+# Comment out any other sources, so it only syncs to the host via PTP
+sed -i '/pool/s/^/#/' /etc/chrony/chrony.conf
+echo refclock PHC /dev/ptp0 poll 1 dpoll -2 offset 0 >> /etc/chrony/chrony.conf
+echo cmdport 0 >> /etc/chrony/chrony.conf
+echo bindcmdaddress / >> /etc/chrony/chrony.conf
+
+mkdir -p /run/chrony
+time chronyd -q # Initial synchronization, will take some time
+chronyd # Keep clocks in sync
+
+# Get trace cached
+trace_no_ext=$(cat /traces-db/current_trace)
+if [ "x$trace_no_ext" = "x" ]; then
+ echo "No trace given, bailing out"
+ exit 1
+fi
+
+command=$(cat /traces-db/command)
+echo command=$command
+
+WAIT=
+RECORD=
+benchmark_loops=0
+perfetto_loops=10
+
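+# The host writes the requested options to /traces-db/command as space-separated
+# key=value entries; split each entry on '=' to recover the option and its value.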
+for c in $command; do
+
+ val=(${c//=/ })
+ case "${val[0]}" in
+ benchmark)
+ benchmark_loops=${val[1]}
+ ;;
+
+ perfetto)
+ perfetto_loops=${val[1]}
+ ;;
+
+ wait-after-frame)
+ WAIT="--wait-after-frame"
+ ;;
+
+ record-frame)
+ RECORD="--snapshot"
+ ;;
+
+ esac
+done
+
+if [ -e /traces-db/wait_after_frame ]; then
+    WAIT=--wait-after-frame
+fi
+
+trace="/traces-db/${trace_no_ext}.trace"
+datadir="/traces-db/${trace_no_ext}-out"
+trace_base=$(basename ${trace_no_ext})
+guest_perf="$datadir/${trace_base}-guest.perfetto"
+
+cat "$trace" > /dev/null
+
+# To keep Perfetto happy
+echo 0 > /sys/kernel/debug/tracing/tracing_on
+echo nop > /sys/kernel/debug/tracing/current_tracer
+
+echo "Guest:"
+wflinfo --platform surfaceless_egl --api gles2 -v
+
+if [ "x$perfetto_loops" != "x" -a "x$perfetto_loops" != "x0" ]; then
+ /perfetto/out/dist/traced &
+ /perfetto/out/dist/traced_probes &
+ sleep 1
+
+ /perfetto/out/dist/perfetto --txt -c /usr/local/perfetto-guest.cfg -o "$guest_perf" --detach=mykey
+ sleep 1
+
+ # The first virtio-gpu event has to be captured in the guest, so we correlate correctly to the host event
+
+ echo "Replaying for Perfetto:"
+ eglretrace --benchmark --singlethread --loop=$perfetto_loops $WAIT --headless "$trace"
+ sleep 1
+
+ /perfetto/out/dist/perfetto --attach=mykey --stop
+ chmod a+rw "$guest_perf"
+fi
+
+if [ "x$benchmark_loops" != "x0" ]; then
+ echo "Measuring rendering times:"
+ eglretrace --benchmark --loop=$benchmark_loops --headless "$trace"
+fi
+
+if [ "x$RECORD" != "x" ]; then
+ eglretrace --snapshot frame --snapshot-prefix=${datadir}/ --headless "$trace"
+fi
diff --git a/perf-testing/Docker/x86_64.config b/perf-testing/Docker/x86_64.config
new file mode 100644
index 00000000..041bd2f7
--- /dev/null
+++ b/perf-testing/Docker/x86_64.config
@@ -0,0 +1,37 @@
+CONFIG_DRM=y
+CONFIG_MMU=y
+CONFIG_FUSE_FS=y
+
+CONFIG_FUNCTION_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE=y
+CONFIG_TRACEPOINTS=y
+CONFIG_DMA_FENCE_TRACE=y
+CONFIG_STACK_TRACER=y
+CONFIG_DYNAMIC_DEBUG=y
+
+CONFIG_VIRTUALIZATION=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_FS=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_BLK_SCSI=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_NET_9P_VIRTIO=y
+
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_KVM=y
+CONFIG_KVM=y
+CONFIG_KVM_GUEST=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+
diff --git a/perf-testing/README.md b/perf-testing/README.md
new file mode 100644
index 00000000..3e492ddb
--- /dev/null
+++ b/perf-testing/README.md
@@ -0,0 +1,71 @@
+The files in this directory help with testing Virgl on the virtio-gpu winsys
+by means of Crosvm.
+
+A whole environment will be built in a Docker image, then Mesa and Virglrenderer
+will be built from local directories to be used both in the host and the guest.
+
+The container image builds on top of other images built by scripts in the crosvm repository.
+
+Instructions for building base images:
+
+```console
+$ git clone https://chromium.googlesource.com/chromiumos/platform/crosvm
+$ pushd crosvm
+$ sh docker/build_crosvm_base.sh
+$ sh docker/build_crosvm.sh
+```
+
+Instructions for building target image:
+
+```console
+$ cd virglrenderer
+$ sh perf-testing/build-dockerimage.sh
+```
+
+Instructions for running the container:
+
+```console
+$ cd virglrenderer
+$ bash perf-testing/run-trace-in-container.sh \
+ --root $PATH_THAT_CONTAINS_MESA_CHECKOUT_VIRGLRENDERER_AND_TRACES_DB_CHECKOUT \
+ --trace $API_TRACE_TO_RUN
+```
+
+There are also options for run-trace-in-container.sh that allow specifying the
+paths to mesa, virglrenderer, and the traces db individually. These override the root path.
+In addition, the root path defaults to the current working directory.
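+
+For example, a hypothetical invocation that overrides the individual paths instead
+of relying on --root could look like this (the directories are placeholders):
+
+```console
+$ bash perf-testing/run-trace-in-container.sh \
+    --mesa /path/to/mesa \
+    --virgl /path/to/virglrenderer \
+    --traces-db /path/to/traces-db \
+    --trace sometrace.trace
+```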
+
+As a convenience for shell autocompletion, when running the script from the default
+root that contains the traces db as a subdirectory, the trace file name can also be
+given with this traces db subdirectory name, i.e. if the traces db is located
+in '$workdir/traces-db', root=$workdir, and the trace is called 'sometrace.trace',
+then both commands
+```
+  perf-testing/run-trace-in-container.sh -r $rootdir -t traces-db/sometrace.trace
+```
+and
+```
+  perf-testing/run-trace-in-container.sh -r $rootdir -t sometrace.trace
+```
+will work equally.
+
+At the moment of writing, the perfetto-tracing branch of mesa is needed at
+commit ec4277aea63cf3, and for virglrenderer at least the commit
+"perf: compile mesa with perfeto support" is needed so that these projects
+emit the required traces.
+
+The perfetto traces will be saved to a subdirectory of the traces-db checkout
+directory, with a name based on the API trace passed in with the --trace parameter.
+
+Once the run-trace-in-container.sh script finishes, three Perfetto trace files will be written:
+${API_TRACE_TO_RUN%.*}-host.perfetto, ${API_TRACE_TO_RUN%.*}-guest.perfetto
+and ${API_TRACE_TO_RUN%.*}-summary.perfetto. The last one is a merge of the first two.
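+
+For instance, assuming a hypothetical trace called sometrace.trace, the results
+end up in the sometrace-out subdirectory of the traces db:
+
+```console
+$ ls traces-db/sometrace-out/
+sometrace-guest.perfetto  sometrace-host.perfetto  sometrace-summary.perfetto
+```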
+
+In order to visualize the traces, the Perfetto UI needs to be running as a local
+service, which can be started as follows:
+
+```console
+$ perf-testing/perfetto-ui.sh
+```
+
+The Perfetto UI can then be loaded in Chromium at http://localhost:10000.
diff --git a/perf-testing/build-dockerimage.sh b/perf-testing/build-dockerimage.sh
new file mode 100755
index 00000000..72de606f
--- /dev/null
+++ b/perf-testing/build-dockerimage.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -ex
+cd "${0%/*}"
+
+export USER_ID=$(id -u)
+export GROUP_ID=$(id -g)
+src_root="$(realpath ..)"
+
+docker build -t mesa \
+ -f Docker/Dockerfile \
+ --build-arg USER_ID=${USER_ID} \
+ --build-arg GROUP_ID=${GROUP_ID} \
+ "$@" \
+ "${src_root}"
diff --git a/perf-testing/perfetto-ui.sh b/perf-testing/perfetto-ui.sh
new file mode 100755
index 00000000..c75b67e9
--- /dev/null
+++ b/perf-testing/perfetto-ui.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script is to be run on the KVM host, outside the container
+
+set -ex
+
+# grab the pwd before changing it to this script's directory
+pwd="${PWD}"
+
+cd "${0%/*}"
+
+exec docker run -it --rm \
+ --privileged \
+ --ipc=host \
+ -v /dev/log:/dev/log \
+ -v /dev/vhost-net:/dev/vhost-net \
+ -v /sys/kernel/debug:/sys/kernel/debug \
+ --volume "$pwd":/wd \
+ --workdir /wd \
+ -p 127.0.0.1:10000:10000/tcp \
+ --entrypoint /usr/local/run_perfetto_ui.sh \
+ mesa
diff --git a/perf-testing/run-trace-in-container.sh b/perf-testing/run-trace-in-container.sh
new file mode 100755
index 00000000..657a0bd2
--- /dev/null
+++ b/perf-testing/run-trace-in-container.sh
@@ -0,0 +1,200 @@
+#!/bin/bash
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script is to be run on the KVM host, outside the container
+
+
+#set -ex
+
+# grab the pwd before changing it to this script's directory
+pwd="${PWD}"
+root_dir="$(pwd)"
+
+cd "${0%/*}"
+
+mesa_src=""
+virgl_src=""
+traces_db=""
+kernel_src=""
+benchmark_loops=0
+perfetto_loops=10
+wait_after_frame=
+
+print_help() {
+    echo "Run GL trace with perfetto"
+    echo "Usage: run-trace-in-container.sh [options]"
+ echo ""
+ echo " --root, -r Path to a root directory that contains the sources for mesa, virglrenderer, and the trace-db"
+ echo " --mesa, -m Path to Mesa source code (overrides path generated from root)"
+ echo " --virgl, -v Path to virglrenderer source code (overrides path generated from root)"
+ echo " --kernel, -k Path to Linux kernel source code"
+ echo " --traces-db, -d Path to the directory containing the traces (overrides path generated from root)"
+ echo " --trace, -t Trace to be run (path relative to traces-db) (required)"
+ echo " --benchmark, -b Number of times the last frame should be run for benchmarking (default 0=disabled)"
+    echo "  --perfetto, -p   Number of times the last frame should be looped for perfetto (default 10; 0=run trace normally)"
+ echo " --snapshot, -s Make per-frame snapshots"
+ echo " --debug Enable extra logging"
+ echo ""
+ echo " --help, -h Print this help"
+}
+
+command=""
+
+while [ -n "$1" ] ; do
+ case "$1" in
+
+ --root|-r)
+ root_dir="$2"
+ shift
+ ;;
+
+ --mesa|-m)
+ mesa_src="$2"
+ shift
+ ;;
+
+ --virgl|-v)
+ virgl_src="$2"
+ shift
+ ;;
+
+ --traces-db|-d)
+ traces_db="$2"
+ shift
+ ;;
+
+ --kernel|-k)
+ kernel_src="$2"
+ shift
+ ;;
+
+ --help|-h)
+ print_help
+ exit
+ ;;
+
+ --trace|-t)
+ trace="$2"
+ shift
+ ;;
+
+ --benchmark|-b)
+ command="$command -b $2"
+ shift
+ ;;
+
+ --perfetto|-p)
+ command="$command -p $2"
+ shift
+ ;;
+
+ --wait-after-frame|-w)
+ command="$command -w"
+ ;;
+
+ --snapshot|-s)
+ command="$command -s"
+ ;;
+
+ --debug)
+ command="$command --debug"
+ ;;
+ *)
+ echo "Unknown option '$1' given, run with option --help to see supported options"
+ exit
+ ;;
+ esac
+ shift
+done
+
+if [ "x$mesa_src" = "x" ] ; then
+ mesa_src="$root_dir/mesa"
+fi
+
+if [ "x$virgl_src" = "x" ] ; then
+ virgl_src="$root_dir/virglrenderer"
+fi
+
+if [ "x$traces_db" = "x" ] ; then
+ traces_db="$root_dir/traces-db"
+fi
+
+can_run=1
+
+if [ "x$trace" = "x" ]; then
+ echo "No trace given" >&2;
+ can_run=0
+fi
+
+if [ "x$mesa_src" = "x" ]; then
+ echo "no mesa src dir given" >&2;
+ can_run=0
+fi
+
+if [ ! -d "$mesa_src/src/mesa" ]; then
+ echo "mesa src dir '$mesa_src' is not a mesa source tree" >&2;
+ can_run=0
+fi
+
+if [ "x$virgl_src" = "x" ]; then
+ echo "no virglrenderer src dir given" >&2;
+ can_run=0
+fi
+
+if [ ! -d "$virgl_src/vtest" ]; then
+ echo "virglrenderer src dir '$virgl_src' is not a virglrenderer source tree" >&2;
+ can_run=0
+fi
+
+if [ "x$traces_db" = "x" ]; then
+ echo "no traces_db dir given" >&2;
+ can_run=0
+fi
+
+if [ ! -f "$traces_db/$trace" ]; then
+ echo "Given trace file '$trace' doesn't exist in traces db dir '$traces_db'" >&2;
+ # check whether the trace has been given with a path relative to the root dir
+ # that can be removed
+ trace=${trace#*/}
+ echo "Trying $traces_db/$trace" >&2;
+ if [ ! -f "$traces_db/$trace" ]; then
+ echo "Given trace file '$trace' not found " >&2;
+ can_run=0
+ fi
+fi
+
+if [ "x$can_run" = "x0" ]; then
+    echo "Missing or erroneous command line options were given" >&2;
+ exit 1
+fi
+
+re='^[0-9]+$'
+if ! [[ 1$benchmark_loops =~ $re ]] ; then
+    echo "error: benchmark_loops '$benchmark_loops' is not a number" >&2;
+ exit 1
+fi
+
+if ! [[ 1$perfetto_loops =~ $re ]] ; then
+    echo "error: perfetto_loops '$perfetto_loops' is not a number" >&2;
+ exit 1
+fi
+
+echo "command=$command"
+
+docker run -it --rm \
+ --privileged \
+ --ipc=host \
+ -v /dev/log:/dev/log \
+ -v /dev/vhost-net:/dev/vhost-net \
+ -v /sys/kernel/debug:/sys/kernel/debug \
+ -v "$mesa_src":/mesa \
+ -v "$virgl_src":/virglrenderer \
+ -v "$traces_db":/traces-db \
+ -v "$kernel_src":/kernel \
+ --volume "$pwd":/wd \
+ --workdir /wd \
+ mesa \
+ -t "$trace" \
+ $command
diff --git a/prebuilt-intermediates/src/u_format_table.c b/prebuilt-intermediates/src/u_format_table.c
index 8bfe3a52..58bd9431 100644
--- a/prebuilt-intermediates/src/u_format_table.c
+++ b/prebuilt-intermediates/src/u_format_table.c
@@ -10286,6 +10286,734 @@ util_format_etc2_rg11_snorm_description = {
UTIL_FORMAT_COLORSPACE_RGB,
};
+const struct util_format_description
+util_format_astc_4x4_description = {
+ PIPE_FORMAT_ASTC_4x4,
+ "PIPE_FORMAT_ASTC_4x4",
+ "astc_4x4",
+ {4, 4, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_5x4_description = {
+ PIPE_FORMAT_ASTC_5x4,
+ "PIPE_FORMAT_ASTC_5x4",
+ "astc_5x4",
+ {5, 4, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_5x5_description = {
+ PIPE_FORMAT_ASTC_5x5,
+ "PIPE_FORMAT_ASTC_5x5",
+ "astc_5x5",
+ {5, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_6x5_description = {
+ PIPE_FORMAT_ASTC_6x5,
+ "PIPE_FORMAT_ASTC_6x5",
+ "astc_6x5",
+ {6, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_6x6_description = {
+ PIPE_FORMAT_ASTC_6x6,
+ "PIPE_FORMAT_ASTC_6x6",
+ "astc_6x6",
+ {6, 6, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_8x5_description = {
+ PIPE_FORMAT_ASTC_8x5,
+ "PIPE_FORMAT_ASTC_8x5",
+ "astc_8x5",
+ {8, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_8x6_description = {
+ PIPE_FORMAT_ASTC_8x6,
+ "PIPE_FORMAT_ASTC_8x6",
+ "astc_8x6",
+ {8, 6, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_8x8_description = {
+ PIPE_FORMAT_ASTC_8x8,
+ "PIPE_FORMAT_ASTC_8x8",
+ "astc_8x8",
+ {8, 8, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_10x5_description = {
+ PIPE_FORMAT_ASTC_10x5,
+ "PIPE_FORMAT_ASTC_10x5",
+ "astc_10x5",
+ {10, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_10x6_description = {
+ PIPE_FORMAT_ASTC_10x6,
+ "PIPE_FORMAT_ASTC_10x6",
+ "astc_10x6",
+ {10, 6, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_10x8_description = {
+ PIPE_FORMAT_ASTC_10x8,
+ "PIPE_FORMAT_ASTC_10x8",
+ "astc_10x8",
+ {10, 8, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_10x10_description = {
+ PIPE_FORMAT_ASTC_10x10,
+ "PIPE_FORMAT_ASTC_10x10",
+ "astc_10x10",
+ {10, 10, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_12x10_description = {
+ PIPE_FORMAT_ASTC_12x10,
+ "PIPE_FORMAT_ASTC_12x10",
+ "astc_12x10",
+ {12, 10, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_12x12_description = {
+ PIPE_FORMAT_ASTC_12x12,
+ "PIPE_FORMAT_ASTC_12x12",
+ "astc_12x12",
+ {12, 12, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* r */
+ UTIL_FORMAT_SWIZZLE_Y, /* g */
+ UTIL_FORMAT_SWIZZLE_Z, /* b */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_RGB,
+};
+
+const struct util_format_description
+util_format_astc_4x4_srgb_description = {
+ PIPE_FORMAT_ASTC_4x4_SRGB,
+ "PIPE_FORMAT_ASTC_4x4_SRGB",
+ "astc_4x4_srgb",
+ {4, 4, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_5x4_srgb_description = {
+ PIPE_FORMAT_ASTC_5x4_SRGB,
+ "PIPE_FORMAT_ASTC_5x4_SRGB",
+ "astc_5x4_srgb",
+ {5, 4, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_5x5_srgb_description = {
+ PIPE_FORMAT_ASTC_5x5_SRGB,
+ "PIPE_FORMAT_ASTC_5x5_SRGB",
+ "astc_5x5_srgb",
+ {5, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_6x5_srgb_description = {
+ PIPE_FORMAT_ASTC_6x5_SRGB,
+ "PIPE_FORMAT_ASTC_6x5_SRGB",
+ "astc_6x5_srgb",
+ {6, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_6x6_srgb_description = {
+ PIPE_FORMAT_ASTC_6x6_SRGB,
+ "PIPE_FORMAT_ASTC_6x6_SRGB",
+ "astc_6x6_srgb",
+ {6, 6, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_8x5_srgb_description = {
+ PIPE_FORMAT_ASTC_8x5_SRGB,
+ "PIPE_FORMAT_ASTC_8x5_SRGB",
+ "astc_8x5_srgb",
+ {8, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_8x6_srgb_description = {
+ PIPE_FORMAT_ASTC_8x6_SRGB,
+ "PIPE_FORMAT_ASTC_8x6_SRGB",
+ "astc_8x6_srgb",
+ {8, 6, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_8x8_srgb_description = {
+ PIPE_FORMAT_ASTC_8x8_SRGB,
+ "PIPE_FORMAT_ASTC_8x8_SRGB",
+ "astc_8x8_srgb",
+ {8, 8, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_10x5_srgb_description = {
+ PIPE_FORMAT_ASTC_10x5_SRGB,
+ "PIPE_FORMAT_ASTC_10x5_SRGB",
+ "astc_10x5_srgb",
+ {10, 5, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_10x6_srgb_description = {
+ PIPE_FORMAT_ASTC_10x6_SRGB,
+ "PIPE_FORMAT_ASTC_10x6_SRGB",
+ "astc_10x6_srgb",
+ {10, 6, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_10x8_srgb_description = {
+ PIPE_FORMAT_ASTC_10x8_SRGB,
+ "PIPE_FORMAT_ASTC_10x8_SRGB",
+ "astc_10x8_srgb",
+ {10, 8, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_10x10_srgb_description = {
+ PIPE_FORMAT_ASTC_10x10_SRGB,
+ "PIPE_FORMAT_ASTC_10x10_SRGB",
+ "astc_10x10_srgb",
+ {10, 10, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_12x10_srgb_description = {
+ PIPE_FORMAT_ASTC_12x10_SRGB,
+ "PIPE_FORMAT_ASTC_12x10_SRGB",
+ "astc_12x10_srgb",
+ {12, 10, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
+const struct util_format_description
+util_format_astc_12x12_srgb_description = {
+ PIPE_FORMAT_ASTC_12x12_SRGB,
+ "PIPE_FORMAT_ASTC_12x12_SRGB",
+ "astc_12x12_srgb",
+ {12, 12, 128}, /* block */
+ UTIL_FORMAT_LAYOUT_ASTC,
+ 1, /* nr_channels */
+ FALSE, /* is_array */
+ FALSE, /* is_bitmask */
+ FALSE, /* is_mixed */
+ {
+ {UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 128, 0}, /* x = x */
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+ },
+ {
+ UTIL_FORMAT_SWIZZLE_X, /* sr */
+ UTIL_FORMAT_SWIZZLE_Y, /* sg */
+ UTIL_FORMAT_SWIZZLE_Z, /* sb */
+ UTIL_FORMAT_SWIZZLE_W /* a */
+ },
+ UTIL_FORMAT_COLORSPACE_SRGB,
+};
+
const struct util_format_description *
util_format_description(enum pipe_format format)
{
@@ -10846,6 +11574,62 @@ util_format_description(enum pipe_format format)
return &util_format_etc2_rg11_unorm_description;
case PIPE_FORMAT_ETC2_RG11_SNORM:
return &util_format_etc2_rg11_snorm_description;
+ case PIPE_FORMAT_ASTC_4x4:
+ return &util_format_astc_4x4_description;
+ case PIPE_FORMAT_ASTC_5x4:
+ return &util_format_astc_5x4_description;
+ case PIPE_FORMAT_ASTC_5x5:
+ return &util_format_astc_5x5_description;
+ case PIPE_FORMAT_ASTC_6x5:
+ return &util_format_astc_6x5_description;
+ case PIPE_FORMAT_ASTC_6x6:
+ return &util_format_astc_6x6_description;
+ case PIPE_FORMAT_ASTC_8x5:
+ return &util_format_astc_8x5_description;
+ case PIPE_FORMAT_ASTC_8x6:
+ return &util_format_astc_8x6_description;
+ case PIPE_FORMAT_ASTC_8x8:
+ return &util_format_astc_8x8_description;
+ case PIPE_FORMAT_ASTC_10x5:
+ return &util_format_astc_10x5_description;
+ case PIPE_FORMAT_ASTC_10x6:
+ return &util_format_astc_10x6_description;
+ case PIPE_FORMAT_ASTC_10x8:
+ return &util_format_astc_10x8_description;
+ case PIPE_FORMAT_ASTC_10x10:
+ return &util_format_astc_10x10_description;
+ case PIPE_FORMAT_ASTC_12x10:
+ return &util_format_astc_12x10_description;
+ case PIPE_FORMAT_ASTC_12x12:
+ return &util_format_astc_12x12_description;
+ case PIPE_FORMAT_ASTC_4x4_SRGB:
+ return &util_format_astc_4x4_srgb_description;
+ case PIPE_FORMAT_ASTC_5x4_SRGB:
+ return &util_format_astc_5x4_srgb_description;
+ case PIPE_FORMAT_ASTC_5x5_SRGB:
+ return &util_format_astc_5x5_srgb_description;
+ case PIPE_FORMAT_ASTC_6x5_SRGB:
+ return &util_format_astc_6x5_srgb_description;
+ case PIPE_FORMAT_ASTC_6x6_SRGB:
+ return &util_format_astc_6x6_srgb_description;
+ case PIPE_FORMAT_ASTC_8x5_SRGB:
+ return &util_format_astc_8x5_srgb_description;
+ case PIPE_FORMAT_ASTC_8x6_SRGB:
+ return &util_format_astc_8x6_srgb_description;
+ case PIPE_FORMAT_ASTC_8x8_SRGB:
+ return &util_format_astc_8x8_srgb_description;
+ case PIPE_FORMAT_ASTC_10x5_SRGB:
+ return &util_format_astc_10x5_srgb_description;
+ case PIPE_FORMAT_ASTC_10x6_SRGB:
+ return &util_format_astc_10x6_srgb_description;
+ case PIPE_FORMAT_ASTC_10x8_SRGB:
+ return &util_format_astc_10x8_srgb_description;
+ case PIPE_FORMAT_ASTC_10x10_SRGB:
+ return &util_format_astc_10x10_srgb_description;
+ case PIPE_FORMAT_ASTC_12x10_SRGB:
+ return &util_format_astc_12x10_srgb_description;
+ case PIPE_FORMAT_ASTC_12x12_SRGB:
+ return &util_format_astc_12x12_srgb_description;
default:
return NULL;
}
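
The descriptors above expose the ASTC block geometry through the {width, height, bits} triple, which is all a caller needs to size a compressed level. Below is a minimal sketch of that calculation, assuming the usual util_format block semantics (block.bits covers one block.width x block.height tile); the helper name is illustrative and not part of this patch.

/* Sketch: byte size of one mip level of a block-compressed (e.g. ASTC)
 * image, derived from its util_format_description. */
static unsigned
example_compressed_image_size(enum pipe_format format,
                              unsigned width, unsigned height)
{
   const struct util_format_description *desc = util_format_description(format);
   if (!desc)
      return 0;

   /* round each dimension up to whole blocks */
   unsigned blocks_x = (width + desc->block.width - 1) / desc->block.width;
   unsigned blocks_y = (height + desc->block.height - 1) / desc->block.height;

   /* every ASTC block is 128 bits regardless of its footprint */
   return blocks_x * blocks_y * (desc->block.bits / 8);
}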
diff --git a/src/gallium/auxiliary/cso_cache/cso_cache.c b/src/gallium/auxiliary/cso_cache/cso_cache.c
index 232fa9ec..0bd124e6 100644
--- a/src/gallium/auxiliary/cso_cache/cso_cache.c
+++ b/src/gallium/auxiliary/cso_cache/cso_cache.c
@@ -54,8 +54,10 @@ static unsigned hash_key(const void *key, unsigned key_size)
/* I'm sure this can be improved on:
*/
- for (i = 0; i < key_size/4; i++)
+ for (i = 0; i < key_size/4; i++) {
+ hash = (hash << 7) | (hash >> 25);
hash ^= ikey[i];
+ }
return hash;
}
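
The added rotate makes the hash order-sensitive: a plain XOR of the key dwords maps any permutation of the same dwords to the same bucket, while rotating the accumulator by 7 bits per word preserves positional information. A standalone sketch of the resulting mix, assuming 32-bit unsigned (illustrative only):

/* Sketch of the rotate-and-xor mix used above; without the rotate, keys
 * differing only in dword order would collide. */
static unsigned
example_hash_dwords(const unsigned *ikey, unsigned ndwords)
{
   unsigned hash = 0;
   for (unsigned i = 0; i < ndwords; i++) {
      hash = (hash << 7) | (hash >> 25);  /* rotate left by 7 (32-bit) */
      hash ^= ikey[i];
   }
   return hash;
}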
diff --git a/src/gallium/auxiliary/util/u_format.csv b/src/gallium/auxiliary/util/u_format.csv
index 2409a610..1d743a69 100644
--- a/src/gallium/auxiliary/util/u_format.csv
+++ b/src/gallium/auxiliary/util/u_format.csv
@@ -409,3 +409,33 @@ PIPE_FORMAT_ETC2_R11_UNORM , etc, 4, 4, x64, , , , x001, rgb
PIPE_FORMAT_ETC2_R11_SNORM , etc, 4, 4, x64, , , , x001, rgb
PIPE_FORMAT_ETC2_RG11_UNORM , etc, 4, 4, x128, , , , xy01, rgb
PIPE_FORMAT_ETC2_RG11_SNORM , etc, 4, 4, x128, , , , xy01, rgb
+
+PIPE_FORMAT_ASTC_4x4 , astc, 4, 4, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_5x4 , astc, 5, 4, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_5x5 , astc, 5, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_6x5 , astc, 6, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_6x6 , astc, 6, 6, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_8x5 , astc, 8, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_8x6 , astc, 8, 6, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_8x8 , astc, 8, 8, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x5 , astc,10, 5, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x6 , astc,10, 6, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x8 , astc,10, 8, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_10x10 , astc,10,10, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_12x10 , astc,12,10, x128, , , , xyzw, rgb
+PIPE_FORMAT_ASTC_12x12 , astc,12,12, x128, , , , xyzw, rgb
+
+PIPE_FORMAT_ASTC_4x4_SRGB , astc, 4, 4, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_5x4_SRGB , astc, 5, 4, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_5x5_SRGB , astc, 5, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_6x5_SRGB , astc, 6, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_6x6_SRGB , astc, 6, 6, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_8x5_SRGB , astc, 8, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_8x6_SRGB , astc, 8, 6, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_8x8_SRGB , astc, 8, 8, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x5_SRGB , astc,10, 5, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x6_SRGB , astc,10, 6, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x8_SRGB , astc,10, 8, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_10x10_SRGB , astc,10,10, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_12x10_SRGB , astc,12,10, x128, , , , xyzw, srgb
+PIPE_FORMAT_ASTC_12x12_SRGB , astc,12,12, x128, , , , xyzw, srgb \ No newline at end of file
diff --git a/src/gallium/auxiliary/util/u_format.h b/src/gallium/auxiliary/util/u_format.h
index 1990c606..978b9201 100644
--- a/src/gallium/auxiliary/util/u_format.h
+++ b/src/gallium/auxiliary/util/u_format.h
@@ -83,10 +83,19 @@ enum util_format_layout {
*/
UTIL_FORMAT_LAYOUT_BPTC = 7,
+ UTIL_FORMAT_LAYOUT_ASTC = 8,
+
+ UTIL_FORMAT_LAYOUT_ATC = 9,
+
+ /** Formats with 2 or more planes. */
+ UTIL_FORMAT_LAYOUT_PLANAR2 = 10,
+ UTIL_FORMAT_LAYOUT_PLANAR3 = 11,
+
+ UTIL_FORMAT_LAYOUT_FXT1 = 12,
/**
* Everything else that doesn't fit in any of the above layouts.
*/
- UTIL_FORMAT_LAYOUT_OTHER = 8
+ UTIL_FORMAT_LAYOUT_OTHER = 13,
};
@@ -299,6 +308,9 @@ util_format_is_compressed(enum pipe_format format)
case UTIL_FORMAT_LAYOUT_RGTC:
case UTIL_FORMAT_LAYOUT_ETC:
case UTIL_FORMAT_LAYOUT_BPTC:
+ case UTIL_FORMAT_LAYOUT_ASTC:
+ case UTIL_FORMAT_LAYOUT_ATC:
+ case UTIL_FORMAT_LAYOUT_FXT1:
/* XXX add other formats in the future */
return TRUE;
default:
diff --git a/src/meson.build b/src/meson.build
index d854027f..257d7dc7 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -86,6 +86,10 @@ if with_tracing == 'perfetto'
virgl_depends += [vperfetto_min_dep]
endif
+if with_tracing == 'percetto'
+ virgl_depends += [percetto_dep]
+endif
+
virgl_sources += vrend_sources
if have_egl
diff --git a/src/virgl_context.h b/src/virgl_context.h
index fa39fe99..ea86b31e 100644
--- a/src/virgl_context.h
+++ b/src/virgl_context.h
@@ -29,6 +29,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "virglrenderer_hw.h"
#include "virgl_resource.h"
struct vrend_transfer_info;
@@ -42,9 +43,17 @@ struct virgl_context_blob {
struct pipe_resource *pipe_resource;
} u;
+ uint32_t map_info;
+
void *renderer_data;
};
+struct virgl_context;
+
+typedef void (*virgl_context_fence_retire)(struct virgl_context *ctx,
+ uint64_t queue_id,
+ void *fence_cookie);
+
/**
* Base class for renderer contexts. For example, vrend_decode_ctx is a
* subclass of virgl_context.
@@ -52,6 +61,18 @@ struct virgl_context_blob {
struct virgl_context {
uint32_t ctx_id;
+ enum virgl_renderer_capset capset_id;
+
+ /*
+ * Each fence goes through the submitted, signaled, and retired states.
+ * This callback is called from virgl_context::retire_fences to retire the
+ * signaled fences of each queue. When a queue has accumulated multiple
+ * signaled fences by the time virgl_context::retire_fences is called, this
+ * callback might be invoked only for the latest one rather than for every
+ * fence, depending on the flags of the fences.
+ */
+ virgl_context_fence_retire fence_retire;
+
void (*destroy)(struct virgl_context *ctx);
void (*attach_resource)(struct virgl_context *ctx,
@@ -84,6 +105,21 @@ struct virgl_context {
int (*submit_cmd)(struct virgl_context *ctx,
const void *buffer,
size_t size);
+
+ /*
+ * Return an fd that is readable whenever there is any signaled fence in
+ * any queue, or -1 if not supported.
+ */
+ int (*get_fencing_fd)(struct virgl_context *ctx);
+
+ /* retire signaled fences of all queues */
+ void (*retire_fences)(struct virgl_context *ctx);
+
+ /* submit a fence to the queue identified by queue_id */
+ int (*submit_fence)(struct virgl_context *ctx,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie);
};
struct virgl_context_foreach_args {
diff --git a/src/virgl_hw.h b/src/virgl_hw.h
index 58dafd99..2cdbf60f 100644
--- a/src/virgl_hw.h
+++ b/src/virgl_hw.h
@@ -358,6 +358,35 @@ enum virgl_formats {
VIRGL_FORMAT_ETC2_RG11_UNORM = 277,
VIRGL_FORMAT_ETC2_RG11_SNORM = 278,
+ VIRGL_FORMAT_ASTC_4x4 = 279,
+ VIRGL_FORMAT_ASTC_5x4 = 280,
+ VIRGL_FORMAT_ASTC_5x5 = 281,
+ VIRGL_FORMAT_ASTC_6x5 = 282,
+ VIRGL_FORMAT_ASTC_6x6 = 283,
+ VIRGL_FORMAT_ASTC_8x5 = 284,
+ VIRGL_FORMAT_ASTC_8x6 = 285,
+ VIRGL_FORMAT_ASTC_8x8 = 286,
+ VIRGL_FORMAT_ASTC_10x5 = 287,
+ VIRGL_FORMAT_ASTC_10x6 = 288,
+ VIRGL_FORMAT_ASTC_10x8 = 289,
+ VIRGL_FORMAT_ASTC_10x10 = 290,
+ VIRGL_FORMAT_ASTC_12x10 = 291,
+ VIRGL_FORMAT_ASTC_12x12 = 292,
+ VIRGL_FORMAT_ASTC_4x4_SRGB = 293,
+ VIRGL_FORMAT_ASTC_5x4_SRGB = 294,
+ VIRGL_FORMAT_ASTC_5x5_SRGB = 295,
+ VIRGL_FORMAT_ASTC_6x5_SRGB = 296,
+ VIRGL_FORMAT_ASTC_6x6_SRGB = 297,
+ VIRGL_FORMAT_ASTC_8x5_SRGB = 298,
+ VIRGL_FORMAT_ASTC_8x6_SRGB = 299,
+ VIRGL_FORMAT_ASTC_8x8_SRGB = 300,
+ VIRGL_FORMAT_ASTC_10x5_SRGB = 301,
+ VIRGL_FORMAT_ASTC_10x6_SRGB = 302,
+ VIRGL_FORMAT_ASTC_10x8_SRGB = 303,
+ VIRGL_FORMAT_ASTC_10x10_SRGB = 304,
+ VIRGL_FORMAT_ASTC_12x10_SRGB = 305,
+ VIRGL_FORMAT_ASTC_12x12_SRGB = 306,
+
VIRGL_FORMAT_R10G10B10X2_UNORM = 308,
VIRGL_FORMAT_A4B4G4R4_UNORM = 311,
@@ -407,6 +436,9 @@ enum virgl_formats {
/* These are used by the capability_bits_v2 field in virgl_caps_v2. */
#define VIRGL_CAP_V2_BLEND_EQUATION (1 << 0)
+#define VIRGL_CAP_V2_UNTYPED_RESOURCE (1 << 1)
+#define VIRGL_CAP_V2_VIDEO_MEMORY (1 << 2)
+#define VIRGL_CAP_V2_MEMINFO (1 << 3)
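
These bits are advertised in virgl_caps_v2::capability_bits_v2, so a guest driver can feature-check before relying on the new paths. A hedged sketch (how the caps were fetched is elided; the helper name is illustrative):

/* Sketch: gate use of VIRGL_CCMD_GET_MEMORY_INFO on the host cap bit. */
static bool
example_can_query_meminfo(const struct virgl_caps_v2 *caps)
{
   return (caps->capability_bits_v2 & VIRGL_CAP_V2_MEMINFO) != 0;
}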
/* virgl bind flags - these are compatible with mesa 10.5 gallium.
* but are fixed, no other should be passed to virgl either.
@@ -558,6 +590,7 @@ struct virgl_caps_v2 {
uint32_t host_feature_check_version;
struct virgl_supported_format_mask supported_readback_formats;
struct virgl_supported_format_mask scanout;
+ uint32_t max_video_memory;
uint32_t capability_bits_v2;
};
diff --git a/src/virgl_protocol.h b/src/virgl_protocol.h
index a230898c..d8d7b16d 100644
--- a/src/virgl_protocol.h
+++ b/src/virgl_protocol.h
@@ -35,6 +35,16 @@ struct virgl_host_query_state {
uint64_t result;
};
+struct virgl_memory_info
+{
+ uint32_t total_device_memory; /**< size of device memory, e.g. VRAM */
+ uint32_t avail_device_memory; /**< free device memory at the moment */
+ uint32_t total_staging_memory; /**< size of staging memory, e.g. GART */
+ uint32_t avail_staging_memory; /**< free staging memory at the moment */
+ uint32_t device_memory_evicted; /**< size of memory evicted (monotonic counter) */
+ uint32_t nr_device_memory_evictions; /**< # of evictions (monotonic counter) */
+};
+
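The struct lives in the protocol header because VIRGL_CCMD_GET_MEMORY_INFO (added below) is expected to make the host write one of these into the resource named by the command. A hedged guest-side sketch of consuming it, where 'mapped' stands for the guest mapping of that resource's backing storage (how that mapping is obtained is driver-specific and not shown):

/* Sketch: read back the memory info the host wrote into the destination
 * resource after a GET_MEMORY_INFO command completed. */
static uint32_t
example_avail_device_memory(const void *mapped)
{
   const struct virgl_memory_info *info = mapped;
   return info->avail_device_memory;
}
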
enum virgl_object_type {
VIRGL_OBJECT_NULL,
VIRGL_OBJECT_BLEND,
@@ -102,6 +112,8 @@ enum virgl_context_cmd {
VIRGL_CCMD_SET_TWEAKS,
VIRGL_CCMD_CLEAR_TEXTURE,
VIRGL_CCMD_PIPE_RESOURCE_CREATE,
+ VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE,
+ VIRGL_CCMD_GET_MEMORY_INFO,
VIRGL_MAX_COMMANDS
};
@@ -636,4 +648,17 @@ enum vrend_tweak_type {
#define VIRGL_PIPE_RES_CREATE_FLAGS 10
#define VIRGL_PIPE_RES_CREATE_BLOB_ID 11
+/* VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE */
+#define VIRGL_PIPE_RES_SET_TYPE_SIZE(nplanes) (8 + (nplanes) * 2)
+#define VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE 1
+#define VIRGL_PIPE_RES_SET_TYPE_FORMAT 2
+#define VIRGL_PIPE_RES_SET_TYPE_BIND 3
+#define VIRGL_PIPE_RES_SET_TYPE_WIDTH 4
+#define VIRGL_PIPE_RES_SET_TYPE_HEIGHT 5
+#define VIRGL_PIPE_RES_SET_TYPE_USAGE 6
+#define VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO 7
+#define VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI 8
+#define VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(plane) (9 + (plane) * 2)
+#define VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(plane) (10 + (plane) * 2)
+
#endif
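
The indices above address the payload dwords of VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE relative to the command header. A hedged sketch of how a guest driver might pack them for an n-plane import; building the header dword itself is left out, and the function is illustrative rather than part of this patch:

/* Sketch: fill the SET_TYPE payload.  'cmd' points at the command header
 * dword; the payload must be VIRGL_PIPE_RES_SET_TYPE_SIZE(nplanes) dwords. */
static void
example_encode_set_type(uint32_t *cmd, uint32_t res_handle, uint32_t format,
                        uint32_t bind, uint32_t width, uint32_t height,
                        uint32_t usage, uint64_t modifier,
                        const uint32_t *strides, const uint32_t *offsets,
                        uint32_t nplanes)
{
   cmd[VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE] = res_handle;
   cmd[VIRGL_PIPE_RES_SET_TYPE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_SET_TYPE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_SET_TYPE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_SET_TYPE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_SET_TYPE_USAGE] = usage;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO] = (uint32_t)modifier;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI] = (uint32_t)(modifier >> 32);
   for (uint32_t i = 0; i < nplanes; i++) {
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i)] = strides[i];
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i)] = offsets[i];
   }
}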
diff --git a/src/virgl_resource.c b/src/virgl_resource.c
index e3c9423b..c58dd708 100644
--- a/src/virgl_resource.c
+++ b/src/virgl_resource.c
@@ -102,7 +102,7 @@ virgl_resource_create(uint32_t res_id)
return res;
}
-int
+struct virgl_resource *
virgl_resource_create_from_pipe(uint32_t res_id,
struct pipe_resource *pres,
const struct iovec *iov,
@@ -112,7 +112,7 @@ virgl_resource_create_from_pipe(uint32_t res_id,
res = virgl_resource_create(res_id);
if (!res)
- return ENOMEM;
+ return NULL;
/* take ownership */
res->pipe_resource = pres;
@@ -120,10 +120,10 @@ virgl_resource_create_from_pipe(uint32_t res_id,
res->iov = iov;
res->iov_count = iov_count;
- return 0;
+ return res;
}
-int
+struct virgl_resource *
virgl_resource_create_from_fd(uint32_t res_id,
enum virgl_resource_fd_type fd_type,
int fd,
@@ -136,7 +136,7 @@ virgl_resource_create_from_fd(uint32_t res_id,
res = virgl_resource_create(res_id);
if (!res)
- return ENOMEM;
+ return NULL;
res->fd_type = fd_type;
/* take ownership */
@@ -145,10 +145,10 @@ virgl_resource_create_from_fd(uint32_t res_id,
res->iov = iov;
res->iov_count = iov_count;
- return 0;
+ return res;
}
-int
+struct virgl_resource *
virgl_resource_create_from_iov(uint32_t res_id,
const struct iovec *iov,
int iov_count)
@@ -160,12 +160,12 @@ virgl_resource_create_from_iov(uint32_t res_id,
res = virgl_resource_create(res_id);
if (!res)
- return ENOMEM;
+ return NULL;
res->iov = iov;
res->iov_count = iov_count;
- return 0;
+ return res;
}
void
diff --git a/src/virgl_resource.h b/src/virgl_resource.h
index 15efa8fa..42983cdc 100644
--- a/src/virgl_resource.h
+++ b/src/virgl_resource.h
@@ -44,6 +44,16 @@ enum virgl_resource_fd_type {
* and imported into a vrend_decode_ctx to create a vrend_resource.
*
* It is also possible to create a virgl_resource from a context object.
+ *
+ * The underlying storage of a virgl_resource is provided by a pipe_resource
+ * and/or a fd. When it is provided by a pipe_resource, the virgl_resource is
+ * said to be typed because pipe_resource also provides the type information.
+ *
+ * Conventional resources are always typed. Blob resources by definition
+ * neither have nor need type information, although those created from
+ * vrend_decode_ctx objects happen to be typed. That is a convenience
+ * rather than something to rely on: contexts must not assume that every
+ * resource is typed when interop is expected.
*/
struct virgl_resource {
uint32_t res_id;
@@ -56,6 +66,8 @@ struct virgl_resource {
const struct iovec *iov;
int iov_count;
+ uint32_t map_info;
+
void *private_data;
};
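
Given the comment above, the presence of a pipe_resource is what distinguishes a typed resource from an untyped one; a hedged sketch of the check a context doing interop might use (helper name illustrative):

/* Sketch: treat resources without a pipe_resource as untyped and fall
 * back to fd/iov-based import instead of assuming type information. */
static bool
example_resource_is_typed(const struct virgl_resource *res)
{
   return res->pipe_resource != NULL;
}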
@@ -84,20 +96,20 @@ virgl_resource_table_cleanup(void);
void
virgl_resource_table_reset(void);
-int
+struct virgl_resource *
virgl_resource_create_from_pipe(uint32_t res_id,
struct pipe_resource *pres,
const struct iovec *iov,
int iov_count);
-int
+struct virgl_resource *
virgl_resource_create_from_fd(uint32_t res_id,
enum virgl_resource_fd_type fd_type,
int fd,
const struct iovec *iov,
int iov_count);
-int
+struct virgl_resource *
virgl_resource_create_from_iov(uint32_t res_id,
const struct iovec *iov,
int iov_count);
diff --git a/src/virgl_util.c b/src/virgl_util.c
index 2f673ddc..6dead0a7 100644
--- a/src/virgl_util.c
+++ b/src/virgl_util.c
@@ -114,6 +114,15 @@ void flush_eventfd(int fd)
} while ((len == -1 && errno == EINTR) || len == sizeof(value));
}
+#if ENABLE_TRACING == TRACE_WITH_PERCETTO
+PERCETTO_CATEGORY_DEFINE(VIRGL_PERCETTO_CATEGORIES)
+
+void trace_init(void)
+{
+ PERCETTO_INIT(PERCETTO_CLOCK_DONT_CARE);
+}
+#endif
+
#if ENABLE_TRACING == TRACE_WITH_PERFETTO
void trace_init(void)
{
@@ -126,18 +135,13 @@ void trace_init(void)
vperfetto_min_startTracing(&config);
}
-char *trace_begin(const char* format, ...)
+const char *trace_begin(const char *scope)
{
- char buffer[1024];
- va_list args;
- va_start (args, format);
- vsnprintf (buffer, sizeof(buffer), format, args);
- va_end (args);
- vperfetto_min_beginTrackEvent_VMM(buffer);
- return (void *)1;
+ vperfetto_min_beginTrackEvent_VMM(scope);
+ return scope;
}
-void trace_end(char **dummy)
+void trace_end(const char **dummy)
{
(void)dummy;
vperfetto_min_endTrackEvent_VMM();
@@ -150,33 +154,22 @@ void trace_init(void)
{
}
-char *trace_begin(const char* format, ...)
+const char *trace_begin(const char *scope)
{
for (int i = 0; i < nesting_depth; ++i)
fprintf(stderr, " ");
- fprintf(stderr, "ENTER:");
- char *buffer;
- va_list args;
- va_start (args, format);
- int size = vasprintf(&buffer, format, args);
-
- if (size < 0)
- buffer=strdup("error");
-
- va_end (args);
- fprintf(stderr, "%s\n", buffer);
+ fprintf(stderr, "ENTER:%s\n", scope);
nesting_depth++;
- return buffer;
+ return scope;
}
-void trace_end(char **func_name)
+void trace_end(const char **func_name)
{
--nesting_depth;
for (int i = 0; i < nesting_depth; ++i)
fprintf(stderr, " ");
fprintf(stderr, "LEAVE %s\n", *func_name);
- free(*func_name);
}
#endif
diff --git a/src/virgl_util.h b/src/virgl_util.h
index 861ecd72..951410ec 100644
--- a/src/virgl_util.h
+++ b/src/virgl_util.h
@@ -34,6 +34,7 @@
#define TRACE_WITH_PERFETTO 1
#define TRACE_WITH_STDERR 2
+#define TRACE_WITH_PERCETTO 3
#define BIT(n) (UINT32_C(1) << (n))
@@ -63,22 +64,42 @@ void flush_eventfd(int fd);
#ifdef ENABLE_TRACING
void trace_init(void);
-char *trace_begin(const char* format, ...);
-void trace_end(char **dummy);
#define TRACE_INIT() trace_init()
-#define TRACE_FUNC() \
- char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
- trace_begin("%s", __func__)
+#define TRACE_FUNC() TRACE_SCOPE(__func__)
-#define TRACE_SCOPE(FORMAT, ...) \
- char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
- trace_begin(FORMAT, __VA_ARGS__)
+#if ENABLE_TRACING == TRACE_WITH_PERCETTO
+
+#include <percetto.h>
+
+#define VIRGL_PERCETTO_CATEGORIES(C, G) \
+ C(virgl, "virglrenderer") \
+ C(virgls, "virglrenderer detailed events", "slow")
+
+PERCETTO_CATEGORY_DECLARE(VIRGL_PERCETTO_CATEGORIES)
+
+#define TRACE_SCOPE(SCOPE) TRACE_EVENT(virgl, SCOPE)
+/* Trace high-frequency events (tracing them may impact performance). */
+#define TRACE_SCOPE_SLOW(SCOPE) TRACE_EVENT(virgls, SCOPE)
+
+#else
+
+const char *trace_begin(const char *scope);
+void trace_end(const char **scope);
+
+#define TRACE_SCOPE(SCOPE) \
+ const char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
+ trace_begin(SCOPE)
+
+#define TRACE_SCOPE_SLOW(SCOPE) TRACE_SCOPE(SCOPE)
+
+#endif /* ENABLE_TRACING == TRACE_WITH_PERCETTO */
#else
#define TRACE_INIT()
#define TRACE_FUNC()
-#define TRACE_SCOPE(FORMAT, ...)
-#endif
+#define TRACE_SCOPE(SCOPE)
+#define TRACE_SCOPE_SLOW(SCOPE)
+#endif /* ENABLE_TRACING */
#endif /* VIRGL_UTIL_H */
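
With the switch away from printf-style tracing, TRACE_SCOPE now takes a plain scope string; literals (or other long-lived strings) are expected, since the stderr backend still dereferences the string when the scope ends and trace backends such as Percetto generally want stable name strings. A hedged usage sketch (function name illustrative):

/* Sketch: typical use of the reworked tracing macros. */
static void example_traced_upload(void)
{
   TRACE_FUNC();                        /* expands to TRACE_SCOPE(__func__) */

   {
      TRACE_SCOPE_SLOW("upload-inner"); /* high-frequency, detailed event */
      /* ... the actual work being measured ... */
   }
}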
diff --git a/src/virglrenderer.c b/src/virglrenderer.c
index 8458b215..0730a1d8 100644
--- a/src/virglrenderer.c
+++ b/src/virglrenderer.c
@@ -65,7 +65,7 @@ static int virgl_renderer_resource_create_internal(struct virgl_renderer_resourc
UNUSED struct iovec *iov, UNUSED uint32_t num_iovs,
void *image)
{
- int ret;
+ struct virgl_resource *res;
struct pipe_resource *pipe_res;
struct vrend_renderer_resource_create_args vrend_args = { 0 };
@@ -88,12 +88,14 @@ static int virgl_renderer_resource_create_internal(struct virgl_renderer_resourc
if (!pipe_res)
return EINVAL;
- ret = virgl_resource_create_from_pipe(args->handle, pipe_res, iov, num_iovs);
- if (ret) {
+ res = virgl_resource_create_from_pipe(args->handle, pipe_res, iov, num_iovs);
+ if (!res) {
vrend_renderer_resource_destroy((struct vrend_resource *)pipe_res);
- return ret;
+ return -ENOMEM;
}
+ res->map_info = vrend_renderer_resource_get_map_info(pipe_res);
+
return 0;
}
@@ -163,24 +165,57 @@ void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
}
}
-int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *name)
+static void per_context_fence_retire(struct virgl_context *ctx,
+ uint64_t queue_id,
+ void *fence_cookie)
+{
+ state.cbs->write_context_fence(state.cookie,
+ ctx->ctx_id,
+ queue_id,
+ fence_cookie);
+}
+
+int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
+ uint32_t ctx_flags,
+ uint32_t nlen,
+ const char *name)
{
+ const enum virgl_renderer_capset capset_id =
+ ctx_flags & VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK;
struct virgl_context *ctx;
int ret;
TRACE_FUNC();
/* user context id must be greater than 0 */
- if (handle == 0)
+ if (ctx_id == 0)
return EINVAL;
- if (virgl_context_lookup(handle))
- return 0;
+ /* unsupported flags */
+ if (ctx_flags & ~VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK)
+ return EINVAL;
+
+ ctx = virgl_context_lookup(ctx_id);
+ if (ctx) {
+ return ctx->capset_id == capset_id ? 0 : EINVAL;
+ }
- ctx = vrend_renderer_context_create(handle, nlen, name);
+ switch (capset_id) {
+ case VIRGL_RENDERER_CAPSET_VIRGL:
+ case VIRGL_RENDERER_CAPSET_VIRGL2:
+ ctx = vrend_renderer_context_create(ctx_id, nlen, name);
+ break;
+ default:
+ return EINVAL;
+ break;
+ }
if (!ctx)
return ENOMEM;
+ ctx->ctx_id = ctx_id;
+ ctx->capset_id = capset_id;
+ ctx->fence_retire = per_context_fence_retire;
+
ret = virgl_context_add(ctx);
if (ret) {
ctx->destroy(ctx);
@@ -190,6 +225,14 @@ int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *na
return 0;
}
+int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *name)
+{
+ return virgl_renderer_context_create_with_flags(handle,
+ VIRGL_RENDERER_CAPSET_VIRGL2,
+ nlen,
+ name);
+}
+
void virgl_renderer_context_destroy(uint32_t handle)
{
TRACE_FUNC();
@@ -315,10 +358,44 @@ void virgl_renderer_resource_detach_iov(int res_handle, struct iovec **iov_p, in
virgl_resource_detach_iov(res);
}
-int virgl_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
+int virgl_renderer_create_fence(int client_fence_id, UNUSED uint32_t ctx_id)
{
TRACE_FUNC();
- return vrend_renderer_create_fence(client_fence_id, ctx_id);
+ const uint32_t fence_id = (uint32_t)client_fence_id;
+ if (state.vrend_initialized)
+ return vrend_renderer_create_ctx0_fence(fence_id);
+ return EINVAL;
+}
+
+int virgl_renderer_context_create_fence(uint32_t ctx_id,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie)
+{
+ struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+ if (!ctx)
+ return -EINVAL;
+
+ assert(state.cbs->version >= 3 && state.cbs->write_context_fence);
+ return ctx->submit_fence(ctx, flags, queue_id, fence_cookie);
+}
+
+void virgl_renderer_context_poll(uint32_t ctx_id)
+{
+ struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+ if (!ctx)
+ return;
+
+ ctx->retire_fences(ctx);
+}
+
+int virgl_renderer_context_get_poll_fd(uint32_t ctx_id)
+{
+ struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+ if (!ctx)
+ return -1;
+
+ return ctx->get_fencing_fd(ctx);
}
void virgl_renderer_force_ctx_0(void)
@@ -400,8 +477,10 @@ void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int nu
}
-static void virgl_write_fence(uint32_t fence_id)
+static void ctx0_fence_retire(void *fence_cookie,
+ UNUSED void *retire_data)
{
+ const uint32_t fence_id = (uint32_t)(uintptr_t)fence_cookie;
state.cbs->write_fence(state.cookie, fence_id);
}
@@ -438,7 +517,7 @@ static int make_current(virgl_renderer_gl_context ctx)
}
static const struct vrend_if_cbs vrend_cbs = {
- virgl_write_fence,
+ ctx0_fence_retire,
create_gl_context,
destroy_gl_context,
make_current,
@@ -622,10 +701,28 @@ static int virgl_renderer_export_query(void *execute_args, uint32_t execute_size
return -EINVAL;
res = virgl_resource_lookup(export_query->in_resource_id);
- if (!res || !res->pipe_resource)
+ if (!res)
return -EINVAL;
- return vrend_renderer_export_query(res->pipe_resource, export_query);
+
+ if (res->pipe_resource) {
+ return vrend_renderer_export_query(res->pipe_resource, export_query);
+ } else if (!export_query->in_export_fds) {
+ /* Untyped resources have no type information and are expected to be
+ * exported with virgl_renderer_resource_export_blob instead. But when
+ * this is called only to query an untyped resource (in_export_fds is
+ * false), we should still return sane values.
+ */
+ export_query->out_num_fds = 1;
+ export_query->out_fourcc = 0;
+ export_query->out_fds[0] = -1;
+ export_query->out_strides[0] = 0;
+ export_query->out_offsets[0] = 0;
+ export_query->out_modifier = DRM_FORMAT_MOD_INVALID;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static int virgl_renderer_supported_structures(void *execute_args, uint32_t execute_size)
@@ -668,6 +765,7 @@ int virgl_renderer_execute(void *execute_args, uint32_t execute_size)
int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_create_blob_args *args)
{
TRACE_FUNC();
+ struct virgl_resource *res;
struct virgl_context *ctx;
struct virgl_context_blob blob;
bool has_host_storage;
@@ -707,9 +805,14 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
}
if (!has_host_storage) {
- return virgl_resource_create_from_iov(args->res_handle,
- args->iovecs,
- args->num_iovs);
+ res = virgl_resource_create_from_iov(args->res_handle,
+ args->iovecs,
+ args->num_iovs);
+ if (!res)
+ return -ENOMEM;
+
+ res->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
+ return 0;
}
ctx = virgl_context_lookup(args->ctx_id);
@@ -721,26 +824,28 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
return ret;
if (blob.type != VIRGL_RESOURCE_FD_INVALID) {
- ret = virgl_resource_create_from_fd(args->res_handle,
+ res = virgl_resource_create_from_fd(args->res_handle,
blob.type,
blob.u.fd,
args->iovecs,
args->num_iovs);
- if (ret) {
+ if (!res) {
close(blob.u.fd);
- return ret;
+ return -ENOMEM;
}
} else {
- ret = virgl_resource_create_from_pipe(args->res_handle,
+ res = virgl_resource_create_from_pipe(args->res_handle,
blob.u.pipe_resource,
args->iovecs,
args->num_iovs);
- if (ret) {
+ if (!res) {
vrend_renderer_resource_destroy((struct vrend_resource *)blob.u.pipe_resource);
- return ret;
+ return -ENOMEM;
}
}
+ res->map_info = blob.map_info;
+
if (ctx->get_blob_done)
ctx->get_blob_done(ctx, args->res_handle, &blob);
@@ -771,10 +876,15 @@ int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint32_t *map_info
{
TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
- if (!res || !res->pipe_resource)
+ if (!res)
+ return -EINVAL;
+
+ if ((res->map_info & VIRGL_RENDERER_MAP_CACHE_MASK) ==
+ VIRGL_RENDERER_MAP_CACHE_NONE)
return -EINVAL;
- return vrend_renderer_resource_get_map_info(res->pipe_resource, map_info);
+ *map_info = res->map_info;
+ return 0;
}
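
Since map_info is now captured on the virgl_resource at creation time, the query simply validates and returns the cached value. A hedged sketch of how a caller might act on the cache attribute, using the VIRGL_RENDERER_MAP_CACHE_* values already referenced elsewhere in this patch (helper name illustrative):

/* Sketch: report whether a mapping of the resource is not plain cached
 * memory, i.e. the VMM should expose it to the guest with matching
 * (uncached / write-combined) attributes. */
static bool
example_mapping_not_cached(uint32_t res_handle)
{
   uint32_t map_info = 0;
   if (virgl_renderer_resource_get_map_info(res_handle, &map_info))
      return false;  /* no usable map info for this resource */

   return (map_info & VIRGL_RENDERER_MAP_CACHE_MASK) !=
          VIRGL_RENDERER_MAP_CACHE_CACHED;
}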
int
@@ -802,5 +912,5 @@ int
virgl_renderer_export_fence(uint32_t client_fence_id, int *fd)
{
TRACE_FUNC();
- return vrend_renderer_export_fence(client_fence_id, fd);
+ return vrend_renderer_export_ctx0_fence(client_fence_id, fd);
}
diff --git a/src/virglrenderer.h b/src/virglrenderer.h
index e7592a80..d56b5dcc 100644
--- a/src/virglrenderer.h
+++ b/src/virglrenderer.h
@@ -45,7 +45,11 @@ struct virgl_renderer_gl_ctx_param {
int minor_ver;
};
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+#define VIRGL_RENDERER_CALLBACKS_VERSION 3
+#else
#define VIRGL_RENDERER_CALLBACKS_VERSION 2
+#endif
struct virgl_renderer_callbacks {
int version;
@@ -57,6 +61,10 @@ struct virgl_renderer_callbacks {
int (*make_current)(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx);
int (*get_drm_fd)(void *cookie); /* v2, used with flags & VIRGL_RENDERER_USE_EGL */
+
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+ void (*write_context_fence)(void *cookie, uint32_t ctx_id, uint64_t queue_id, void *fence_cookie);
+#endif
};
/* virtio-gpu compatible interface */
@@ -256,6 +264,13 @@ VIRGL_EXPORT int virgl_renderer_execute(void *execute_args, uint32_t execute_siz
*/
#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+#define VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK 0xff
+
+VIRGL_EXPORT int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
+ uint32_t ctx_flags,
+ uint32_t nlen,
+ const char *name);
+
#define VIRGL_RENDERER_BLOB_MEM_GUEST 0x0001
#define VIRGL_RENDERER_BLOB_MEM_HOST3D 0x0002
#define VIRGL_RENDERER_BLOB_MEM_HOST3D_GUEST 0x0003
@@ -300,6 +315,14 @@ virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
VIRGL_EXPORT int
virgl_renderer_export_fence(uint32_t client_fence_id, int *fd);
+#define VIRGL_RENDERER_FENCE_FLAG_MERGEABLE (1 << 0)
+VIRGL_EXPORT int virgl_renderer_context_create_fence(uint32_t ctx_id,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie);
+VIRGL_EXPORT void virgl_renderer_context_poll(uint32_t ctx_id); /* force fence retirement */
+VIRGL_EXPORT int virgl_renderer_context_get_poll_fd(uint32_t ctx_id);
+
#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
#endif
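
Taken together, the unstable entry points above give a VMM a per-context fence path: create the context with an explicit capset in the low flag byte, submit fences against a queue, and retire them from the poll fd or with an explicit poll. A hedged sketch of the flow; error handling and event-loop wiring are elided, and the cookie value is arbitrary:

/* Sketch: per-context fencing from the VMM side.  Retiring ends up in the
 * v3 write_context_fence callback (possibly only for the latest signaled
 * fence of a queue, depending on the fence flags). */
static void example_context_fence_flow(uint32_t ctx_id)
{
   /* the low byte of ctx_flags selects the capset */
   virgl_renderer_context_create_with_flags(ctx_id,
                                            VIRGL_RENDERER_CAPSET_VIRGL2,
                                            4, "demo");

   /* queue 0; the cookie identifies this fence when it is retired */
   virgl_renderer_context_create_fence(ctx_id, 0 /* flags */,
                                       0 /* queue_id */,
                                       (void *)(uintptr_t)42);

   /* wait for the poll fd to become readable in the event loop ... */
   int fd = virgl_renderer_context_get_poll_fd(ctx_id);
   (void)fd;

   /* ... then retire, which invokes cbs->write_context_fence */
   virgl_renderer_context_poll(ctx_id);
}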
diff --git a/src/virglrenderer_hw.h b/src/virglrenderer_hw.h
index 65f98cb2..c2105eb6 100644
--- a/src/virglrenderer_hw.h
+++ b/src/virglrenderer_hw.h
@@ -26,11 +26,9 @@
#include "virgl_hw.h"
-#ifdef VIRGL_RENDERER_UNSTABLE_APIS
enum virgl_renderer_capset {
VIRGL_RENDERER_CAPSET_VIRGL = 1,
VIRGL_RENDERER_CAPSET_VIRGL2 = 2,
};
-#endif
#endif /* VIRGLRENDERER_HW_H */
diff --git a/src/vrend_debug.c b/src/vrend_debug.c
index 93398304..e48bface 100644
--- a/src/vrend_debug.c
+++ b/src/vrend_debug.c
@@ -78,6 +78,7 @@ static const char *command_names[VIRGL_MAX_COMMANDS] = {
"TWEAK",
"CLEAR_TEXTURE"
"PIPE_RESOURCE_CREATE",
+ "PIPE_RESOURCE_SET_TYPE",
};
static const char *object_type_names[VIRGL_MAX_OBJECTS] = {
diff --git a/src/vrend_debug.h b/src/vrend_debug.h
index 91038318..f4efefac 100644
--- a/src/vrend_debug.h
+++ b/src/vrend_debug.h
@@ -97,7 +97,7 @@ virgl_debug_callback_type vrend_set_debug_callback(virgl_debug_callback_type cb)
} while (0)
#else
-#define VREND_DEBUG(flag, ctx, fmt, ...)
+#define VREND_DEBUG(flag, ctx, ...)
#define VREND_DEBUG_EXT(flag, ctx, X)
#define VREND_DEBUG_NOCTX(flag, ctx, ...)
#endif
diff --git a/src/vrend_decode.c b/src/vrend_decode.c
index f60e89ba..bf162bd6 100644
--- a/src/vrend_decode.c
+++ b/src/vrend_decode.c
@@ -283,19 +283,18 @@ static int vrend_decode_set_index_buffer(struct vrend_context *ctx, const uint32
static int vrend_decode_set_constant_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t shader;
- uint32_t index;
int nc = (length - 2);
if (length < 2)
return EINVAL;
shader = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE);
- index = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_INDEX);
+ /* VIRGL_SET_CONSTANT_BUFFER_INDEX is not used */
if (shader >= PIPE_SHADER_TYPES)
return EINVAL;
- vrend_set_constants(ctx, shader, index, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
+ vrend_set_constants(ctx, shader, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
return 0;
}
@@ -757,7 +756,7 @@ static int vrend_decode_create_object(struct vrend_context *ctx, const uint32_t
VREND_DEBUG(dbg_object, ctx," CREATE %-18s handle:0x%x len:%d\n",
vrend_get_object_type_name(obj_type), handle, length);
- TRACE_SCOPE("CREATE %-18s", vrend_get_object_type_name(obj_type));
+ TRACE_SCOPE(vrend_get_object_type_name(obj_type));
switch (obj_type){
case VIRGL_OBJECT_BLEND:
@@ -1432,9 +1431,44 @@ static int vrend_decode_pipe_resource_create(struct vrend_context *ctx, const ui
return vrend_renderer_pipe_resource_create(ctx, blob_id, &args);
}
+static int vrend_decode_pipe_resource_set_type(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
+{
+ struct vrend_renderer_resource_set_type_args args = { 0 };
+ uint32_t res_id;
+
+ if (length >= VIRGL_PIPE_RES_SET_TYPE_SIZE(0))
+ args.plane_count = (length - VIRGL_PIPE_RES_SET_TYPE_SIZE(0)) / 2;
+
+ if (length != VIRGL_PIPE_RES_SET_TYPE_SIZE(args.plane_count) ||
+ !args.plane_count || args.plane_count > VIRGL_GBM_MAX_PLANES)
+ return EINVAL;
+
+ res_id = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE);
+ args.format = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_FORMAT);
+ args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_BIND);
+ args.width = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_WIDTH);
+ args.height = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_HEIGHT);
+ args.usage = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_USAGE);
+ args.modifier = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO);
+ args.modifier |= (uint64_t)get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI) << 32;
+ for (uint32_t i = 0; i < args.plane_count; i++) {
+ args.plane_strides[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i));
+ args.plane_offsets[i] = get_buf_entry(buf, VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i));
+ }
+
+ return vrend_renderer_pipe_resource_set_type(ctx, res_id, &args);
+}
+
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
uint32_t ctx_id);
+static void vrend_decode_ctx_fence_retire(void *fence_cookie,
+ void *retire_data)
+{
+ struct vrend_decode_ctx *dctx = retire_data;
+ dctx->base.fence_retire(&dctx->base, 0, fence_cookie);
+}
+
struct virgl_context *vrend_renderer_context_create(uint32_t handle,
uint32_t nlen,
const char *debug_name)
@@ -1453,6 +1487,10 @@ struct virgl_context *vrend_renderer_context_create(uint32_t handle,
return NULL;
}
+ vrend_renderer_set_fence_retire(dctx->grctx,
+ vrend_decode_ctx_fence_retire,
+ dctx);
+
return &dctx->base;
}
@@ -1470,12 +1508,7 @@ static void vrend_decode_ctx_attach_resource(struct virgl_context *ctx,
{
TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
- /* in the future, we should import to create the pipe resource */
- if (!res->pipe_resource)
- return;
-
- vrend_renderer_attach_res_ctx(dctx->grctx, res->res_id,
- res->pipe_resource);
+ vrend_renderer_attach_res_ctx(dctx->grctx, res);
}
static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
@@ -1483,7 +1516,7 @@ static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
{
TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
- vrend_renderer_detach_res_ctx(dctx->grctx, res->res_id);
+ vrend_renderer_detach_res_ctx(dctx->grctx, res);
}
static int vrend_decode_ctx_transfer_3d(struct virgl_context *ctx,
@@ -1508,8 +1541,24 @@ static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
blob->type = VIRGL_RESOURCE_FD_INVALID;
/* this transfers ownership and blob_id is no longer valid */
blob->u.pipe_resource = vrend_get_blob_pipe(dctx->grctx, blob_id);
+ if (!blob->u.pipe_resource)
+ return -EINVAL;
- return blob->u.pipe_resource ? 0 : EINVAL;
+ blob->map_info = vrend_renderer_resource_get_map_info(blob->u.pipe_resource);
+ return 0;
+}
+
+static int vrend_decode_get_memory_info(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
+{
+ TRACE_FUNC();
+ if (length != 1)
+ return EINVAL;
+
+ uint32_t res_handle = get_buf_entry(buf, 1);
+
+ vrend_renderer_get_meminfo(ctx, res_handle);
+
+ return 0;
}
typedef int (*vrend_decode_callback)(struct vrend_context *ctx, const uint32_t *buf, uint32_t length);
@@ -1522,7 +1571,7 @@ static int vrend_decode_dummy(struct vrend_context *ctx, const uint32_t *buf, ui
return 0;
}
-vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
+static const vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
[VIRGL_CCMD_NOP] = vrend_decode_dummy,
[VIRGL_CCMD_CREATE_OBJECT] = vrend_decode_create_object,
[VIRGL_CCMD_BIND_OBJECT] = vrend_decode_bind_object,
@@ -1571,7 +1620,9 @@ vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
[VIRGL_CCMD_COPY_TRANSFER3D] = vrend_decode_copy_transfer3d,
[VIRGL_CCMD_END_TRANSFERS] = vrend_decode_dummy,
[VIRGL_CCMD_SET_TWEAKS] = vrend_decode_set_tweaks,
- [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create
+ [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create,
+ [VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE] = vrend_decode_pipe_resource_set_type,
+ [VIRGL_CCMD_GET_MEMORY_INFO] = vrend_decode_get_memory_info,
};
static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
@@ -1615,19 +1666,41 @@ static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
VREND_DEBUG(dbg_cmd, gdctx->grctx, "%-4d %-20s len:%d\n",
cur_offset, vrend_get_comand_name(cmd), len);
- TRACE_SCOPE("%s", vrend_get_comand_name(cmd));
+ TRACE_SCOPE_SLOW(vrend_get_comand_name(cmd));
ret = decode_table[cmd](gdctx->grctx, buf, len);
if (ret) {
- if (ret == EINVAL) {
+ if (ret == EINVAL)
vrend_report_buffer_error(gdctx->grctx, *buf);
- return ret;
- }
+ return ret;
}
}
return 0;
}
+static int vrend_decode_ctx_get_fencing_fd(UNUSED struct virgl_context *ctx)
+{
+ return vrend_renderer_get_poll_fd();
+}
+
+static void vrend_decode_ctx_retire_fences(UNUSED struct virgl_context *ctx)
+{
+ vrend_renderer_check_fences();
+}
+
+static int vrend_decode_ctx_submit_fence(struct virgl_context *ctx,
+ uint32_t flags,
+ uint64_t queue_id,
+ void *fence_cookie)
+{
+ struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
+
+ if (queue_id)
+ return -EINVAL;
+
+ return vrend_renderer_create_fence(dctx->grctx, flags, fence_cookie);
+}
+
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
uint32_t ctx_id)
{
@@ -1644,4 +1717,8 @@ static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
ctx->get_blob = vrend_decode_ctx_get_blob;
ctx->get_blob_done = NULL;
ctx->submit_cmd = vrend_decode_ctx_submit_cmd;
+
+ ctx->get_fencing_fd = vrend_decode_ctx_get_fencing_fd;
+ ctx->retire_fences = vrend_decode_ctx_retire_fences;
+ ctx->submit_fence = vrend_decode_ctx_submit_fence;
}
diff --git a/src/vrend_formats.c b/src/vrend_formats.c
index af35dc95..8b269440 100644
--- a/src/vrend_formats.c
+++ b/src/vrend_formats.c
@@ -246,6 +246,36 @@ static struct vrend_format_table etc2_formats[] = {
{VIRGL_FORMAT_ETC2_RG11_UNORM, GL_COMPRESSED_RG11_EAC, GL_RG, GL_UNSIGNED_BYTE, NO_SWIZZLE},
{VIRGL_FORMAT_ETC2_RG11_SNORM, GL_COMPRESSED_SIGNED_RG11_EAC, GL_RG, GL_BYTE, NO_SWIZZLE},
};
+static struct vrend_format_table astc_formats[] = {
+ {VIRGL_FORMAT_ASTC_4x4, GL_COMPRESSED_RGBA_ASTC_4x4, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x4, GL_COMPRESSED_RGBA_ASTC_5x4, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x5, GL_COMPRESSED_RGBA_ASTC_5x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x5, GL_COMPRESSED_RGBA_ASTC_6x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x6, GL_COMPRESSED_RGBA_ASTC_6x6, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x5, GL_COMPRESSED_RGBA_ASTC_8x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x6, GL_COMPRESSED_RGBA_ASTC_8x6, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x8, GL_COMPRESSED_RGBA_ASTC_8x8, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x5, GL_COMPRESSED_RGBA_ASTC_10x5, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x6, GL_COMPRESSED_RGBA_ASTC_10x6, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x8, GL_COMPRESSED_RGBA_ASTC_10x8, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x10, GL_COMPRESSED_RGBA_ASTC_10x10, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x10, GL_COMPRESSED_RGBA_ASTC_12x10, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x12, GL_COMPRESSED_RGBA_ASTC_12x12, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_4x4_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x4_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_5x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_6x6_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x6_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_8x8_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x5_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x6_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x8_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_10x10_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x10_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+ {VIRGL_FORMAT_ASTC_12x12_SRGB, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12, GL_RGBA, GL_BYTE, NO_SWIZZLE },
+};
static struct vrend_format_table rgtc_formats[] = {
{ VIRGL_FORMAT_RGTC1_UNORM, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, NO_SWIZZLE },
@@ -433,6 +463,10 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
continue;
+ case UTIL_FORMAT_LAYOUT_ASTC:
+ if(epoxy_has_gl_extension("GL_KHR_texture_compression_astc_ldr"))
+ vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
+ continue;
default:
;/* do logic below */
}
@@ -552,6 +586,7 @@ void vrend_build_format_list_common(void)
/* compressed */
add_formats(etc2_formats);
+ add_formats(astc_formats);
add_formats(rgtc_formats);
add_formats(dxtn_formats);
add_formats(dxtn_srgb_formats);
@@ -712,6 +747,7 @@ unsigned vrend_renderer_query_multisample_caps(unsigned max_samples, struct virg
static int format_uncompressed_compressed_copy_compatible(enum virgl_formats src,
enum virgl_formats dst)
{
+
switch (src) {
case VIRGL_FORMAT_R32G32B32A32_UINT:
case VIRGL_FORMAT_R32G32B32A32_SINT:
@@ -734,6 +770,34 @@ static int format_uncompressed_compressed_copy_compatible(enum virgl_formats src
case VIRGL_FORMAT_ETC2_RG11_UNORM:
case VIRGL_FORMAT_ETC2_RG11_SNORM:
return 1;
+ case VIRGL_FORMAT_ASTC_4x4:
+ case VIRGL_FORMAT_ASTC_5x4:
+ case VIRGL_FORMAT_ASTC_5x5:
+ case VIRGL_FORMAT_ASTC_6x5:
+ case VIRGL_FORMAT_ASTC_6x6:
+ case VIRGL_FORMAT_ASTC_8x5:
+ case VIRGL_FORMAT_ASTC_8x6:
+ case VIRGL_FORMAT_ASTC_8x8:
+ case VIRGL_FORMAT_ASTC_10x5:
+ case VIRGL_FORMAT_ASTC_10x6:
+ case VIRGL_FORMAT_ASTC_10x8:
+ case VIRGL_FORMAT_ASTC_10x10:
+ case VIRGL_FORMAT_ASTC_12x10:
+ case VIRGL_FORMAT_ASTC_12x12:
+ case VIRGL_FORMAT_ASTC_4x4_SRGB:
+ case VIRGL_FORMAT_ASTC_5x4_SRGB:
+ case VIRGL_FORMAT_ASTC_5x5_SRGB:
+ case VIRGL_FORMAT_ASTC_6x5_SRGB:
+ case VIRGL_FORMAT_ASTC_6x6_SRGB:
+ case VIRGL_FORMAT_ASTC_8x5_SRGB:
+ case VIRGL_FORMAT_ASTC_8x6_SRGB:
+ case VIRGL_FORMAT_ASTC_8x8_SRGB:
+ case VIRGL_FORMAT_ASTC_10x5_SRGB:
+ case VIRGL_FORMAT_ASTC_10x6_SRGB:
+ case VIRGL_FORMAT_ASTC_10x8_SRGB:
+ case VIRGL_FORMAT_ASTC_10x10_SRGB:
+ case VIRGL_FORMAT_ASTC_12x10_SRGB:
+ case VIRGL_FORMAT_ASTC_12x12_SRGB:
+ return epoxy_is_desktop_gl() ? -1 : 1;
default:
return -1;
}
@@ -771,6 +835,26 @@ static int format_uncompressed_compressed_copy_compatible(enum virgl_formats src
static boolean format_compressed_compressed_copy_compatible(enum virgl_formats src, enum virgl_formats dst)
{
+ const bool is_desktop_gl = epoxy_is_desktop_gl();
+
+ if(!is_desktop_gl) {
+ if((src == VIRGL_FORMAT_ASTC_4x4 && dst == VIRGL_FORMAT_ASTC_4x4_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_5x4 && dst == VIRGL_FORMAT_ASTC_5x4_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_5x5 && dst == VIRGL_FORMAT_ASTC_5x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_6x5 && dst == VIRGL_FORMAT_ASTC_6x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_6x6 && dst == VIRGL_FORMAT_ASTC_6x6_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_8x5 && dst == VIRGL_FORMAT_ASTC_8x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_8x6 && dst == VIRGL_FORMAT_ASTC_8x6_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_8x8 && dst == VIRGL_FORMAT_ASTC_8x8_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x5 && dst == VIRGL_FORMAT_ASTC_10x5_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x6 && dst == VIRGL_FORMAT_ASTC_10x6_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x8 && dst == VIRGL_FORMAT_ASTC_10x8_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_10x10 && dst == VIRGL_FORMAT_ASTC_10x10_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_12x10 && dst == VIRGL_FORMAT_ASTC_12x10_SRGB) ||
+ (src == VIRGL_FORMAT_ASTC_12x12 && dst == VIRGL_FORMAT_ASTC_12x12_SRGB))
+ return true;
+ }
+
if ((src == VIRGL_FORMAT_RGTC1_UNORM && dst == VIRGL_FORMAT_RGTC1_SNORM) ||
(src == VIRGL_FORMAT_RGTC2_UNORM && dst == VIRGL_FORMAT_RGTC2_SNORM) ||
(src == VIRGL_FORMAT_BPTC_RGBA_UNORM && dst == VIRGL_FORMAT_BPTC_SRGBA) ||
diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
index 93b8ca2a..15474d23 100644
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -55,9 +55,14 @@
#include "virgl_resource.h"
#include "virglrenderer.h"
#include "virglrenderer_hw.h"
+#include "virgl_protocol.h"
#include "tgsi/tgsi_text.h"
+#ifdef HAVE_EPOXY_GLX_H
+#include <epoxy/glx.h>
+#endif
+
/*
* VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both
* virgl_caps_v1 and are exactly the same.
@@ -76,8 +81,14 @@ static const uint32_t fake_occlusion_query_samples_passed_default = 1024;
const struct vrend_if_cbs *vrend_clicbs;
struct vrend_fence {
- uint32_t fence_id;
- uint32_t ctx_id;
+ /* When the sync thread is waiting on the fence and the main thread
+ * destroys the context, ctx is set to NULL. Otherwise, ctx is always
+ * valid.
+ */
+ struct vrend_context *ctx;
+ uint32_t flags;
+ void *fence_cookie;
+
union {
GLsync glsyncobj;
#ifdef HAVE_EPOXY_EGL_H
@@ -110,6 +121,7 @@ enum features_id
feat_arb_robustness,
feat_arb_buffer_storage,
feat_arrays_of_arrays,
+ feat_ati_meminfo,
feat_atomic_counters,
feat_base_instance,
feat_barrier,
@@ -154,6 +166,7 @@ enum features_id
feat_multi_draw_indirect,
feat_nv_conditional_render,
feat_nv_prim_restart,
+ feat_nvx_gpu_memory_info,
feat_polygon_offset_clamp,
feat_occlusion_query,
feat_occlusion_query_boolean,
@@ -162,6 +175,7 @@ enum features_id
feat_sample_mask,
feat_sample_shading,
feat_samplers,
+ feat_sampler_border_colors,
feat_shader_clock,
feat_separate_shader_objects,
feat_ssbo,
@@ -206,6 +220,7 @@ static const struct {
FEAT(arb_robustness, UNAVAIL, UNAVAIL, "GL_ARB_robustness" ),
FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage", "GL_EXT_buffer_storage"),
FEAT(arrays_of_arrays, 43, 31, "GL_ARB_arrays_of_arrays"),
+ FEAT(ati_meminfo, UNAVAIL, UNAVAIL, "GL_ATI_meminfo" ),
FEAT(atomic_counters, 42, 31, "GL_ARB_shader_atomic_counters" ),
FEAT(base_instance, 42, UNAVAIL, "GL_ARB_base_instance", "GL_EXT_base_instance" ),
FEAT(barrier, 42, 31, NULL),
@@ -250,6 +265,7 @@ static const struct {
FEAT(multi_draw_indirect, 43, UNAVAIL, "GL_ARB_multi_draw_indirect", "GL_EXT_multi_draw_indirect" ),
FEAT(nv_conditional_render, UNAVAIL, UNAVAIL, "GL_NV_conditional_render" ),
FEAT(nv_prim_restart, UNAVAIL, UNAVAIL, "GL_NV_primitive_restart" ),
+ FEAT(nvx_gpu_memory_info, UNAVAIL, UNAVAIL, "GL_NVX_gpu_memory_info" ),
FEAT(polygon_offset_clamp, 46, UNAVAIL, "GL_ARB_polygon_offset_clamp", "GL_EXT_polygon_offset_clamp"),
FEAT(occlusion_query, 15, UNAVAIL, "GL_ARB_occlusion_query"),
FEAT(occlusion_query_boolean, 33, 30, "GL_EXT_occlusion_query_boolean", "GL_ARB_occlusion_query2"),
@@ -258,6 +274,7 @@ static const struct {
FEAT(sample_mask, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(sample_shading, 40, 32, "GL_ARB_sample_shading", "GL_OES_sample_shading" ),
FEAT(samplers, 33, 30, "GL_ARB_sampler_objects" ),
+ FEAT(sampler_border_colors, 33, 32, "GL_ARB_sampler_objects", "GL_EXT_texture_border_clamp", "GL_OES_texture_border_clamp" ),
FEAT(separate_shader_objects, 41, 31, "GL_ARB_separate_shader_objects"),
FEAT(shader_clock, UNAVAIL, UNAVAIL, "GL_ARB_shader_clock" ),
FEAT(ssbo, 43, 31, "GL_ARB_shader_storage_buffer_object" ),
@@ -270,7 +287,7 @@ static const struct {
FEAT(texture_barrier, 45, UNAVAIL, "GL_ARB_texture_barrier" ),
FEAT(texture_buffer_range, 43, 32, "GL_ARB_texture_buffer_range" ),
FEAT(texture_gather, 40, 31, "GL_ARB_texture_gather" ),
- FEAT(texture_multisample, 32, 30, "GL_ARB_texture_multisample" ),
+ FEAT(texture_multisample, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(texture_query_lod, 40, UNAVAIL, "GL_ARB_texture_query_lod", "GL_EXT_texture_query_lod"),
FEAT(texture_srgb_decode, UNAVAIL, UNAVAIL, "GL_EXT_texture_sRGB_decode" ),
FEAT(texture_storage, 42, 30, "GL_ARB_texture_storage" ),
@@ -316,8 +333,12 @@ struct global_renderer_state {
int eventfd;
pipe_mutex fence_mutex;
+ /* a fence is always on one of the two lists, or is pointed to by
+ * fence_waiting
+ */
struct list_head fence_list;
struct list_head fence_wait_list;
+ struct vrend_fence *fence_waiting;
pipe_condvar fence_cond;
struct vrend_context *ctx0;
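A plausible reading of the invariant stated above — assuming fence_wait_list holds newly submitted fences and fence_list holds signaled ones; the exact hand-off lives in the sync-thread code, which this hunk does not show:

    /* submitted           -> linked on fence_wait_list
     * being waited on     -> unlinked, pointed to by fence_waiting, so a
     *                        context teardown can still find it and clear
     *                        fence->ctx (see the struct vrend_fence comment)
     * signaled, unretired -> linked on fence_list until the main thread
     *                        retires it */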
@@ -355,6 +376,7 @@ struct vrend_linked_shader_program {
bool dual_src_linked;
struct vrend_shader *ss[PIPE_SHADER_TYPES];
+ uint64_t vs_fs_key;
uint32_t ubo_used_mask[PIPE_SHADER_TYPES];
uint32_t samplers_used_mask[PIPE_SHADER_TYPES];
@@ -415,10 +437,7 @@ struct vrend_shader_selector {
struct vrend_texture {
struct vrend_resource base;
struct pipe_sampler_state state;
- GLenum cur_swizzle_r;
- GLenum cur_swizzle_g;
- GLenum cur_swizzle_b;
- GLenum cur_swizzle_a;
+ GLint cur_swizzle[4];
GLuint cur_srgb_decode;
GLuint cur_base, cur_max;
};
@@ -452,10 +471,7 @@ struct vrend_sampler_view {
enum virgl_formats format;
GLenum target;
GLuint val0, val1;
- GLuint gl_swizzle_r;
- GLuint gl_swizzle_g;
- GLuint gl_swizzle_b;
- GLuint gl_swizzle_a;
+ GLint gl_swizzle[4];
GLenum depth_texture_mode;
GLuint srgb_decode;
struct vrend_resource *texture;
@@ -540,6 +556,14 @@ struct vrend_streamout_object {
#define XFB_STATE_STARTED 2
#define XFB_STATE_PAUSED 3
+struct vrend_vertex_buffer {
+ struct pipe_vertex_buffer base;
+ uint32_t res_id;
+};
+
+#define VREND_PROGRAM_NQUEUES (1 << 8)
+#define VREND_PROGRAM_NQUEUE_MASK (VREND_PROGRAM_NQUEUES - 1)
+
struct vrend_sub_context {
struct list_head head;
@@ -550,14 +574,18 @@ struct vrend_sub_context {
GLuint vaoid;
uint32_t enabled_attribs_bitmask;
- struct list_head programs;
+ /* Using an array of lists only adds VREND_PROGRAM_NQUEUES - 1 list_head
+ * structures to the consumed memory, but looking up the program can
+ * be sped up by a factor of VREND_PROGRAM_NQUEUES, which makes this
+ * worthwhile. */
+ struct list_head gl_programs[VREND_PROGRAM_NQUEUES];
+ struct list_head cs_programs;
struct util_hash_table *object_hash;
struct vrend_vertex_element_array *ve;
int num_vbos;
int old_num_vbos; /* for cleaning up */
- struct pipe_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
- uint32_t vbo_res_ids[PIPE_MAX_ATTRIBS];
+ struct vrend_vertex_buffer vbo[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer ib;
uint32_t index_buffer_res_id;
@@ -573,7 +601,7 @@ struct vrend_sub_context {
struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES];
struct vrend_linked_shader_program *prog;
- int prog_ids[PIPE_SHADER_TYPES];
+ GLuint prog_ids[PIPE_SHADER_TYPES];
struct vrend_shader_view views[PIPE_SHADER_TYPES];
struct vrend_constants consts[PIPE_SHADER_TYPES];
@@ -655,6 +683,13 @@ struct vrend_sub_context {
int fake_occlusion_query_samples_passed_multiplier;
int prim_mode;
+ bool drawing;
+ struct vrend_context *parent;
+};
+
+struct vrend_untyped_resource {
+ struct virgl_resource *resource;
+ struct list_head head;
};
struct vrend_context {
@@ -671,7 +706,6 @@ struct vrend_context {
bool in_error;
bool ctx_switch_pending;
bool pstip_inited;
- bool drawing;
GLuint pstipple_tex_id;
@@ -680,31 +714,55 @@ struct vrend_context {
/* resources bound to this context */
struct util_hash_table *res_hash;
+ /*
+ * vrend_context only works with typed virgl_resources. More specifically,
+ * it works with vrend_resources that are inherited from pipe_resources
+ * wrapped in virgl_resources.
+ *
+ * Normally, a vrend_resource is created first by
+ * vrend_renderer_resource_create. It is then wrapped in a virgl_resource
+ * by virgl_resource_create_from_pipe. Depending on whether it is a blob
+ * resource or not, the two functions can be called from different paths.
+ * But we always get both a virgl_resource and a vrend_resource as a
+ * result.
+ *
+ * It is however possible that we encounter untyped virgl_resources that
+ * have no pipe_resources. To work with untyped virgl_resources, we park
+ * them in untyped_resources first when they are attached. We move them
+ * into res_hash only after we get the type information and create the
+ * vrend_resources in vrend_decode_pipe_resource_set_type.
+ */
+ struct list_head untyped_resources;
+ struct virgl_resource *untyped_resource_cache;
+
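A sketch of the parking scheme the comment above describes; not part of the patch. Only untyped_resources, res_hash, vrend_untyped_resource and vrend_decode_pipe_resource_set_type come from the code — the helper name and the "is it typed yet" flag are assumptions made for illustration:

    static void attach_resource_sketch(struct vrend_context *ctx,
                                       struct virgl_resource *res,
                                       bool typed /* already has a pipe_resource */)
    {
       if (!typed) {
          /* park the untyped resource until the guest sends its type
           * (includes and error handling omitted in this sketch) */
          struct vrend_untyped_resource *entry = malloc(sizeof(*entry));
          entry->resource = res;
          list_add(&entry->head, &ctx->untyped_resources);
       } else {
          /* already backed by a vrend_resource: goes straight into the
           * per-context lookup table (ctx->res_hash) */
       }
       /* vrend_decode_pipe_resource_set_type later creates the vrend_resource
        * for a parked entry and moves it from untyped_resources to res_hash */
    }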
struct list_head active_nontimer_query_list;
struct vrend_shader_cfg shader_cfg;
unsigned debug_flags;
+
+ vrend_context_fence_retire fence_retire;
+ void *fence_retire_data;
};
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle);
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause);
-static void vrend_update_viewport_state(struct vrend_context *ctx);
-static void vrend_update_scissor_state(struct vrend_context *ctx);
+static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx);
+static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx);
static void vrend_destroy_query_object(void *obj_ptr);
static void vrend_finish_context_switch(struct vrend_context *ctx);
-static void vrend_patch_blend_state(struct vrend_context *ctx);
-static void vrend_update_frontface_state(struct vrend_context *ctx);
+static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx);
+static void vrend_update_frontface_state(struct vrend_sub_context *ctx);
static void vrender_get_glsl_version(int *glsl_version);
static void vrend_destroy_program(struct vrend_linked_shader_program *ent);
-static void vrend_apply_sampler_state(struct vrend_context *ctx,
+static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id, int sampler_id,
struct vrend_sampler_view *tview);
static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples);
-void vrend_update_stencil_state(struct vrend_context *ctx);
+void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx);
static struct vrend_format_table tex_conv_table[VIRGL_FORMAT_MAX_EXTENDED];
@@ -1053,7 +1111,7 @@ static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
free(sel);
}
-static bool vrend_compile_shader(struct vrend_context *ctx,
+static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx,
struct vrend_shader *shader)
{
GLint param;
@@ -1068,7 +1126,7 @@ static bool vrend_compile_shader(struct vrend_context *ctx,
char infolog[65536];
int len;
glGetShaderInfoLog(shader->id, 65536, &len, infolog);
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_printf("shader failed to compile\n%s\n", infolog);
vrend_shader_dump(shader);
return false;
@@ -1151,11 +1209,11 @@ static bool vrend_is_timer_query(GLenum gltype)
gltype == GL_TIME_ELAPSED;
}
-static void vrend_use_program(struct vrend_context *ctx, GLuint program_id)
+static void vrend_use_program(struct vrend_sub_context *sub_ctx, GLuint program_id)
{
- if (ctx->sub->program_id != program_id) {
+ if (sub_ctx->program_id != program_id) {
glUseProgram(program_id);
- ctx->sub->program_id = program_id;
+ sub_ctx->program_id = program_id;
}
}
@@ -1198,10 +1256,10 @@ static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_e
}
}
-static void vrend_stencil_test_enable(struct vrend_context *ctx, bool stencil_test_enable)
+static void vrend_stencil_test_enable(struct vrend_sub_context *sub_ctx, bool stencil_test_enable)
{
- if (ctx->sub->stencil_test_enabled != stencil_test_enable) {
- ctx->sub->stencil_test_enabled = stencil_test_enable;
+ if (sub_ctx->stencil_test_enabled != stencil_test_enable) {
+ sub_ctx->stencil_test_enabled = stencil_test_enable;
if (stencil_test_enable)
glEnable(GL_STENCIL_TEST);
else
@@ -1257,7 +1315,7 @@ static char *get_skip_str(int *skip_val)
return start_skip;
}
-static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_context *ctx,
+static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_sub_context *sub_ctx,
int prog_id,
struct vrend_shader_info *sinfo)
{
@@ -1272,7 +1330,7 @@ static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_context *ctx,
if (!so->num_outputs)
return;
- VREND_DEBUG_EXT(dbg_shader_streamout, ctx, dump_stream_out(so));
+ VREND_DEBUG_EXT(dbg_shader_streamout, sub_ctx->parent, dump_stream_out(so));
for (i = 0; i < so->num_outputs; i++) {
if (last_buffer != so->output[i].output_buffer) {
@@ -1496,9 +1554,9 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs);
sprog->id = prog_id;
- list_addtail(&sprog->head, &ctx->sub->programs);
+ list_addtail(&sprog->head, &ctx->sub->cs_programs);
- vrend_use_program(ctx, prog_id);
+ vrend_use_program(ctx->sub, prog_id);
bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE, 0);
bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE, 0);
@@ -1508,7 +1566,7 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
return sprog;
}
-static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
+static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_context *sub_ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs,
@@ -1535,20 +1593,20 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
if (gs) {
if (gs->id > 0)
glAttachShader(prog_id, gs->id);
- set_stream_out_varyings(ctx, prog_id, &gs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &gs->sel->sinfo);
} else if (tes)
- set_stream_out_varyings(ctx, prog_id, &tes->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &tes->sel->sinfo);
else
- set_stream_out_varyings(ctx, prog_id, &vs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, prog_id, &vs->sel->sinfo);
glAttachShader(prog_id, fs->id);
if (fs->sel->sinfo.num_outputs > 1) {
- if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
+ if (util_blend_state_is_dual(&sub_ctx->blend_state, 0)) {
if (has_feature(feat_dual_src_blend)) {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
} else {
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
}
sprog->dual_src_linked = true;
} else {
@@ -1579,7 +1637,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
glGetProgramInfoLog(prog_id, 65536, &len, infolog);
vrend_printf("got error linking\n%s\n", infolog);
/* dump shaders */
- vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_shader_dump(vs);
if (gs)
vrend_shader_dump(gs);
@@ -1591,6 +1649,9 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
+ sprog->vs_fs_key = (((uint64_t)fs->id) << 32) | (vs->id & ~VREND_PROGRAM_NQUEUE_MASK) |
+ (sprog->dual_src_linked ? 1 : 0);
+
sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
sprog->ss[PIPE_SHADER_TESS_CTRL] = tcs;
sprog->ss[PIPE_SHADER_TESS_EVAL] = tes;
@@ -1607,7 +1668,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
sprog->id = prog_id;
- list_addtail(&sprog->head, &ctx->sub->programs);
+ list_addtail(&sprog->head, &sub_ctx->gl_programs[vs->id & VREND_PROGRAM_NQUEUE_MASK]);
if (fs->key.pstipple_tex)
sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
@@ -1619,7 +1680,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
sprog->fs_alpha_ref_val_loc = -1;
sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust_y");
- vrend_use_program(ctx, prog_id);
+ vrend_use_program(sub_ctx, prog_id);
int next_ubo_id = 0, next_sampler_id = 0;
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
@@ -1659,16 +1720,17 @@ static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend
GLuint cs_id)
{
struct vrend_linked_shader_program *ent;
- LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
- if (!ent->ss[PIPE_SHADER_COMPUTE])
- continue;
- if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id)
+ LIST_FOR_EACH_ENTRY(ent, &ctx->sub->cs_programs, head) {
+ if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id) {
+ list_del(&ent->head);
+ list_add(&ent->head, &ctx->sub->cs_programs);
return ent;
+ }
}
return NULL;
}
-static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_context *ctx,
+static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_sub_context *sub_ctx,
GLuint vs_id,
GLuint fs_id,
GLuint gs_id,
@@ -1676,16 +1738,15 @@ static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_co
GLuint tes_id,
bool dual_src)
{
+ uint64_t vs_fs_key = (((uint64_t)fs_id) << 32) | (vs_id & ~VREND_PROGRAM_NQUEUE_MASK) |
+ (dual_src ? 1 : 0);
+
struct vrend_linked_shader_program *ent;
- LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
- if (ent->dual_src_linked != dual_src)
- continue;
- if (ent->ss[PIPE_SHADER_COMPUTE])
+
+ struct list_head *programs = &sub_ctx->gl_programs[vs_id & VREND_PROGRAM_NQUEUE_MASK];
+ LIST_FOR_EACH_ENTRY(ent, programs, head) {
+ if (likely(ent->vs_fs_key != vs_fs_key))
continue;
- if (ent->ss[PIPE_SHADER_VERTEX]->id != vs_id)
- continue;
- if (ent->ss[PIPE_SHADER_FRAGMENT]->id != fs_id)
- continue;
if (ent->ss[PIPE_SHADER_GEOMETRY] &&
ent->ss[PIPE_SHADER_GEOMETRY]->id != gs_id)
continue;
@@ -1695,8 +1756,14 @@ static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_co
if (ent->ss[PIPE_SHADER_TESS_EVAL] &&
ent->ss[PIPE_SHADER_TESS_EVAL]->id != tes_id)
continue;
+ /* put the entry in front */
+ if (programs->next != &ent->head) {
+ list_del(&ent->head);
+ list_add(&ent->head, programs);
+ }
return ent;
}
+
return NULL;
}
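A worked example of the bucket/key scheme above; it only restates the expressions from add_shader_program and lookup_shader_program. All programs in one bucket share the low 8 bits of the vertex shader id, so those bits can be dropped from the key and bit 0 reused for the dual-source-blend flag; a hit is also moved to the front of its bucket, so recently used programs are found first.

    static inline unsigned example_bucket(GLuint vs_id)
    {
       return vs_id & VREND_PROGRAM_NQUEUE_MASK;        /* list index, 0..255 */
    }

    static inline uint64_t example_vs_fs_key(GLuint vs_id, GLuint fs_id, bool dual_src)
    {
       return (((uint64_t)fs_id) << 32) |               /* fs id in the high word */
              (vs_id & ~VREND_PROGRAM_NQUEUE_MASK) |    /* vs id minus the bucket bits */
              (dual_src ? 1 : 0);                       /* flag in bit 0 */
    }

    /* e.g. vs_id = 0x1234, fs_id = 0x56, dual_src = false:
     *      bucket = 0x34, key = 0x0000005600001200 */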
@@ -1725,11 +1792,16 @@ static void vrend_free_programs(struct vrend_sub_context *sub)
{
struct vrend_linked_shader_program *ent, *tmp;
- if (LIST_IS_EMPTY(&sub->programs))
- return;
+ if (!LIST_IS_EMPTY(&sub->cs_programs)) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->cs_programs, head)
+ vrend_destroy_program(ent);
+ }
- LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
- vrend_destroy_program(ent);
+ for (unsigned i = 0; i < VREND_PROGRAM_NQUEUES; ++i) {
+ if (!LIST_IS_EMPTY(&sub->gl_programs[i])) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->gl_programs[i], head)
+ vrend_destroy_program(ent);
+ }
}
}
@@ -1944,6 +2016,16 @@ static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_fi
return 0;
}
+static void apply_sampler_border_color(GLuint sampler,
+ const GLuint colors[static 4])
+{
+ if (has_feature(feat_sampler_border_colors)) {
+ glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, colors);
+ } else if (colors[0] || colors[1] || colors[2] || colors[3]) {
+ vrend_printf("sampler border color setting requested but not supported\n");
+ }
+}
+
int vrend_create_sampler_state(struct vrend_context *ctx,
uint32_t handle,
struct pipe_sampler_state *templ)
@@ -1984,7 +2066,7 @@ int vrend_create_sampler_state(struct vrend_context *ctx,
}
- glSamplerParameterIuiv(state->ids[i], GL_TEXTURE_BORDER_COLOR, templ->border_color.ui);
+ apply_sampler_border_color(state->ids[i], templ->border_color.ui);
glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT, i == 0 ? GL_SKIP_DECODE_EXT : GL_DECODE_EXT);
}
}
@@ -2105,10 +2187,8 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]];
}
- view->gl_swizzle_r = to_gl_swizzle(swizzle[0]);
- view->gl_swizzle_g = to_gl_swizzle(swizzle[1]);
- view->gl_swizzle_b = to_gl_swizzle(swizzle[2]);
- view->gl_swizzle_a = to_gl_swizzle(swizzle[3]);
+ for (unsigned i = 0; i < 4; ++i)
+ view->gl_swizzle[i] = to_gl_swizzle(swizzle[i]);
if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
enum virgl_formats format;
@@ -2172,10 +2252,12 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
glTexParameteri(view->target, GL_TEXTURE_BASE_LEVEL, base_level);
glTexParameteri(view->target, GL_TEXTURE_MAX_LEVEL, max_level);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
- glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
+ }
+ } else
+ glTexParameteriv(view->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
if (util_format_is_srgb(view->format) &&
has_feature(feat_texture_srgb_decode)) {
glTexParameteri(view->target, GL_TEXTURE_SRGB_DECODE_EXT,
@@ -2330,9 +2412,9 @@ static void vrend_hw_set_zsurf_texture(struct vrend_context *ctx)
}
}
-static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
+static void vrend_hw_set_color_surface(struct vrend_sub_context *sub_ctx, int index)
{
- struct vrend_surface *surf = ctx->sub->surf[index];
+ struct vrend_surface *surf = sub_ctx->surf[index];
if (!surf) {
GLenum attachment = GL_COLOR_ATTACHMENT0 + index;
@@ -2340,15 +2422,15 @@ static void vrend_hw_set_color_surface(struct vrend_context *ctx, int index)
glFramebufferTexture2D(GL_FRAMEBUFFER, attachment,
GL_TEXTURE_2D, 0, 0);
} else {
- uint32_t first_layer = ctx->sub->surf[index]->val1 & 0xffff;
- uint32_t last_layer = (ctx->sub->surf[index]->val1 >> 16) & 0xffff;
+ uint32_t first_layer = sub_ctx->surf[index]->val1 & 0xffff;
+ uint32_t last_layer = (sub_ctx->surf[index]->val1 >> 16) & 0xffff;
vrend_fb_bind_texture_id(surf->texture, surf->id, index, surf->val0,
first_layer != last_layer ? 0xffffffff : first_layer);
}
}
-static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
+static void vrend_hw_emit_framebuffer_state(struct vrend_sub_context *sub_ctx)
{
static const GLenum buffers[8] = {
GL_COLOR_ATTACHMENT0,
@@ -2361,19 +2443,19 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
GL_COLOR_ATTACHMENT7,
};
- if (ctx->sub->nr_cbufs == 0) {
+ if (sub_ctx->nr_cbufs == 0) {
glReadBuffer(GL_NONE);
if (has_feature(feat_srgb_write_control)) {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
- ctx->sub->framebuffer_srgb_enabled = false;
+ sub_ctx->framebuffer_srgb_enabled = false;
}
} else if (has_feature(feat_srgb_write_control)) {
struct vrend_surface *surf = NULL;
bool use_srgb = false;
int i;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- surf = ctx->sub->surf[i];
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ surf = sub_ctx->surf[i];
if (util_format_is_srgb(surf->format)) {
use_srgb = true;
}
@@ -2384,25 +2466,25 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
} else {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
}
- ctx->sub->framebuffer_srgb_enabled = use_srgb;
+ sub_ctx->framebuffer_srgb_enabled = use_srgb;
}
if (vrend_state.use_gles &&
- vrend_get_tweak_is_active(&ctx->sub->tweaks, virgl_tweak_gles_brga_apply_dest_swizzle)) {
- ctx->sub->swizzle_output_rgb_to_bgr = 0;
- for (int i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- struct vrend_surface *surf = ctx->sub->surf[i];
+ vrend_get_tweak_is_active(&sub_ctx->tweaks, virgl_tweak_gles_brga_apply_dest_swizzle)) {
+ sub_ctx->swizzle_output_rgb_to_bgr = 0;
+ for (int i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ struct vrend_surface *surf = sub_ctx->surf[i];
if (surf->texture->base.bind & VIRGL_BIND_PREFER_EMULATED_BGRA) {
- VREND_DEBUG(dbg_tweak, ctx, "Swizzled BGRA output for 0x%x (%s)\n", i, util_format_name(surf->format));
- ctx->sub->swizzle_output_rgb_to_bgr |= 1 << i;
+ VREND_DEBUG(dbg_tweak, sub_ctx->parent, "Swizzled BGRA output for 0x%x (%s)\n", i, util_format_name(surf->format));
+ sub_ctx->swizzle_output_rgb_to_bgr |= 1 << i;
}
}
}
}
- glDrawBuffers(ctx->sub->nr_cbufs, buffers);
+ glDrawBuffers(sub_ctx->nr_cbufs, buffers);
}
void vrend_set_framebuffer_state(struct vrend_context *ctx,
@@ -2416,10 +2498,12 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
GLint new_height = -1;
bool new_ibf = false;
- glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->fb_id);
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
+ glBindFramebuffer(GL_FRAMEBUFFER, sub_ctx->fb_id);
if (zsurf_handle) {
- zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
+ zsurf = vrend_object_lookup(sub_ctx->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
@@ -2427,18 +2511,18 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
} else
zsurf = NULL;
- if (ctx->sub->zsurf != zsurf) {
- vrend_surface_reference(&ctx->sub->zsurf, zsurf);
+ if (sub_ctx->zsurf != zsurf) {
+ vrend_surface_reference(&sub_ctx->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
- old_num = ctx->sub->nr_cbufs;
- ctx->sub->nr_cbufs = nr_cbufs;
- ctx->sub->old_nr_cbufs = old_num;
+ old_num = sub_ctx->nr_cbufs;
+ sub_ctx->nr_cbufs = nr_cbufs;
+ sub_ctx->old_nr_cbufs = old_num;
for (i = 0; i < (int)nr_cbufs; i++) {
if (surf_handle[i] != 0) {
- surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
+ surf = vrend_object_lookup(sub_ctx->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
@@ -2446,32 +2530,32 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
} else
surf = NULL;
- if (ctx->sub->surf[i] != surf) {
- vrend_surface_reference(&ctx->sub->surf[i], surf);
- vrend_hw_set_color_surface(ctx, i);
+ if (sub_ctx->surf[i] != surf) {
+ vrend_surface_reference(&sub_ctx->surf[i], surf);
+ vrend_hw_set_color_surface(sub_ctx, i);
}
}
- if (old_num > ctx->sub->nr_cbufs) {
- for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
- vrend_surface_reference(&ctx->sub->surf[i], NULL);
- vrend_hw_set_color_surface(ctx, i);
+ if (old_num > sub_ctx->nr_cbufs) {
+ for (i = sub_ctx->nr_cbufs; i < old_num; i++) {
+ vrend_surface_reference(&sub_ctx->surf[i], NULL);
+ vrend_hw_set_color_surface(sub_ctx, i);
}
}
/* find a buffer to set fb_height from */
- if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
+ if (sub_ctx->nr_cbufs == 0 && !sub_ctx->zsurf) {
new_height = 0;
new_ibf = false;
- } else if (ctx->sub->nr_cbufs == 0) {
- new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
- new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
+ } else if (sub_ctx->nr_cbufs == 0) {
+ new_height = u_minify(sub_ctx->zsurf->texture->base.height0, sub_ctx->zsurf->val0);
+ new_ibf = sub_ctx->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i]) {
- surf = ctx->sub->surf[i];
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i]) {
+ surf = sub_ctx->surf[i];
break;
}
}
@@ -2484,23 +2568,23 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
}
if (new_height != -1) {
- if (ctx->sub->fb_height != (uint32_t)new_height || ctx->sub->inverted_fbo_content != new_ibf) {
- ctx->sub->fb_height = new_height;
- ctx->sub->inverted_fbo_content = new_ibf;
- ctx->sub->viewport_state_dirty = (1 << 0);
+ if (sub_ctx->fb_height != (uint32_t)new_height || sub_ctx->inverted_fbo_content != new_ibf) {
+ sub_ctx->fb_height = new_height;
+ sub_ctx->inverted_fbo_content = new_ibf;
+ sub_ctx->viewport_state_dirty = (1 << 0);
}
}
- vrend_hw_emit_framebuffer_state(ctx);
+ vrend_hw_emit_framebuffer_state(sub_ctx);
- if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
+ if (sub_ctx->nr_cbufs > 0 || sub_ctx->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
vrend_printf("failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
- ctx->sub->shader_dirty = true;
- ctx->sub->blend_state_dirty = true;
+ sub_ctx->shader_dirty = true;
+ sub_ctx->blend_state_dirty = true;
}
void vrend_set_framebuffer_state_no_attach(UNUSED struct vrend_context *ctx,
@@ -2622,41 +2706,48 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
}
type = GL_FALSE;
- if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
- if (desc->channel[0].size == 32)
- type = GL_FLOAT;
- else if (desc->channel[0].size == 64)
- type = GL_DOUBLE;
- else if (desc->channel[0].size == 16)
- type = GL_HALF_FLOAT;
- } else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 8)
- type = GL_UNSIGNED_BYTE;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 8)
- type = GL_BYTE;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 16)
- type = GL_UNSIGNED_SHORT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 16)
- type = GL_SHORT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
- desc->channel[0].size == 32)
- type = GL_UNSIGNED_INT;
- else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
- desc->channel[0].size == 32)
- type = GL_INT;
- else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
- elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
- elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
- type = GL_INT_2_10_10_10_REV;
- else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
- elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
- elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
- type = GL_UNSIGNED_INT_2_10_10_10_REV;
- else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
- type = GL_UNSIGNED_INT_10F_11F_11F_REV;
+ switch (desc->channel[0].type) {
+ case UTIL_FORMAT_TYPE_FLOAT:
+ switch (desc->channel[0].size) {
+ case 16: type = GL_HALF_FLOAT; break;
+ case 32: type = GL_FLOAT; break;
+ case 64: type = GL_DOUBLE; break;
+ }
+ break;
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ switch (desc->channel[0].size) {
+ case 8: type = GL_UNSIGNED_BYTE; break;
+ case 16: type = GL_UNSIGNED_SHORT; break;
+ case 32: type = GL_UNSIGNED_INT; break;
+ }
+ break;
+ case UTIL_FORMAT_TYPE_SIGNED:
+ switch (desc->channel[0].size) {
+ case 8: type = GL_BYTE; break;
+ case 16: type = GL_SHORT; break;
+ case 32: type = GL_INT; break;
+ }
+ break;
+ }
+ if (type == GL_FALSE) {
+ switch (elements[i].src_format) {
+ case PIPE_FORMAT_R10G10B10A2_SSCALED:
+ case PIPE_FORMAT_R10G10B10A2_SNORM:
+ case PIPE_FORMAT_B10G10R10A2_SNORM:
+ type = GL_INT_2_10_10_10_REV;
+ break;
+ case PIPE_FORMAT_R10G10B10A2_USCALED:
+ case PIPE_FORMAT_R10G10B10A2_UNORM:
+ case PIPE_FORMAT_B10G10R10A2_UNORM:
+ type = GL_UNSIGNED_INT_2_10_10_10_REV;
+ break;
+ case PIPE_FORMAT_R11G11B10_FLOAT:
+ type = GL_UNSIGNED_INT_10F_11F_11F_REV;
+ break;
+ default:
+ break;
+ }
+ }
if (type == GL_FALSE) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
@@ -2725,7 +2816,6 @@ void vrend_bind_vertex_elements_state(struct vrend_context *ctx,
void vrend_set_constants(struct vrend_context *ctx,
uint32_t shader,
- UNUSED uint32_t index,
uint32_t num_constant,
const float *data)
{
@@ -2759,6 +2849,9 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
if (!has_feature(feat_ubo))
return;
+ struct pipe_constant_buffer *cbs = &ctx->sub->cbs[shader][index];
+ const uint32_t mask = 1u << index;
+
if (res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
@@ -2766,18 +2859,17 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
- ctx->sub->cbs[shader][index].buffer = (struct pipe_resource *)res;
- ctx->sub->cbs[shader][index].buffer_offset = offset;
- ctx->sub->cbs[shader][index].buffer_size = length;
-
- ctx->sub->const_bufs_used_mask[shader] |= (1u << index);
+ cbs->buffer = (struct pipe_resource *)res;
+ cbs->buffer_offset = offset;
+ cbs->buffer_size = length;
+ ctx->sub->const_bufs_used_mask[shader] |= mask;
} else {
- ctx->sub->cbs[shader][index].buffer = NULL;
- ctx->sub->cbs[shader][index].buffer_offset = 0;
- ctx->sub->cbs[shader][index].buffer_size = 0;
- ctx->sub->const_bufs_used_mask[shader] &= ~(1u << index);
+ cbs->buffer = NULL;
+ cbs->buffer_offset = 0;
+ cbs->buffer_size = 0;
+ ctx->sub->const_bufs_used_mask[shader] &= ~mask;
}
- ctx->sub->const_bufs_dirty[shader] |= (1u << index);
+ ctx->sub->const_bufs_dirty[shader] |= mask;
}
void vrend_set_index_buffer(struct vrend_context *ctx,
@@ -2814,27 +2906,28 @@ void vrend_set_single_vbo(struct vrend_context *ctx,
uint32_t res_handle)
{
struct vrend_resource *res;
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[index];
- if (ctx->sub->vbo[index].stride != stride ||
- ctx->sub->vbo[index].buffer_offset != buffer_offset ||
- ctx->sub->vbo_res_ids[index] != res_handle)
+ if (vbo->base.stride != stride ||
+ vbo->base.buffer_offset != buffer_offset ||
+ vbo->res_id != res_handle)
ctx->sub->vbo_dirty = true;
- ctx->sub->vbo[index].stride = stride;
- ctx->sub->vbo[index].buffer_offset = buffer_offset;
+ vbo->base.stride = stride;
+ vbo->base.buffer_offset = buffer_offset;
if (res_handle == 0) {
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, NULL);
- ctx->sub->vbo_res_ids[index] = 0;
- } else if (ctx->sub->vbo_res_ids[index] != res_handle) {
+ vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, NULL);
+ vbo->res_id = 0;
+ } else if (vbo->res_id != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
- ctx->sub->vbo_res_ids[index] = 0;
+ vbo->res_id = 0;
return;
}
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, res);
- ctx->sub->vbo_res_ids[index] = res_handle;
+ vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, res);
+ vbo->res_id = res_handle;
}
}
@@ -2851,8 +2944,8 @@ void vrend_set_num_vbo(struct vrend_context *ctx,
ctx->sub->vbo_dirty = true;
for (i = num_vbo; i < old_num; i++) {
- vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].buffer, NULL);
- ctx->sub->vbo_res_ids[i] = 0;
+ vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].base.buffer, NULL);
+ ctx->sub->vbo[i].res_id = 0;
}
}
@@ -2916,22 +3009,18 @@ void vrend_set_single_sampler_view(struct vrend_context *ctx,
glTexParameteri(view->texture->target, GL_TEXTURE_MAX_LEVEL, max_level);
tex->cur_max = max_level;
}
- if (tex->cur_swizzle_r != view->gl_swizzle_r) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R, view->gl_swizzle_r);
- tex->cur_swizzle_r = view->gl_swizzle_r;
- }
- if (tex->cur_swizzle_g != view->gl_swizzle_g) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_G, view->gl_swizzle_g);
- tex->cur_swizzle_g = view->gl_swizzle_g;
- }
- if (tex->cur_swizzle_b != view->gl_swizzle_b) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_B, view->gl_swizzle_b);
- tex->cur_swizzle_b = view->gl_swizzle_b;
- }
- if (tex->cur_swizzle_a != view->gl_swizzle_a) {
- glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_A, view->gl_swizzle_a);
- tex->cur_swizzle_a = view->gl_swizzle_a;
+ if (memcmp(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint))) {
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ if (tex->cur_swizzle[i] != view->gl_swizzle[i]) {
+ glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
+ }
+ }
+ } else
+ glTexParameteriv(view->texture->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
+ memcpy(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint));
}
+
if (tex->cur_srgb_decode != view->srgb_decode && util_format_is_srgb(tex->base.base.format)) {
if (has_feature(feat_samplers))
ctx->sub->sampler_views_dirty[shader_type] |= (1u << index);
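Both this hunk and the vrend_create_sampler_view hunk above now branch on GLES, which lacks the GL_TEXTURE_SWIZZLE_RGBA vector parameter. A shared helper could fold the two paths together — editorial sketch only, assuming the vrend_state.use_gles flag used throughout this file; the GL_TEXTURE_SWIZZLE_R + i trick relies on the four enums being consecutive, exactly as the patch itself does:

    static void set_texture_swizzle(GLenum target, const GLint swizzle[4])
    {
       if (vrend_state.use_gles) {
          /* GLES only exposes the per-channel GL_TEXTURE_SWIZZLE_{R,G,B,A} */
          for (unsigned i = 0; i < 4; ++i)
             glTexParameteri(target, GL_TEXTURE_SWIZZLE_R + i, swizzle[i]);
       } else {
          glTexParameteriv(target, GL_TEXTURE_SWIZZLE_RGBA, swizzle);
       }
    }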
@@ -3146,7 +3235,7 @@ static inline bool can_emulate_logicop(enum pipe_logicop op)
}
-static inline void vrend_fill_shader_key(struct vrend_context *ctx,
+static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
struct vrend_shader_key *key)
{
@@ -3157,54 +3246,54 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
bool add_alpha_test = true;
key->cbufs_are_a8_bitmask = 0;
// Only use integer info when drawing to avoid stale info.
- if (vrend_state.use_integer && ctx->drawing) {
- key->attrib_signed_int_bitmask = ctx->sub->ve->signed_int_bitmask;
- key->attrib_unsigned_int_bitmask = ctx->sub->ve->unsigned_int_bitmask;
+ if (vrend_state.use_integer && sub_ctx->drawing) {
+ key->attrib_signed_int_bitmask = sub_ctx->ve->signed_int_bitmask;
+ key->attrib_unsigned_int_bitmask = sub_ctx->ve->unsigned_int_bitmask;
}
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (!ctx->sub->surf[i])
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (!sub_ctx->surf[i])
continue;
- if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format))
+ if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format))
key->cbufs_are_a8_bitmask |= (1 << i);
- if (util_format_is_pure_integer(ctx->sub->surf[i]->format)) {
+ if (util_format_is_pure_integer(sub_ctx->surf[i]->format)) {
add_alpha_test = false;
- update_int_sign_masks(ctx->sub->surf[i]->format, i,
+ update_int_sign_masks(sub_ctx->surf[i]->format, i,
&key->cbufs_signed_int_bitmask,
&key->cbufs_unsigned_int_bitmask);
}
- key->surface_component_bits[i] = util_format_get_component_bits(ctx->sub->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
+ key->surface_component_bits[i] = util_format_get_component_bits(sub_ctx->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
}
if (add_alpha_test) {
- key->add_alpha_test = ctx->sub->dsa_state.alpha.enabled;
- key->alpha_test = ctx->sub->dsa_state.alpha.func;
+ key->add_alpha_test = sub_ctx->dsa_state.alpha.enabled;
+ key->alpha_test = sub_ctx->dsa_state.alpha.func;
}
- key->pstipple_tex = ctx->sub->rs_state.poly_stipple_enable;
- key->color_two_side = ctx->sub->rs_state.light_twoside;
+ key->pstipple_tex = sub_ctx->rs_state.poly_stipple_enable;
+ key->color_two_side = sub_ctx->rs_state.light_twoside;
- key->clip_plane_enable = ctx->sub->rs_state.clip_plane_enable;
- key->flatshade = ctx->sub->rs_state.flatshade ? true : false;
+ key->clip_plane_enable = sub_ctx->rs_state.clip_plane_enable;
+ key->flatshade = sub_ctx->rs_state.flatshade ? true : false;
} else {
key->add_alpha_test = 0;
key->pstipple_tex = 0;
}
- if (type == PIPE_SHADER_FRAGMENT && vrend_state.use_gles && can_emulate_logicop(ctx->sub->blend_state.logicop_func)) {
- key->fs_logicop_enabled = ctx->sub->blend_state.logicop_enable;
- key->fs_logicop_func = ctx->sub->blend_state.logicop_func;
+ if (type == PIPE_SHADER_FRAGMENT && vrend_state.use_gles && can_emulate_logicop(sub_ctx->blend_state.logicop_func)) {
+ key->fs_logicop_enabled = sub_ctx->blend_state.logicop_enable;
+ key->fs_logicop_func = sub_ctx->blend_state.logicop_func;
key->fs_logicop_emulate_coherent = !has_feature(feat_framebuffer_fetch_non_coherent);
}
- key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
+ key->invert_fs_origin = !sub_ctx->inverted_fbo_content;
if (type == PIPE_SHADER_FRAGMENT)
- key->fs_swizzle_output_rgb_to_bgr = ctx->sub->swizzle_output_rgb_to_bgr;
+ key->fs_swizzle_output_rgb_to_bgr = sub_ctx->swizzle_output_rgb_to_bgr;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
+ if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY])
key->gs_present = true;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_CTRL])
key->tcs_present = true;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL])
key->tes_present = true;
int prev_type = -1;
@@ -3213,7 +3302,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
* old shader is still bound we should ignore the "previous" (as in
* execution order) shader when the key is evaluated, unless the currently
* bound shader selector actually refers to the current shader. */
- if (ctx->sub->shaders[type] == sel) {
+ if (sub_ctx->shaders[type] == sel) {
switch (type) {
case PIPE_SHADER_GEOMETRY:
if (key->tcs_present || key->tes_present)
@@ -3243,36 +3332,36 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
}
}
- if (prev_type != -1 && ctx->sub->shaders[prev_type]) {
- key->prev_stage_num_clip_out = ctx->sub->shaders[prev_type]->sinfo.num_clip_out;
- key->prev_stage_num_cull_out = ctx->sub->shaders[prev_type]->sinfo.num_cull_out;
- key->num_indirect_generic_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
- key->num_indirect_patch_inputs = ctx->sub->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
- key->num_prev_generic_and_patch_outputs = ctx->sub->shaders[prev_type]->sinfo.num_generic_and_patch_outputs;
- key->guest_sent_io_arrays = ctx->sub->shaders[prev_type]->sinfo.guest_sent_io_arrays;
+ if (prev_type != -1 && sub_ctx->shaders[prev_type]) {
+ key->prev_stage_num_clip_out = sub_ctx->shaders[prev_type]->sinfo.num_clip_out;
+ key->prev_stage_num_cull_out = sub_ctx->shaders[prev_type]->sinfo.num_cull_out;
+ key->num_indirect_generic_inputs = sub_ctx->shaders[prev_type]->sinfo.num_indirect_generic_outputs;
+ key->num_indirect_patch_inputs = sub_ctx->shaders[prev_type]->sinfo.num_indirect_patch_outputs;
+ key->num_prev_generic_and_patch_outputs = sub_ctx->shaders[prev_type]->sinfo.num_generic_and_patch_outputs;
+ key->guest_sent_io_arrays = sub_ctx->shaders[prev_type]->sinfo.guest_sent_io_arrays;
memcpy(key->prev_stage_generic_and_patch_outputs_layout,
- ctx->sub->shaders[prev_type]->sinfo.generic_outputs_layout,
+ sub_ctx->shaders[prev_type]->sinfo.generic_outputs_layout,
64 * sizeof (struct vrend_layout_info));
- key->force_invariant_inputs = ctx->sub->shaders[prev_type]->sinfo.invariant_outputs;
+ key->force_invariant_inputs = sub_ctx->shaders[prev_type]->sinfo.invariant_outputs;
}
// Only use coord_replace if frag shader receives GL_POINTS
if (type == PIPE_SHADER_FRAGMENT) {
- int fs_prim_mode = ctx->sub->prim_mode; // inherit draw-call's mode
+ int fs_prim_mode = sub_ctx->prim_mode; // inherit draw-call's mode
switch (prev_type) {
case PIPE_SHADER_TESS_EVAL:
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
+ if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
fs_prim_mode = PIPE_PRIM_POINTS;
break;
case PIPE_SHADER_GEOMETRY:
- fs_prim_mode = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
+ fs_prim_mode = sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
break;
}
key->fs_prim_is_points = (fs_prim_mode == PIPE_PRIM_POINTS);
- key->coord_replace = ctx->sub->rs_state.point_quad_rasterization
+ key->coord_replace = sub_ctx->rs_state.point_quad_rasterization
&& key->fs_prim_is_points
- ? ctx->sub->rs_state.sprite_coord_enable
+ ? sub_ctx->rs_state.sprite_coord_enable
: 0x0;
}
@@ -3284,7 +3373,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
else if (key->gs_present)
next_type = PIPE_SHADER_GEOMETRY;
else if (key->tes_present) {
- if (!ctx->shader_cfg.use_gles)
+ if (!vrend_state.use_gles)
next_type = PIPE_SHADER_TESS_EVAL;
else
next_type = PIPE_SHADER_TESS_CTRL;
@@ -3306,17 +3395,17 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
break;
}
- if (next_type != -1 && ctx->sub->shaders[next_type]) {
- key->next_stage_pervertex_in = ctx->sub->shaders[next_type]->sinfo.has_pervertex_in;
- key->num_indirect_generic_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_generic_inputs;
- key->num_indirect_patch_outputs = ctx->sub->shaders[next_type]->sinfo.num_indirect_patch_inputs;
- key->generic_outputs_expected_mask = ctx->sub->shaders[next_type]->sinfo.generic_inputs_emitted_mask;
+ if (next_type != -1 && sub_ctx->shaders[next_type]) {
+ key->next_stage_pervertex_in = sub_ctx->shaders[next_type]->sinfo.has_pervertex_in;
+ key->num_indirect_generic_outputs = sub_ctx->shaders[next_type]->sinfo.num_indirect_generic_inputs;
+ key->num_indirect_patch_outputs = sub_ctx->shaders[next_type]->sinfo.num_indirect_patch_inputs;
+ key->generic_outputs_expected_mask = sub_ctx->shaders[next_type]->sinfo.generic_inputs_emitted_mask;
}
if (type != PIPE_SHADER_FRAGMENT &&
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]) {
struct vrend_shader *fs =
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current;
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current;
key->compiled_fs_uid = fs->uid;
key->fs_info = &fs->sel->sinfo;
}
@@ -3363,7 +3452,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
if (1) {//shader->sel->type == PIPE_SHADER_FRAGMENT || shader->sel->type == PIPE_SHADER_GEOMETRY) {
bool ret;
- ret = vrend_compile_shader(ctx, shader);
+ ret = vrend_compile_shader(ctx->sub, shader);
if (ret == false) {
glDeleteShader(shader->id);
strarray_free(&shader->glsl_strings, true);
@@ -3373,7 +3462,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
return 0;
}
-static int vrend_shader_select(struct vrend_context *ctx,
+static int vrend_shader_select(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
bool *dirty)
{
@@ -3382,7 +3471,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
int r;
memset(&key, 0, sizeof(key));
- vrend_fill_shader_key(ctx, sel, &key);
+ vrend_fill_shader_key(sub_ctx, sel, &key);
if (sel->current && !memcmp(&sel->current->key, &key, sizeof(key)))
return 0;
@@ -3406,7 +3495,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- r = vrend_shader_create(ctx, shader, &key);
+ r = vrend_shader_create(sub_ctx->parent, shader, &key);
if (r) {
sel->current = NULL;
FREE(shader);
@@ -3422,8 +3511,7 @@ static int vrend_shader_select(struct vrend_context *ctx,
return 0;
}
-static void *vrend_create_shader_state(UNUSED struct vrend_context *ctx,
- const struct pipe_stream_output_info *so_info,
+static void *vrend_create_shader_state(const struct pipe_stream_output_info *so_info,
uint32_t req_local_mem,
unsigned pipe_shader_type)
{
@@ -3448,7 +3536,7 @@ static int vrend_finish_shader(struct vrend_context *ctx,
sel->tokens = tgsi_dup_tokens(tokens);
- r = vrend_shader_select(ctx, sel, NULL);
+ r = vrend_shader_select(ctx->sub, sel, NULL);
if (r) {
return EINVAL;
}
@@ -3489,17 +3577,19 @@ int vrend_create_shader(struct vrend_context *ctx,
else if (((offlen + 3) / 4) > pkt_length)
long_shader = true;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
/* if we have an in progress one - don't allow a new shader
of that type or a different handle. */
- if (ctx->sub->long_shader_in_progress_handle[type]) {
+ if (sub_ctx->long_shader_in_progress_handle[type]) {
if (new_shader == true)
return EINVAL;
- if (handle != ctx->sub->long_shader_in_progress_handle[type])
+ if (handle != sub_ctx->long_shader_in_progress_handle[type])
return EINVAL;
}
if (new_shader) {
- sel = vrend_create_shader_state(ctx, so_info, req_local_mem, type);
+ sel = vrend_create_shader_state(so_info, req_local_mem, type);
if (sel == NULL)
return ENOMEM;
@@ -3512,11 +3602,11 @@ int vrend_create_shader(struct vrend_context *ctx,
}
memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
sel->buf_offset = pkt_length * 4;
- ctx->sub->long_shader_in_progress_handle[type] = handle;
+ sub_ctx->long_shader_in_progress_handle[type] = handle;
} else
finished = true;
} else {
- sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+ sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel) {
vrend_printf( "got continuation without original shader %d\n", handle);
ret = EINVAL;
@@ -3588,7 +3678,7 @@ int vrend_create_shader(struct vrend_context *ctx,
sel->tmp_buf = NULL;
}
free(tokens);
- ctx->sub->long_shader_in_progress_handle[type] = 0;
+ sub_ctx->long_shader_in_progress_handle[type] = 0;
}
if (new_shader) {
@@ -3618,31 +3708,33 @@ void vrend_bind_shader(struct vrend_context *ctx,
if (type > PIPE_SHADER_COMPUTE)
return;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
if (handle == 0) {
if (type == PIPE_SHADER_COMPUTE)
- ctx->sub->cs_shader_dirty = true;
+ sub_ctx->cs_shader_dirty = true;
else
- ctx->sub->shader_dirty = true;
- vrend_shader_state_reference(&ctx->sub->shaders[type], NULL);
+ sub_ctx->shader_dirty = true;
+ vrend_shader_state_reference(&sub_ctx->shaders[type], NULL);
return;
}
- sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+ sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel)
return;
if (sel->type != type)
return;
- if (ctx->sub->shaders[sel->type] != sel) {
+ if (sub_ctx->shaders[sel->type] != sel) {
if (type == PIPE_SHADER_COMPUTE)
- ctx->sub->cs_shader_dirty = true;
+ sub_ctx->cs_shader_dirty = true;
else
- ctx->sub->shader_dirty = true;
- ctx->sub->prog_ids[sel->type] = 0;
+ sub_ctx->shader_dirty = true;
+ sub_ctx->prog_ids[sel->type] = 0;
}
- vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
+ vrend_shader_state_reference(&sub_ctx->shaders[sel->type], sel);
}
void vrend_clear(struct vrend_context *ctx,
@@ -3651,6 +3743,7 @@ void vrend_clear(struct vrend_context *ctx,
double depth, unsigned stencil)
{
GLbitfield bits = 0;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
if (ctx->in_error)
return;
@@ -3658,20 +3751,20 @@ void vrend_clear(struct vrend_context *ctx,
if (ctx->ctx_switch_pending)
vrend_finish_context_switch(ctx);
- vrend_update_frontface_state(ctx);
- if (ctx->sub->stencil_state_dirty)
- vrend_update_stencil_state(ctx);
- if (ctx->sub->scissor_state_dirty)
- vrend_update_scissor_state(ctx);
- if (ctx->sub->viewport_state_dirty)
- vrend_update_viewport_state(ctx);
+ vrend_update_frontface_state(sub_ctx);
+ if (sub_ctx->stencil_state_dirty)
+ vrend_update_stencil_state(sub_ctx);
+ if (sub_ctx->scissor_state_dirty)
+ vrend_update_scissor_state(sub_ctx);
+ if (sub_ctx->viewport_state_dirty)
+ vrend_update_viewport_state(sub_ctx);
- vrend_use_program(ctx, 0);
+ vrend_use_program(sub_ctx, 0);
glDisable(GL_SCISSOR_TEST);
if (buffers & PIPE_CLEAR_COLOR) {
- if (ctx->sub->nr_cbufs && ctx->sub->surf[0] && vrend_format_is_emulated_alpha(ctx->sub->surf[0]->format)) {
+ if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_format_is_emulated_alpha(sub_ctx->surf[0]->format)) {
glClearColor(color->f[3], 0.0, 0.0, 0.0);
} else {
glClearColor(color->f[0], color->f[1], color->f[2], color->f[3]);
@@ -3680,7 +3773,7 @@ void vrend_clear(struct vrend_context *ctx,
/* This function implements Gallium's full clear callback (st->pipe->clear) on the host. This
callback requires no color component be masked. We must unmask all components before
calling glClear* and restore the previous colormask afterwards, as Gallium expects. */
- if (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (sub_ctx->hw_blend_state.independent_blend_enable &&
has_feature(feat_indep_blend)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
@@ -3708,24 +3801,24 @@ void vrend_clear(struct vrend_context *ctx,
glClearStencil(stencil);
}
- if (ctx->sub->hw_rs_state.rasterizer_discard)
+ if (sub_ctx->hw_rs_state.rasterizer_discard)
glDisable(GL_RASTERIZER_DISCARD);
if (buffers & PIPE_CLEAR_COLOR) {
uint32_t mask = 0;
int i;
- for (i = 0; i < ctx->sub->nr_cbufs; i++) {
- if (ctx->sub->surf[i])
+ for (i = 0; i < sub_ctx->nr_cbufs; i++) {
+ if (sub_ctx->surf[i])
mask |= (1 << i);
}
if (mask != (buffers >> 2)) {
mask = buffers >> 2;
while (mask) {
i = u_bit_scan(&mask);
- if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_uint(ctx->sub->surf[i] && ctx->sub->surf[i]->format))
+ if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_uint(sub_ctx->surf[i]->format))
glClearBufferuiv(GL_COLOR,
i, (GLuint *)color);
- else if (i < PIPE_MAX_COLOR_BUFS && ctx->sub->surf[i] && util_format_is_pure_sint(ctx->sub->surf[i] && ctx->sub->surf[i]->format))
+ else if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_sint(sub_ctx->surf[i]->format))
glClearBufferiv(GL_COLOR,
i, (GLint *)color);
else
@@ -3748,40 +3841,40 @@ void vrend_clear(struct vrend_context *ctx,
* get here is because the guest cleared all those states but gallium
* didn't forward them before calling the clear command
*/
- if (ctx->sub->hw_rs_state.rasterizer_discard)
+ if (sub_ctx->hw_rs_state.rasterizer_discard)
glEnable(GL_RASTERIZER_DISCARD);
if (buffers & PIPE_CLEAR_DEPTH) {
- if (!ctx->sub->dsa_state.depth.writemask)
+ if (!sub_ctx->dsa_state.depth.writemask)
glDepthMask(GL_FALSE);
}
/* Restore previous stencil buffer write masks for both front and back faces */
if (buffers & PIPE_CLEAR_STENCIL) {
- glStencilMaskSeparate(GL_FRONT, ctx->sub->dsa_state.stencil[0].writemask);
- glStencilMaskSeparate(GL_BACK, ctx->sub->dsa_state.stencil[1].writemask);
+ glStencilMaskSeparate(GL_FRONT, sub_ctx->dsa_state.stencil[0].writemask);
+ glStencilMaskSeparate(GL_BACK, sub_ctx->dsa_state.stencil[1].writemask);
}
/* Restore previous colormask */
if (buffers & PIPE_CLEAR_COLOR) {
- if (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (sub_ctx->hw_blend_state.independent_blend_enable &&
has_feature(feat_indep_blend)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
- struct pipe_blend_state *blend = &ctx->sub->hw_blend_state;
+ struct pipe_blend_state *blend = &sub_ctx->hw_blend_state;
glColorMaskIndexedEXT(i, blend->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
blend->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
} else {
- glColorMask(ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
- ctx->sub->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
+ glColorMask(sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
+ sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
- if (ctx->sub->hw_rs_state.scissor)
+ if (sub_ctx->hw_rs_state.scissor)
glEnable(GL_SCISSOR_TEST);
else
glDisable(GL_SCISSOR_TEST);
@@ -3819,20 +3912,20 @@ void vrend_clear_texture(struct vrend_context* ctx,
}
}
-static void vrend_update_scissor_state(struct vrend_context *ctx)
+static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx)
{
struct pipe_scissor_state *ss;
GLint y;
GLuint idx;
- unsigned mask = ctx->sub->scissor_state_dirty;
+ unsigned mask = sub_ctx->scissor_state_dirty;
while (mask) {
idx = u_bit_scan(&mask);
if (idx >= PIPE_MAX_VIEWPORTS) {
- vrend_report_buffer_error(ctx, 0);
+ vrend_report_buffer_error(sub_ctx->parent, 0);
break;
}
- ss = &ctx->sub->ss[idx];
+ ss = &sub_ctx->ss[idx];
y = ss->miny;
if (idx > 0 && has_feature(feat_viewport_array))
@@ -3840,39 +3933,39 @@ static void vrend_update_scissor_state(struct vrend_context *ctx)
else
glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny);
}
- ctx->sub->scissor_state_dirty = 0;
+ sub_ctx->scissor_state_dirty = 0;
}
-static void vrend_update_viewport_state(struct vrend_context *ctx)
+static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx)
{
GLint cy;
- unsigned mask = ctx->sub->viewport_state_dirty;
+ unsigned mask = sub_ctx->viewport_state_dirty;
int idx;
while (mask) {
idx = u_bit_scan(&mask);
- if (ctx->sub->viewport_is_negative)
- cy = ctx->sub->vps[idx].cur_y - ctx->sub->vps[idx].height;
+ if (sub_ctx->viewport_is_negative)
+ cy = sub_ctx->vps[idx].cur_y - sub_ctx->vps[idx].height;
else
- cy = ctx->sub->vps[idx].cur_y;
+ cy = sub_ctx->vps[idx].cur_y;
if (idx > 0 && has_feature(feat_viewport_array))
- glViewportIndexedf(idx, ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
+ glViewportIndexedf(idx, sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height);
else
- glViewport(ctx->sub->vps[idx].cur_x, cy, ctx->sub->vps[idx].width, ctx->sub->vps[idx].height);
+ glViewport(sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height);
if (idx && has_feature(feat_viewport_array))
if (vrend_state.use_gles) {
- glDepthRangeIndexedfOES(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangeIndexedfOES(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
} else
- glDepthRangeIndexed(idx, ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangeIndexed(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
else
if (vrend_state.use_gles)
- glDepthRangefOES(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRangefOES(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
else
- glDepthRange(ctx->sub->vps[idx].near_val, ctx->sub->vps[idx].far_val);
+ glDepthRange(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val);
}
- ctx->sub->viewport_state_dirty = 0;
+ sub_ctx->viewport_state_dirty = 0;
}
static GLenum get_gs_xfb_mode(GLenum mode)
@@ -3947,7 +4040,7 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
/* XYZZY: debug this? */
break;
}
- res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].buffer;
+ res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].base.buffer;
if (!res) {
vrend_printf("cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
@@ -3978,10 +4071,12 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
glBindBuffer(GL_ARRAY_BUFFER, res->id);
- if (ctx->sub->vbo[vbo_index].stride == 0) {
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[vbo_index];
+
+ if (vbo->base.stride == 0) {
void *data;
/* for 0 stride we are kinda screwed */
- data = glMapBufferRange(GL_ARRAY_BUFFER, ctx->sub->vbo[vbo_index].buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
+ data = glMapBufferRange(GL_ARRAY_BUFFER, vbo->base.buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
switch (ve->nr_chan) {
case 1:
@@ -4003,9 +4098,9 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
} else {
enable_bitmask |= (1 << loc);
if (util_format_is_pure_integer(ve->base.src_format)) {
- glVertexAttribIPointer(loc, ve->nr_chan, ve->type, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
+ glVertexAttribIPointer(loc, ve->nr_chan, ve->type, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
} else {
- glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
+ glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
}
glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
}
@@ -4037,69 +4132,86 @@ static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
glBindVertexArray(va->id);
if (ctx->sub->vbo_dirty) {
- GLsizei count = 0;
- GLuint buffers[PIPE_MAX_ATTRIBS];
- GLintptr offsets[PIPE_MAX_ATTRIBS];
- GLsizei strides[PIPE_MAX_ATTRIBS];
+ struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[0];
- for (i = 0; i < ctx->sub->num_vbos; i++) {
- struct vrend_resource *res = (struct vrend_resource *)ctx->sub->vbo[i].buffer;
- if (!res) {
- buffers[count] = 0;
- offsets[count] = 0;
- strides[count++] = 0;
- } else {
- buffers[count] = res->id;
- offsets[count] = ctx->sub->vbo[i].buffer_offset,
- strides[count++] = ctx->sub->vbo[i].stride;
+ if (has_feature(feat_bind_vertex_buffers)) {
+ GLsizei count = MAX2(ctx->sub->num_vbos, ctx->sub->old_num_vbos);
+
+ GLuint buffers[PIPE_MAX_ATTRIBS];
+ GLintptr offsets[PIPE_MAX_ATTRIBS];
+ GLsizei strides[PIPE_MAX_ATTRIBS];
+
+ for (i = 0; i < ctx->sub->num_vbos; i++) {
+ struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
+ if (res) {
+ buffers[i] = res->id;
+ offsets[i] = vbo[i].base.buffer_offset;
+ strides[i] = vbo[i].base.stride;
+ } else {
+ buffers[i] = 0;
+ offsets[i] = 0;
+ strides[i] = 0;
+ }
+ }
+
+ for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
+ buffers[i] = 0;
+ offsets[i] = 0;
+ strides[i] = 0;
}
- }
- for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
- buffers[count] = 0;
- offsets[count] = 0;
- strides[count++] = 0;
- }
- if (has_feature(feat_bind_vertex_buffers))
glBindVertexBuffers(0, count, buffers, offsets, strides);
- else {
- for (i = 0; i < count; ++i)
- glBindVertexBuffer(i, buffers[i], offsets[i], strides[i]);
+ } else {
+ for (i = 0; i < ctx->sub->num_vbos; i++) {
+ struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer;
+ if (res)
+ glBindVertexBuffer(i, res->id, vbo[i].base.buffer_offset, vbo[i].base.stride);
+ else
+ glBindVertexBuffer(i, 0, 0, 0);
+ }
+ for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++)
+ glBindVertexBuffer(i, 0, 0, 0);
}
ctx->sub->vbo_dirty = false;
}
}
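/* Illustrative sketch, not part of the patch: the rework above binds every
 * vertex-buffer slot with one glBindVertexBuffers() call when the multi-bind
 * feature is available and otherwise falls back to per-slot
 * glBindVertexBuffer() calls, clearing slots left over from the previous,
 * larger binding.  The helper name and parameters are hypothetical; a GL
 * loader header such as <epoxy/gl.h> is assumed to be included already.
 */
static void bind_vbo_slots_sketch(bool have_multi_bind,
                                  GLsizei used, GLsizei old_used,
                                  const GLuint *buffers,
                                  const GLintptr *offsets,
                                  const GLsizei *strides)
{
   GLsizei count = used > old_used ? used : old_used;

   if (have_multi_bind) {
      /* the caller zero-fills entries [used, count) so stale slots unbind */
      glBindVertexBuffers(0, count, buffers, offsets, strides);
   } else {
      for (GLsizei i = 0; i < used; i++)
         glBindVertexBuffer(i, buffers[i], offsets[i], strides[i]);
      for (GLsizei i = used; i < count; i++)
         glBindVertexBuffer(i, 0, 0, 0); /* unbind stale slots */
   }
}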
-static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
+static int vrend_draw_bind_samplers_shader(struct vrend_sub_context *sub_ctx,
int shader_type,
int next_sampler_id)
{
int index = 0;
- uint32_t dirty = ctx->sub->sampler_views_dirty[shader_type];
+ uint32_t dirty = sub_ctx->sampler_views_dirty[shader_type];
+
+ uint32_t mask = sub_ctx->prog->samplers_used_mask[shader_type];
+
+ struct vrend_shader_view *sviews = &sub_ctx->views[shader_type];
- uint32_t mask = ctx->sub->prog->samplers_used_mask[shader_type];
while (mask) {
int i = u_bit_scan(&mask);
- struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
- if (dirty & (1 << i) && tview) {
- if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
- glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
- (tview->gl_swizzle_r == GL_ZERO || tview->gl_swizzle_r == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_g == GL_ZERO || tview->gl_swizzle_g == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_b == GL_ZERO || tview->gl_swizzle_b == GL_ONE) ? 0.0 : 1.0,
- (tview->gl_swizzle_a == GL_ZERO || tview->gl_swizzle_a == GL_ONE) ? 0.0 : 1.0);
- glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
- tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
- tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
+ if (!(dirty & (1 << i)))
+ continue;
+
+ struct vrend_sampler_view *tview = sviews->views[i];
+ if (tview) {
+ if (sub_ctx->prog->shadow_samp_mask[shader_type] & (1 << i)) {
+ glUniform4f(sub_ctx->prog->shadow_samp_mask_locs[shader_type][index],
+ (tview->gl_swizzle[0] == GL_ZERO || tview->gl_swizzle[0] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[1] == GL_ZERO || tview->gl_swizzle[1] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[2] == GL_ZERO || tview->gl_swizzle[2] == GL_ONE) ? 0.0 : 1.0,
+ (tview->gl_swizzle[3] == GL_ZERO || tview->gl_swizzle[3] == GL_ONE) ? 0.0 : 1.0);
+ glUniform4f(sub_ctx->prog->shadow_samp_add_locs[shader_type][index],
+ tview->gl_swizzle[0] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[1] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[2] == GL_ONE ? 1.0 : 0.0,
+ tview->gl_swizzle[3] == GL_ONE ? 1.0 : 0.0);
}
if (tview->texture) {
- GLuint id;
+ GLuint id = tview->id;
struct vrend_resource *texture = tview->texture;
GLenum target = tview->target;
@@ -4108,17 +4220,16 @@ static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
if (has_bit(tview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
id = texture->tbo_tex_id;
target = GL_TEXTURE_BUFFER;
- } else
- id = tview->id;
+ }
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
glBindTexture(target, id);
- if (ctx->sub->views[shader_type].old_ids[i] != id ||
- ctx->sub->sampler_views_dirty[shader_type] & (1 << i)) {
- vrend_apply_sampler_state(ctx, texture, shader_type, i,
+ if (sviews->old_ids[i] != id ||
+ sub_ctx->sampler_views_dirty[shader_type] & (1 << i)) {
+ vrend_apply_sampler_state(sub_ctx, texture, shader_type, i,
next_sampler_id, tview);
- ctx->sub->views[shader_type].old_ids[i] = id;
+ sviews->old_ids[i] = id;
}
dirty &= ~(1 << i);
}
@@ -4126,12 +4237,12 @@ static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
next_sampler_id++;
index++;
}
- ctx->sub->sampler_views_dirty[shader_type] = dirty;
+ sub_ctx->sampler_views_dirty[shader_type] = dirty;
return next_sampler_id;
}
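/* Illustrative sketch, not part of the patch: the sampler bind helper above
 * walks the program's used-sampler bitmask with u_bit_scan(), skips slots
 * whose dirty bit is clear, and clears the dirty bit once a slot has been
 * rebound.  A self-contained rendition of that pattern with hypothetical
 * names (process_slot, walk_dirty_slots); it assumes <stdint.h> and a
 * GCC/Clang compiler for __builtin_ctz().
 */
static void process_slot(int slot)
{
   (void)slot; /* stand-in for the real bind work */
}

static uint32_t walk_dirty_slots(uint32_t used_mask, uint32_t dirty_mask)
{
   while (used_mask) {
      /* pop the lowest set bit, as u_bit_scan() does */
      int i = __builtin_ctz(used_mask);
      used_mask &= used_mask - 1;

      if (!(dirty_mask & (1u << i)))
         continue;

      process_slot(i);
      dirty_mask &= ~(1u << i); /* slot is clean again */
   }
   return dirty_mask; /* written back to sampler_views_dirty[] */
}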
-static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
+static int vrend_draw_bind_ubo_shader(struct vrend_sub_context *sub_ctx,
int shader_type, int next_ubo_id)
{
uint32_t mask, dirty, update;
@@ -4141,9 +4252,9 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
if (!has_feature(feat_ubo))
return next_ubo_id;
- mask = ctx->sub->prog->ubo_used_mask[shader_type];
- dirty = ctx->sub->const_bufs_dirty[shader_type];
- update = dirty & ctx->sub->const_bufs_used_mask[shader_type];
+ mask = sub_ctx->prog->ubo_used_mask[shader_type];
+ dirty = sub_ctx->const_bufs_dirty[shader_type];
+ update = dirty & sub_ctx->const_bufs_used_mask[shader_type];
if (!update)
return next_ubo_id + util_bitcount(mask);
@@ -4154,7 +4265,7 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
if (update & (1 << i)) {
/* The cbs array is indexed using the gallium uniform buffer index */
- cb = &ctx->sub->cbs[shader_type][i];
+ cb = &sub_ctx->cbs[shader_type][i];
res = (struct vrend_resource *)cb->buffer;
glBindBufferRange(GL_UNIFORM_BUFFER, next_ubo_id, res->id,
@@ -4163,26 +4274,26 @@ static int vrend_draw_bind_ubo_shader(struct vrend_context *ctx,
}
next_ubo_id++;
}
- ctx->sub->const_bufs_dirty[shader_type] = dirty;
+ sub_ctx->const_bufs_dirty[shader_type] = dirty;
return next_ubo_id;
}
-static void vrend_draw_bind_const_shader(struct vrend_context *ctx,
+static void vrend_draw_bind_const_shader(struct vrend_sub_context *sub_ctx,
int shader_type, bool new_program)
{
- if (ctx->sub->consts[shader_type].consts &&
- ctx->sub->shaders[shader_type] &&
- (ctx->sub->prog->const_location[shader_type] != -1) &&
- (ctx->sub->const_dirty[shader_type] || new_program)) {
- glUniform4uiv(ctx->sub->prog->const_location[shader_type],
- ctx->sub->shaders[shader_type]->sinfo.num_consts,
- ctx->sub->consts[shader_type].consts);
- ctx->sub->const_dirty[shader_type] = false;
+ if (sub_ctx->consts[shader_type].consts &&
+ sub_ctx->shaders[shader_type] &&
+ (sub_ctx->prog->const_location[shader_type] != -1) &&
+ (sub_ctx->const_dirty[shader_type] || new_program)) {
+ glUniform4uiv(sub_ctx->prog->const_location[shader_type],
+ sub_ctx->shaders[shader_type]->sinfo.num_consts,
+ sub_ctx->consts[shader_type].consts);
+ sub_ctx->const_dirty[shader_type] = false;
}
}
-static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_type)
+static void vrend_draw_bind_ssbo_shader(struct vrend_sub_context *sub_ctx, int shader_type)
{
uint32_t mask;
struct vrend_ssbo *ssbo;
@@ -4192,30 +4303,30 @@ static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_ty
if (!has_feature(feat_ssbo))
return;
- if (!ctx->sub->prog->ssbo_locs[shader_type])
+ if (!sub_ctx->prog->ssbo_locs[shader_type])
return;
- if (!ctx->sub->ssbo_used_mask[shader_type])
+ if (!sub_ctx->ssbo_used_mask[shader_type])
return;
- mask = ctx->sub->ssbo_used_mask[shader_type];
+ mask = sub_ctx->ssbo_used_mask[shader_type];
while (mask) {
i = u_bit_scan(&mask);
- ssbo = &ctx->sub->ssbo[shader_type][i];
+ ssbo = &sub_ctx->ssbo[shader_type][i];
res = (struct vrend_resource *)ssbo->res;
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, i, res->id,
ssbo->buffer_offset, ssbo->buffer_size);
- if (ctx->sub->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
+ if (sub_ctx->prog->ssbo_locs[shader_type][i] != GL_INVALID_INDEX) {
if (!vrend_state.use_gles)
- glShaderStorageBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ssbo_locs[shader_type][i], i);
+ glShaderStorageBlockBinding(sub_ctx->prog->id, sub_ctx->prog->ssbo_locs[shader_type][i], i);
else
debug_printf("glShaderStorageBlockBinding not supported on gles \n");
}
}
}
-static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
+static void vrend_draw_bind_abo_shader(struct vrend_sub_context *sub_ctx)
{
uint32_t mask;
struct vrend_abo *abo;
@@ -4225,18 +4336,18 @@ static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
if (!has_feature(feat_atomic_counters))
return;
- mask = ctx->sub->abo_used_mask;
+ mask = sub_ctx->abo_used_mask;
while (mask) {
i = u_bit_scan(&mask);
- abo = &ctx->sub->abo[i];
+ abo = &sub_ctx->abo[i];
res = (struct vrend_resource *)abo->res;
glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, i, res->id,
abo->buffer_offset, abo->buffer_size);
}
}
-static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_type)
+static void vrend_draw_bind_images_shader(struct vrend_sub_context *sub_ctx, int shader_type)
{
GLenum access;
GLboolean layered;
@@ -4244,22 +4355,22 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
uint32_t mask, tex_id, level, first_layer;
- if (!ctx->sub->images_used_mask[shader_type])
+ if (!sub_ctx->images_used_mask[shader_type])
return;
- if (!ctx->sub->prog->img_locs[shader_type])
+ if (!sub_ctx->prog->img_locs[shader_type])
return;
if (!has_feature(feat_images))
return;
- mask = ctx->sub->images_used_mask[shader_type];
+ mask = sub_ctx->images_used_mask[shader_type];
while (mask) {
unsigned i = u_bit_scan(&mask);
- if (!(ctx->sub->prog->images_used_mask[shader_type] & (1 << i)))
+ if (!(sub_ctx->prog->images_used_mask[shader_type] & (1 << i)))
continue;
- iview = &ctx->sub->image_views[shader_type][i];
+ iview = &sub_ctx->image_views[shader_type][i];
tex_id = iview->texture->id;
if (has_bit(iview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
if (!iview->texture->tbo_tex_id)
@@ -4285,7 +4396,7 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
}
if (!vrend_state.use_gles)
- glUniform1i(ctx->sub->prog->img_locs[shader_type][i], i);
+ glUniform1i(sub_ctx->prog->img_locs[shader_type][i], i);
switch (iview->access) {
case PIPE_IMAGE_ACCESS_READ:
@@ -4306,61 +4417,162 @@ static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_
}
}
-static void vrend_draw_bind_objects(struct vrend_context *ctx, bool new_program)
+static void vrend_draw_bind_objects(struct vrend_sub_context *sub_ctx, bool new_program)
{
int next_ubo_id = 0, next_sampler_id = 0;
- for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
- next_ubo_id = vrend_draw_bind_ubo_shader(ctx, shader_type, next_ubo_id);
- vrend_draw_bind_const_shader(ctx, shader_type, new_program);
- next_sampler_id = vrend_draw_bind_samplers_shader(ctx, shader_type,
+ for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= sub_ctx->last_shader_idx; shader_type++) {
+ next_ubo_id = vrend_draw_bind_ubo_shader(sub_ctx, shader_type, next_ubo_id);
+ vrend_draw_bind_const_shader(sub_ctx, shader_type, new_program);
+ next_sampler_id = vrend_draw_bind_samplers_shader(sub_ctx, shader_type,
next_sampler_id);
- vrend_draw_bind_images_shader(ctx, shader_type);
- vrend_draw_bind_ssbo_shader(ctx, shader_type);
+ vrend_draw_bind_images_shader(sub_ctx, shader_type);
+ vrend_draw_bind_ssbo_shader(sub_ctx, shader_type);
}
- vrend_draw_bind_abo_shader(ctx);
+ vrend_draw_bind_abo_shader(sub_ctx);
- if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
+ if (vrend_state.use_core_profile && sub_ctx->prog->fs_stipple_loc != -1) {
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
- glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
- glUniform1i(ctx->sub->prog->fs_stipple_loc, next_sampler_id);
+ glBindTexture(GL_TEXTURE_2D, sub_ctx->parent->pstipple_tex_id);
+ glUniform1i(sub_ctx->prog->fs_stipple_loc, next_sampler_id);
}
- if (vrend_state.use_core_profile && ctx->sub->prog->fs_alpha_ref_val_loc != -1) {
- glUniform1f(ctx->sub->prog->fs_alpha_ref_val_loc, ctx->sub->dsa_state.alpha.ref_value);
+ if (vrend_state.use_core_profile && sub_ctx->prog->fs_alpha_ref_val_loc != -1) {
+ glUniform1f(sub_ctx->prog->fs_alpha_ref_val_loc, sub_ctx->dsa_state.alpha.ref_value);
}
}
static
-void vrend_inject_tcs(struct vrend_context *ctx, int vertices_per_patch)
+void vrend_inject_tcs(struct vrend_sub_context *sub_ctx, int vertices_per_patch)
{
struct pipe_stream_output_info so_info;
memset(&so_info, 0, sizeof(so_info));
- struct vrend_shader_selector *sel = vrend_create_shader_state(ctx,
- &so_info,
+ struct vrend_shader_selector *sel = vrend_create_shader_state(&so_info,
false, PIPE_SHADER_TESS_CTRL);
struct vrend_shader *shader;
shader = CALLOC_STRUCT(vrend_shader);
- vrend_fill_shader_key(ctx, sel, &shader->key);
+ vrend_fill_shader_key(sub_ctx, sel, &shader->key);
shader->sel = sel;
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- vrend_shader_create_passthrough_tcs(ctx, &ctx->shader_cfg,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->tokens,
+ vrend_shader_create_passthrough_tcs(sub_ctx->parent, &sub_ctx->parent->shader_cfg,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->tokens,
&shader->key, vrend_state.tess_factors, &sel->sinfo,
&shader->glsl_strings, vertices_per_patch);
// Need to inject the selected shader into the shader selector; then the code
// below can continue
sel->tokens = NULL;
sel->current = shader;
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] = sel;
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
+ sub_ctx->shaders[PIPE_SHADER_TESS_CTRL] = sel;
+ sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
shader->id = glCreateShader(conv_shader_type(shader->sel->type));
- vrend_compile_shader(ctx, shader);
+ vrend_compile_shader(sub_ctx, shader);
+}
+
+
+static bool
+vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_info *info)
+{
+ struct vrend_linked_shader_program *prog;
+ bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
+ bool new_program = false;
+
+ struct vrend_shader_selector **shaders = sub_ctx->shaders;
+
+ sub_ctx->shader_dirty = false;
+
+ if (!shaders[PIPE_SHADER_VERTEX] || !shaders[PIPE_SHADER_FRAGMENT]) {
+ vrend_printf("dropping rendering due to missing shaders: %s\n", sub_ctx->parent->debug_name);
+ return false;
+ }
+
+ // For some GPUs, we'd like to use integer variables in the generated GLSL
+ // when the input buffers use integer formats. But we don't actually know the
+ // buffer formats when the shader is created; we only know them here.
+ // Set the flag to true so the underlying code knows to use the buffer
+ // formats now.
+ sub_ctx->drawing = true;
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+ sub_ctx->drawing = false;
+
+ if (shaders[PIPE_SHADER_TESS_CTRL] && shaders[PIPE_SHADER_TESS_CTRL]->tokens)
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
+ else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) {
+ VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n");
+ vrend_inject_tcs(sub_ctx, info->vertices_per_patch);
+
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+ }
+
+ if (shaders[PIPE_SHADER_TESS_EVAL])
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
+ if (shaders[PIPE_SHADER_GEOMETRY])
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
+ vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
+
+ if (!shaders[PIPE_SHADER_VERTEX]->current ||
+ !shaders[PIPE_SHADER_FRAGMENT]->current ||
+ (shaders[PIPE_SHADER_GEOMETRY] && !shaders[PIPE_SHADER_GEOMETRY]->current) ||
+ (shaders[PIPE_SHADER_TESS_CTRL] && !shaders[PIPE_SHADER_TESS_CTRL]->current) ||
+ (shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_TESS_EVAL]->current)) {
+ vrend_printf( "failure to compile shader variants: %s\n", sub_ctx->parent->debug_name);
+ return false;
+ }
+
+ GLuint vs_id = shaders[PIPE_SHADER_VERTEX]->current->id;
+ GLuint fs_id = shaders[PIPE_SHADER_FRAGMENT]->current->id;
+ GLuint gs_id = shaders[PIPE_SHADER_GEOMETRY] ? shaders[PIPE_SHADER_GEOMETRY]->current->id : 0;
+ GLuint tcs_id = shaders[PIPE_SHADER_TESS_CTRL] ? shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0;
+ GLuint tes_id = shaders[PIPE_SHADER_TESS_EVAL] ? shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0;
+
+ bool same_prog = sub_ctx->prog &&
+ vs_id == sub_ctx->prog_ids[PIPE_SHADER_VERTEX] &&
+ fs_id == sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] &&
+ gs_id == sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] &&
+ tcs_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] &&
+ tes_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] &&
+ sub_ctx->prog->dual_src_linked == dual_src;
+
+ if (!same_prog) {
+ prog = lookup_shader_program(sub_ctx, vs_id, fs_id, gs_id, tcs_id, tes_id, dual_src);
+ if (!prog) {
+ prog = add_shader_program(sub_ctx,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->current,
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current,
+ gs_id ? sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
+ tcs_id ? sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
+ tes_id ? sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
+ if (!prog)
+ return false;
+ }
+
+ sub_ctx->last_shader_idx = sub_ctx->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (sub_ctx->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
+ } else
+ prog = sub_ctx->prog;
+ if (sub_ctx->prog != prog) {
+ new_program = true;
+ sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = vs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] = fs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] = gs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] = tcs_id;
+ sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] = tes_id;
+ sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = 0;
+ sub_ctx->prog = prog;
+
+ /* mark all constbufs and sampler views as dirty */
+ for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) {
+ sub_ctx->const_bufs_dirty[stage] = ~0;
+ sub_ctx->sampler_views_dirty[stage] = ~0;
+ }
+
+ prog->ref_context = sub_ctx;
+ }
+ return new_program;
}
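/* Illustrative sketch, not part of the patch: vrend_select_program() above
 * effectively keys the linked-program cache on the tuple
 * (vs_id, fs_id, gs_id, tcs_id, tes_id, dual_src), with 0 standing in for
 * unused stages.  The comparison, written out with a hypothetical key
 * struct (assumes <stdbool.h>):
 */
struct prog_key_sketch {
   unsigned vs, fs, gs, tcs, tes;
   bool dual_src;
};

static bool prog_key_equal(const struct prog_key_sketch *a,
                           const struct prog_key_sketch *b)
{
   return a->vs == b->vs && a->fs == b->fs && a->gs == b->gs &&
          a->tcs == b->tcs && a->tes == b->tes &&
          a->dual_src == b->dual_src;
}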
int vrend_draw_vbo(struct vrend_context *ctx,
@@ -4372,6 +4584,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
bool new_program = false;
struct vrend_resource *indirect_res = NULL;
struct vrend_resource *indirect_params_res = NULL;
+ struct vrend_sub_context *sub_ctx = ctx->sub;
if (ctx->in_error)
return 0;
@@ -4410,173 +4623,63 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (ctx->ctx_switch_pending)
vrend_finish_context_switch(ctx);
- vrend_update_frontface_state(ctx);
+ vrend_update_frontface_state(sub_ctx);
if (ctx->sub->stencil_state_dirty)
- vrend_update_stencil_state(ctx);
+ vrend_update_stencil_state(sub_ctx);
if (ctx->sub->scissor_state_dirty)
- vrend_update_scissor_state(ctx);
+ vrend_update_scissor_state(sub_ctx);
if (ctx->sub->viewport_state_dirty)
- vrend_update_viewport_state(ctx);
+ vrend_update_viewport_state(sub_ctx);
if (ctx->sub->blend_state_dirty)
- vrend_patch_blend_state(ctx);
+ vrend_patch_blend_state(sub_ctx);
// enable primitive-mode-dependent shader variants
- if (ctx->sub->prim_mode != (int)info->mode) {
+ if (sub_ctx->prim_mode != (int)info->mode) {
// Only refresh shader program when switching in/out of GL_POINTS primitive mode
- if (ctx->sub->prim_mode == PIPE_PRIM_POINTS
+ if (sub_ctx->prim_mode == PIPE_PRIM_POINTS
|| (int)info->mode == PIPE_PRIM_POINTS)
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
- ctx->sub->prim_mode = (int)info->mode;
+ sub_ctx->prim_mode = (int)info->mode;
}
- if (ctx->sub->shader_dirty || ctx->sub->swizzle_output_rgb_to_bgr) {
- struct vrend_linked_shader_program *prog;
- bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
- bool same_prog;
-
- ctx->sub->shader_dirty = false;
-
- if (!ctx->sub->shaders[PIPE_SHADER_VERTEX] || !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
- vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
- return 0;
- }
-
- // For some GPU, we'd like to use integer variable in generated GLSL if
- // the input buffers are integer formats. But we actually don't know the
- // buffer formats when the shader is created, we only know it here.
- // Set it to true so the underlying code knows to use the buffer formats
- // now.
- ctx->drawing = true;
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
- ctx->drawing = false;
-
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->tokens)
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
- else if (vrend_state.use_gles && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]) {
- VREND_DEBUG(dbg_shader, ctx, "Need to inject a TCS\n");
- vrend_inject_tcs(ctx, info->vertices_per_patch);
-
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
- }
-
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
-
- if (!ctx->sub->shaders[PIPE_SHADER_VERTEX]->current ||
- !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current ||
- (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && !ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current) ||
- (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && !ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current) ||
- (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && !ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current)) {
- vrend_printf( "failure to compile shader variants: %s\n", ctx->debug_name);
- return 0;
- }
- same_prog = true;
- if (ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_VERTEX])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY])
- same_prog = false;
- if (ctx->sub->prog && ctx->sub->prog->dual_src_linked != dual_src)
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] && ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL])
- same_prog = false;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] && ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL])
- same_prog = false;
-
- if (!same_prog) {
- prog = lookup_shader_program(ctx,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id,
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id,
- ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id : 0,
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0,
- dual_src);
- if (!prog) {
- prog = add_shader_program(ctx,
- ctx->sub->shaders[PIPE_SHADER_VERTEX]->current,
- ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current,
- ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
- ctx->sub->shaders[PIPE_SHADER_TESS_CTRL] ? ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
- if (!prog)
- return 0;
- }
-
- ctx->sub->last_shader_idx = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
- } else
- prog = ctx->sub->prog;
- if (ctx->sub->prog != prog) {
- new_program = true;
- ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id;
- ctx->sub->prog_ids[PIPE_SHADER_FRAGMENT] = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- ctx->sub->prog_ids[PIPE_SHADER_GEOMETRY] = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_CTRL])
- ctx->sub->prog_ids[PIPE_SHADER_TESS_CTRL] = ctx->sub->shaders[PIPE_SHADER_TESS_CTRL]->current->id;
- if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- ctx->sub->prog_ids[PIPE_SHADER_TESS_EVAL] = ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->current->id;
- ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = -1;
- ctx->sub->prog = prog;
-
- /* mark all constbufs and sampler views as dirty */
- for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) {
- ctx->sub->const_bufs_dirty[stage] = ~0;
- ctx->sub->sampler_views_dirty[stage] = ~0;
- }
+ if (sub_ctx->shader_dirty || sub_ctx->swizzle_output_rgb_to_bgr)
+ new_program = vrend_select_program(sub_ctx, info);
- prog->ref_context = ctx->sub;
- }
- }
- if (!ctx->sub->prog) {
+ if (!sub_ctx->prog) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return 0;
}
- vrend_use_program(ctx, ctx->sub->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog->id);
- vrend_draw_bind_objects(ctx, new_program);
+ vrend_draw_bind_objects(sub_ctx, new_program);
- if (!ctx->sub->ve) {
+ if (!sub_ctx->ve) {
vrend_printf("illegal VE setup - skipping renderering\n");
return 0;
}
- float viewport_neg_val = ctx->sub->viewport_is_negative ? -1.0 : 1.0;
- if (ctx->sub->prog->viewport_neg_val != viewport_neg_val) {
- glUniform1f(ctx->sub->prog->vs_ws_adjust_loc, viewport_neg_val);
- ctx->sub->prog->viewport_neg_val = viewport_neg_val;
+ float viewport_neg_val = sub_ctx->viewport_is_negative ? -1.0 : 1.0;
+ if (sub_ctx->prog->viewport_neg_val != viewport_neg_val) {
+ glUniform1f(sub_ctx->prog->vs_ws_adjust_loc, viewport_neg_val);
+ sub_ctx->prog->viewport_neg_val = viewport_neg_val;
}
- if (ctx->sub->rs_state.clip_plane_enable) {
+ if (sub_ctx->rs_state.clip_plane_enable) {
for (i = 0 ; i < 8; i++) {
- glUniform4fv(ctx->sub->prog->clip_locs[i], 1, (const GLfloat *)&ctx->sub->ucp_state.ucp[i]);
+ glUniform4fv(sub_ctx->prog->clip_locs[i], 1, (const GLfloat *)&sub_ctx->ucp_state.ucp[i]);
}
}
if (has_feature(feat_gles31_vertex_attrib_binding))
- vrend_draw_bind_vertex_binding(ctx, ctx->sub->ve);
+ vrend_draw_bind_vertex_binding(ctx, sub_ctx->ve);
else
- vrend_draw_bind_vertex_legacy(ctx, ctx->sub->ve);
-
- for (i = 0 ; i < ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs; i++) {
- struct vrend_vertex_element_array *va = ctx->sub->ve;
- struct vrend_vertex_element *ve = &va->elements[i];
- int vbo_index = ve->base.vertex_buffer_index;
- if (!ctx->sub->vbo[vbo_index].buffer) {
- vrend_printf( "VBO missing vertex buffer\n");
- return 0;
- }
- }
+ vrend_draw_bind_vertex_legacy(ctx, sub_ctx->ve);
if (info->indexed) {
- struct vrend_resource *res = (struct vrend_resource *)ctx->sub->ib.buffer;
+ struct vrend_resource *res = (struct vrend_resource *)sub_ctx->ib.buffer;
if (!res) {
vrend_printf( "VBO missing indexed array buffer\n");
return 0;
@@ -4585,19 +4688,19 @@ int vrend_draw_vbo(struct vrend_context *ctx,
} else
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
- if (ctx->sub->current_so) {
- if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
- if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
- glBeginTransformFeedback(get_gs_xfb_mode(ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
- else if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL])
- glBeginTransformFeedback(get_tess_xfb_mode(ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
- ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
+ if (sub_ctx->current_so) {
+ if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
+ if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY])
+ glBeginTransformFeedback(get_gs_xfb_mode(sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
+ else if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL])
+ glBeginTransformFeedback(get_tess_xfb_mode(sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim,
+ sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode));
else
glBeginTransformFeedback(get_xfb_mode(info->mode));
- ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
- } else if (ctx->sub->current_so->xfb_state == XFB_STATE_PAUSED) {
+ sub_ctx->current_so->xfb_state = XFB_STATE_STARTED;
+ } else if (sub_ctx->current_so->xfb_state == XFB_STATE_PAUSED) {
glResumeTransformFeedback();
- ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
+ sub_ctx->current_so->xfb_state = XFB_STATE_STARTED;
}
}
@@ -4615,16 +4718,16 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (has_feature(feat_indirect_draw)) {
GLint buf = indirect_res ? indirect_res->id : 0;
- if (ctx->sub->draw_indirect_buffer != buf) {
+ if (sub_ctx->draw_indirect_buffer != buf) {
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buf);
- ctx->sub->draw_indirect_buffer = buf;
+ sub_ctx->draw_indirect_buffer = buf;
}
if (has_feature(feat_indirect_params)) {
GLint buf = indirect_params_res ? indirect_params_res->id : 0;
- if (ctx->sub->draw_indirect_params_buffer != buf) {
+ if (sub_ctx->draw_indirect_params_buffer != buf) {
glBindBuffer(GL_PARAMETER_BUFFER_ARB, buf);
- ctx->sub->draw_indirect_params_buffer = buf;
+ sub_ctx->draw_indirect_params_buffer = buf;
}
}
}
@@ -4637,9 +4740,9 @@ int vrend_draw_vbo(struct vrend_context *ctx,
* accept those blend equations.
* When we transmit the blend mode through alpha_src_factor, alpha_dst_factor is always 0.
*/
- uint32_t blend_mask_shader = ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced;
- uint32_t blend_mode = ctx->sub->blend_state.rt[0].alpha_src_factor;
- uint32_t alpha_dst_factor = ctx->sub->blend_state.rt[0].alpha_dst_factor;
+ uint32_t blend_mask_shader = sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced;
+ uint32_t blend_mode = sub_ctx->blend_state.rt[0].alpha_src_factor;
+ uint32_t alpha_dst_factor = sub_ctx->blend_state.rt[0].alpha_dst_factor;
bool use_advanced_blending = !has_feature(feat_framebuffer_fetch) &&
has_feature(feat_blend_equation_advanced) &&
blend_mask_shader != 0 &&
@@ -4674,7 +4777,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
} else {
GLenum elsz;
GLenum mode = info->mode;
- switch (ctx->sub->ib.index_size) {
+ switch (sub_ctx->ib.index_size) {
case 1:
elsz = GL_UNSIGNED_BYTE;
break;
@@ -4697,17 +4800,17 @@ int vrend_draw_vbo(struct vrend_context *ctx,
glDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset);
} else if (info->index_bias) {
if (info->instance_count > 1)
- glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count, info->index_bias);
+ glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count, info->index_bias);
else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
+ glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
else
- glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->index_bias);
+ glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
} else if (info->instance_count > 1) {
- glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset, info->instance_count);
+ glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count);
} else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
+ glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
else
- glDrawElements(mode, info->count, elsz, (void *)(unsigned long)ctx->sub->ib.offset);
+ glDrawElements(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
}
if (info->primitive_restart) {
@@ -4720,10 +4823,10 @@ int vrend_draw_vbo(struct vrend_context *ctx,
}
}
- if (ctx->sub->current_so && has_feature(feat_transform_feedback2)) {
- if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED) {
+ if (sub_ctx->current_so && has_feature(feat_transform_feedback2)) {
+ if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED) {
glPauseTransformFeedback();
- ctx->sub->current_so->xfb_state = XFB_STATE_PAUSED;
+ sub_ctx->current_so->xfb_state = XFB_STATE_PAUSED;
}
}
return 0;
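/* Illustrative sketch, not part of the patch: the indexed-draw path above
 * selects a GL entry point from the draw info (the indirect case is left
 * out here).  The same decision tree as a standalone helper with a
 * hypothetical enum and function name (assumes <stdbool.h>):
 */
enum indexed_draw_kind {
   DRAW_ELEMENTS,
   DRAW_RANGE_ELEMENTS,
   DRAW_ELEMENTS_INSTANCED,
   DRAW_ELEMENTS_BASE_VERTEX,
   DRAW_RANGE_ELEMENTS_BASE_VERTEX,
   DRAW_ELEMENTS_INSTANCED_BASE_VERTEX,
};

static enum indexed_draw_kind
pick_indexed_draw(int index_bias, unsigned instance_count,
                  unsigned min_index, unsigned max_index)
{
   bool has_range = min_index != 0 || max_index != (unsigned)-1;

   if (index_bias) {
      if (instance_count > 1)
         return DRAW_ELEMENTS_INSTANCED_BASE_VERTEX;
      return has_range ? DRAW_RANGE_ELEMENTS_BASE_VERTEX
                       : DRAW_ELEMENTS_BASE_VERTEX;
   }
   if (instance_count > 1)
      return DRAW_ELEMENTS_INSTANCED;
   return has_range ? DRAW_RANGE_ELEMENTS : DRAW_ELEMENTS;
}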
@@ -4741,56 +4844,58 @@ void vrend_launch_grid(struct vrend_context *ctx,
if (!has_feature(feat_compute_shader))
return;
- if (ctx->sub->cs_shader_dirty) {
+ struct vrend_sub_context *sub_ctx = ctx->sub;
+
+ if (sub_ctx->cs_shader_dirty) {
struct vrend_linked_shader_program *prog;
bool cs_dirty;
- ctx->sub->cs_shader_dirty = false;
+ sub_ctx->cs_shader_dirty = false;
- if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]) {
+ if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return;
}
- vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
- if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current) {
+ vrend_shader_select(sub_ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
+ if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current) {
vrend_printf( "failure to compile shader variants: %s\n", ctx->debug_name);
return;
}
- if (ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_COMPUTE]) {
- prog = lookup_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id);
+ if (sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)sub_ctx->prog_ids[PIPE_SHADER_COMPUTE]) {
+ prog = lookup_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id);
if (!prog) {
- prog = add_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current);
+ prog = add_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current);
if (!prog)
return;
}
} else
- prog = ctx->sub->prog;
+ prog = sub_ctx->prog;
- if (ctx->sub->prog != prog) {
+ if (sub_ctx->prog != prog) {
new_program = true;
- ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = -1;
- ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id;
- ctx->sub->prog = prog;
- prog->ref_context = ctx->sub;
+ sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = 0;
+ sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id;
+ sub_ctx->prog = prog;
+ prog->ref_context = sub_ctx;
}
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
}
- if (!ctx->sub->prog) {
+ if (!sub_ctx->prog) {
vrend_printf("%s: Skipping compute shader execution due to missing shaders: %s\n",
__func__, ctx->debug_name);
return;
}
- vrend_use_program(ctx, ctx->sub->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog->id);
- vrend_draw_bind_ubo_shader(ctx, PIPE_SHADER_COMPUTE, 0);
- vrend_draw_bind_const_shader(ctx, PIPE_SHADER_COMPUTE, new_program);
- vrend_draw_bind_samplers_shader(ctx, PIPE_SHADER_COMPUTE, 0);
- vrend_draw_bind_images_shader(ctx, PIPE_SHADER_COMPUTE);
- vrend_draw_bind_ssbo_shader(ctx, PIPE_SHADER_COMPUTE);
- vrend_draw_bind_abo_shader(ctx);
+ vrend_draw_bind_ubo_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
+ vrend_draw_bind_const_shader(sub_ctx, PIPE_SHADER_COMPUTE, new_program);
+ vrend_draw_bind_samplers_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
+ vrend_draw_bind_images_shader(sub_ctx, PIPE_SHADER_COMPUTE);
+ vrend_draw_bind_ssbo_shader(sub_ctx, PIPE_SHADER_COMPUTE);
+ vrend_draw_bind_abo_shader(sub_ctx);
if (indirect_handle) {
indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
@@ -4943,15 +5048,15 @@ static inline bool is_const_blend(int blend_factor)
blend_factor == PIPE_BLENDFACTOR_INV_CONST_ALPHA);
}
-static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_state *state)
+static void vrend_hw_emit_blend(struct vrend_sub_context *sub_ctx, struct pipe_blend_state *state)
{
- if (state->logicop_enable != ctx->sub->hw_blend_state.logicop_enable) {
- ctx->sub->hw_blend_state.logicop_enable = state->logicop_enable;
+ if (state->logicop_enable != sub_ctx->hw_blend_state.logicop_enable) {
+ sub_ctx->hw_blend_state.logicop_enable = state->logicop_enable;
if (vrend_state.use_gles) {
if (can_emulate_logicop(state->logicop_func))
- ctx->sub->shader_dirty = true;
+ sub_ctx->shader_dirty = true;
else
- report_gles_warn(ctx, GLES_WARN_LOGIC_OP);
+ report_gles_warn(sub_ctx->parent, GLES_WARN_LOGIC_OP);
} else if (state->logicop_enable) {
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(translate_logicop(state->logicop_func));
@@ -4969,7 +5074,7 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
if (state->rt[i].blend_enable) {
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, i);
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, i);
if (dual_src && !has_feature(feat_dual_src_blend)) {
vrend_printf( "dual src blend requested but not supported for rt %d\n", i);
continue;
@@ -4985,8 +5090,8 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
} else
glDisableIndexedEXT(GL_BLEND, i);
- if (state->rt[i].colormask != ctx->sub->hw_blend_state.rt[i].colormask) {
- ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
+ if (state->rt[i].colormask != sub_ctx->hw_blend_state.rt[i].colormask) {
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
@@ -4995,7 +5100,7 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
}
} else {
if (state->rt[0].blend_enable) {
- bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
+ bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
if (dual_src && !has_feature(feat_dual_src_blend)) {
vrend_printf( "dual src blend requested but not supported for rt 0\n");
}
@@ -5010,19 +5115,19 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
else
glDisable(GL_BLEND);
- if (state->rt[0].colormask != ctx->sub->hw_blend_state.rt[0].colormask ||
- (ctx->sub->hw_blend_state.independent_blend_enable &&
+ if (state->rt[0].colormask != sub_ctx->hw_blend_state.rt[0].colormask ||
+ (sub_ctx->hw_blend_state.independent_blend_enable &&
!state->independent_blend_enable)) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
- ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
- ctx->sub->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
+ sub_ctx->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
if (has_feature(feat_multisample)) {
if (state->alpha_to_coverage)
@@ -5049,22 +5154,22 @@ static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_sta
b) patching colormask/blendcolor/blendfactors for A8/A16 format
emulation using GL_R8/GL_R16.
*/
-static void vrend_patch_blend_state(struct vrend_context *ctx)
+static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_blend_state new_state = ctx->sub->blend_state;
- struct pipe_blend_state *state = &ctx->sub->blend_state;
+ struct pipe_blend_state new_state = sub_ctx->blend_state;
+ struct pipe_blend_state *state = &sub_ctx->blend_state;
bool swizzle_blend_color = false;
- struct pipe_blend_color blend_color = ctx->sub->blend_color;
+ struct pipe_blend_color blend_color = sub_ctx->blend_color;
int i;
- if (ctx->sub->nr_cbufs == 0) {
- ctx->sub->blend_state_dirty = false;
+ if (sub_ctx->nr_cbufs == 0) {
+ sub_ctx->blend_state_dirty = false;
return;
}
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
- if (i < ctx->sub->nr_cbufs && ctx->sub->surf[i]) {
- if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format)) {
+ if (i < sub_ctx->nr_cbufs && sub_ctx->surf[i]) {
+ if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format)) {
if (state->rt[i].blend_enable) {
new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor);
@@ -5078,7 +5183,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
is_const_blend(new_state.rt[i].rgb_dst_factor)) {
swizzle_blend_color = true;
}
- } else if (!util_format_has_alpha(ctx->sub->surf[i]->format)) {
+ } else if (!util_format_has_alpha(sub_ctx->surf[i]->format)) {
if (!(is_dst_blend(state->rt[i].rgb_src_factor) ||
is_dst_blend(state->rt[i].rgb_dst_factor) ||
is_dst_blend(state->rt[i].alpha_src_factor) ||
@@ -5092,7 +5197,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
}
}
- vrend_hw_emit_blend(ctx, &new_state);
+ vrend_hw_emit_blend(sub_ctx, &new_state);
if (swizzle_blend_color) {
blend_color.color[0] = blend_color.color[3];
@@ -5106,7 +5211,7 @@ static void vrend_patch_blend_state(struct vrend_context *ctx)
blend_color.color[2],
blend_color.color[3]);
- ctx->sub->blend_state_dirty = false;
+ sub_ctx->blend_state_dirty = false;
}
void vrend_object_bind_blend(struct vrend_context *ctx,
@@ -5184,41 +5289,41 @@ void vrend_object_bind_dsa(struct vrend_context *ctx,
vrend_hw_emit_dsa(ctx);
}
-static void vrend_update_frontface_state(struct vrend_context *ctx)
+static void vrend_update_frontface_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
+ struct pipe_rasterizer_state *state = &sub_ctx->rs_state;
int front_ccw = state->front_ccw;
- front_ccw ^= (ctx->sub->inverted_fbo_content ? 0 : 1);
+ front_ccw ^= (sub_ctx->inverted_fbo_content ? 0 : 1);
if (front_ccw)
glFrontFace(GL_CCW);
else
glFrontFace(GL_CW);
}
-void vrend_update_stencil_state(struct vrend_context *ctx)
+void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx)
{
- struct pipe_depth_stencil_alpha_state *state = ctx->sub->dsa;
+ struct pipe_depth_stencil_alpha_state *state = sub_ctx->dsa;
int i;
if (!state)
return;
if (!state->stencil[1].enabled) {
if (state->stencil[0].enabled) {
- vrend_stencil_test_enable(ctx, true);
+ vrend_stencil_test_enable(sub_ctx, true);
glStencilOp(translate_stencil_op(state->stencil[0].fail_op),
translate_stencil_op(state->stencil[0].zfail_op),
translate_stencil_op(state->stencil[0].zpass_op));
glStencilFunc(GL_NEVER + state->stencil[0].func,
- ctx->sub->stencil_refs[0],
+ sub_ctx->stencil_refs[0],
state->stencil[0].valuemask);
glStencilMask(state->stencil[0].writemask);
} else
- vrend_stencil_test_enable(ctx, false);
+ vrend_stencil_test_enable(sub_ctx, false);
} else {
- vrend_stencil_test_enable(ctx, true);
+ vrend_stencil_test_enable(sub_ctx, true);
for (i = 0; i < 2; i++) {
GLenum face = (i == 1) ? GL_BACK : GL_FRONT;
@@ -5228,12 +5333,12 @@ void vrend_update_stencil_state(struct vrend_context *ctx)
translate_stencil_op(state->stencil[i].zpass_op));
glStencilFuncSeparate(face, GL_NEVER + state->stencil[i].func,
- ctx->sub->stencil_refs[i],
+ sub_ctx->stencil_refs[i],
state->stencil[i].valuemask);
glStencilMaskSeparate(face, state->stencil[i].writemask);
}
}
- ctx->sub->stencil_state_dirty = false;
+ sub_ctx->stencil_state_dirty = false;
}
static inline GLenum translate_fill(uint32_t mode)
@@ -5595,7 +5700,7 @@ static bool get_swizzled_border_color(enum virgl_formats fmt,
return false;
}
-static void vrend_apply_sampler_state(struct vrend_context *ctx,
+static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id,
@@ -5603,7 +5708,7 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
struct vrend_sampler_view *tview)
{
struct vrend_texture *tex = (struct vrend_texture *)res;
- struct vrend_sampler_state *vstate = ctx->sub->sampler_state[shader_type][id];
+ struct vrend_sampler_state *vstate = sub_ctx->sampler_state[shader_type][id];
struct pipe_sampler_state *state = &vstate->base;
bool set_all = false;
GLenum target = tex->base.target;
@@ -5635,11 +5740,11 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
border_color = state->border_color;
border_color.ui[0] = border_color.ui[3];
border_color.ui[3] = 0;
- glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, border_color.ui);
+ apply_sampler_border_color(sampler, border_color.ui);
} else {
union pipe_color_union border_color;
if (get_swizzled_border_color(tview->format, &state->border_color, &border_color))
- glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, border_color.ui);
+ apply_sampler_border_color(sampler, border_color.ui);
}
glBindSampler(sampler_id, sampler);
@@ -5668,7 +5773,7 @@ static void vrend_apply_sampler_state(struct vrend_context *ctx,
if (tex->state.lod_bias != state->lod_bias || set_all) {
if (vrend_state.use_gles) {
if (state->lod_bias)
- report_gles_warn(ctx, GLES_WARN_LOD_BIAS);
+ report_gles_warn(sub_ctx->parent, GLES_WARN_LOD_BIAS);
} else {
glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias);
}
@@ -5785,6 +5890,33 @@ static void vrend_free_fences(void)
free_fence_locked(fence);
}
+static void vrend_free_fences_for_context(struct vrend_context *ctx)
+{
+ struct vrend_fence *fence, *stor;
+
+ if (vrend_state.sync_thread) {
+ pipe_mutex_lock(vrend_state.fence_mutex);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ if (vrend_state.fence_waiting) {
+ /* mark the fence invalid as the sync thread is still waiting on it */
+ vrend_state.fence_waiting->ctx = NULL;
+ }
+ pipe_mutex_unlock(vrend_state.fence_mutex);
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
+ if (fence->ctx == ctx)
+ free_fence_locked(fence);
+ }
+ }
+}
+
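/* Illustrative sketch, not part of the patch: vrend_free_fences_for_context()
 * above frees every queued fence that belongs to the destroyed context and,
 * if the sync thread is currently blocked on one of that context's fences,
 * only disowns it (ctx = NULL) so the wait can finish safely.  The same idea
 * on a plain singly linked list, with hypothetical types (sketch_fence,
 * sketch_ctx); assumes <stdlib.h> for free().
 */
struct sketch_ctx;

struct sketch_fence {
   struct sketch_ctx *ctx;
   struct sketch_fence *next;
};

static void drop_fences_for_ctx(struct sketch_fence **head,
                                struct sketch_fence *waiting,
                                struct sketch_ctx *ctx)
{
   /* unlink and free the fences owned by ctx */
   while (*head) {
      struct sketch_fence *f = *head;
      if (f->ctx == ctx) {
         *head = f->next;
         free(f);
      } else {
         head = &f->next;
      }
   }

   /* the fence the sync thread waits on must outlive the context,
    * so it is only detached here, never freed */
   if (waiting && waiting->ctx == ctx)
      waiting->ctx = NULL;
}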
static bool do_wait(struct vrend_fence *fence, bool can_block)
{
bool done = false;
@@ -5816,6 +5948,7 @@ static void wait_sync(struct vrend_fence *fence)
pipe_mutex_lock(vrend_state.fence_mutex);
list_addtail(&fence->fences, &vrend_state.fence_list);
+ vrend_state.fence_waiting = NULL;
pipe_mutex_unlock(vrend_state.fence_mutex);
if (write_eventfd(vrend_state.eventfd, 1)) {
@@ -5843,6 +5976,7 @@ static int thread_sync(UNUSED void *arg)
if (vrend_state.stop_sync_thread)
break;
list_del(&fence->fences);
+ vrend_state.fence_waiting = fence;
pipe_mutex_unlock(vrend_state.fence_mutex);
wait_sync(fence);
pipe_mutex_lock(vrend_state.fence_mutex);
@@ -5977,6 +6111,8 @@ static bool use_integer() {
return true;
const char * a = (const char *) glGetString(GL_VENDOR);
+ if (!a)
+ return false;
if (strcmp(a, "ARM") == 0)
return true;
return false;
@@ -6203,6 +6339,7 @@ void vrend_destroy_context(struct vrend_context *ctx)
bool switch_0 = (ctx == vrend_state.current_ctx);
struct vrend_context *cur = vrend_state.current_ctx;
struct vrend_sub_context *sub, *tmp;
+ struct vrend_untyped_resource *untyped_res, *untyped_res_tmp;
if (switch_0) {
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
@@ -6234,6 +6371,10 @@ void vrend_destroy_context(struct vrend_context *ctx)
if(ctx->ctx_id)
vrend_renderer_force_ctx_0();
+ vrend_free_fences_for_context(ctx);
+
+ LIST_FOR_EACH_ENTRY_SAFE(untyped_res, untyped_res_tmp, &ctx->untyped_resources, head)
+ free(untyped_res);
vrend_ctx_resource_fini_table(ctx->res_hash);
FREE(ctx);
@@ -6265,6 +6406,7 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
list_inithead(&grctx->active_nontimer_query_list);
grctx->res_hash = vrend_ctx_resource_init_table();
+ list_inithead(&grctx->untyped_resources);
grctx->shader_cfg.use_gles = vrend_state.use_gles;
grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
@@ -6282,10 +6424,13 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
vrender_get_glsl_version(&grctx->shader_cfg.glsl_version);
+ if (!grctx->ctx_id)
+ grctx->fence_retire = vrend_clicbs->ctx0_fence_retire;
+
return grctx;
}
-static int check_resource_valid(struct vrend_renderer_resource_create_args *args,
+static int check_resource_valid(const struct vrend_renderer_resource_create_args *args,
char errmsg[256])
{
/* limit the target */
@@ -6320,6 +6465,10 @@ static int check_resource_valid(struct vrend_renderer_resource_create_args *args
snprintf(errmsg, 256, "Multisample textures don't support mipmaps");
return -1;
}
+ if (!format_can_texture_storage && vrend_state.use_gles) {
+ snprintf(errmsg, 256, "Unsupported multisample texture format %d", args->format);
+ return -1;
+ }
}
if (args->last_level > 0) {
@@ -6386,8 +6535,8 @@ static int check_resource_valid(struct vrend_renderer_resource_create_args *args
}
}
- if (format_can_texture_storage && !args->width) {
- snprintf(errmsg, 256, "Texture storage texture width must be >0");
+ if (args->target != PIPE_BUFFER && !args->width) {
+ snprintf(errmsg, 256, "Texture width must be >0");
return -1;
}
@@ -6589,8 +6738,67 @@ static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint3
glBindBufferARB(gr->target, 0);
}
+static int
+vrend_resource_alloc_buffer(struct vrend_resource *gr, uint32_t flags)
+{
+ const uint32_t bind = gr->base.bind;
+ const uint32_t size = gr->base.width0;
+
+ if (bind == VIRGL_BIND_CUSTOM) {
+ /* use iovec directly when attached */
+ gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
+ gr->ptr = malloc(size);
+ if (!gr->ptr)
+ return -ENOMEM;
+ } else if (bind == VIRGL_BIND_STAGING) {
+ /* staging buffers only use guest memory -- nothing to do. */
+ } else if (bind == VIRGL_BIND_INDEX_BUFFER) {
+ gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_STREAM_OUTPUT) {
+ gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_VERTEX_BUFFER) {
+ gr->target = GL_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_CONSTANT_BUFFER) {
+ gr->target = GL_UNIFORM_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_QUERY_BUFFER) {
+ gr->target = GL_QUERY_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == VIRGL_BIND_COMMAND_ARGS) {
+ gr->target = GL_DRAW_INDIRECT_BUFFER;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind == 0 || bind == VIRGL_BIND_SHADER_BUFFER) {
+ gr->target = GL_ARRAY_BUFFER_ARB;
+ vrend_create_buffer(gr, size, flags);
+ } else if (bind & VIRGL_BIND_SAMPLER_VIEW) {
+ /*
+ * On desktop GL we use GL_ARB_texture_buffer_object; on GLES we use
+ * GL_EXT_texture_buffer (it is in the ANDROID extension pack).
+ */
+#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
+#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
+#endif
+
+ /* need to check GL version here */
+ if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
+ gr->target = GL_TEXTURE_BUFFER;
+ } else {
+ gr->target = GL_PIXEL_PACK_BUFFER_ARB;
+ }
+ vrend_create_buffer(gr, size, flags);
+ } else {
+ vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, bind);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
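/* Illustrative sketch, not part of the patch: vrend_resource_alloc_buffer()
 * above maps a single virgl bind flag to the GL target the buffer is created
 * with.  The same mapping as a standalone lookup (helper name hypothetical;
 * assumes virgl_hw.h and a GL header are already included, and only covers
 * the plain-buffer cases, not CUSTOM/STAGING/SAMPLER_VIEW):
 */
static GLenum buffer_bind_to_gl_target_sketch(uint32_t bind)
{
   switch (bind) {
   case VIRGL_BIND_INDEX_BUFFER:    return GL_ELEMENT_ARRAY_BUFFER_ARB;
   case VIRGL_BIND_STREAM_OUTPUT:   return GL_TRANSFORM_FEEDBACK_BUFFER;
   case VIRGL_BIND_VERTEX_BUFFER:   return GL_ARRAY_BUFFER_ARB;
   case VIRGL_BIND_CONSTANT_BUFFER: return GL_UNIFORM_BUFFER;
   case VIRGL_BIND_QUERY_BUFFER:    return GL_QUERY_BUFFER;
   case VIRGL_BIND_COMMAND_ARGS:    return GL_DRAW_INDIRECT_BUFFER;
   case 0:
   case VIRGL_BIND_SHADER_BUFFER:   return GL_ARRAY_BUFFER_ARB;
   default:                         return GL_NONE; /* unknown binding */
   }
}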
static inline void
-vrend_renderer_resource_copy_args(struct vrend_renderer_resource_create_args *args,
+vrend_renderer_resource_copy_args(const struct vrend_renderer_resource_create_args *args,
struct vrend_resource *gr)
{
assert(gr);
@@ -6647,7 +6855,7 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
if (!virgl_gbm_gpu_import_required(gr->base.bind))
return;
- gr->egl_image = virgl_egl_image_from_dmabuf(egl, bo);
+ gr->egl_image = virgl_egl_image_from_gbm_bo(egl, bo);
if (!gr->egl_image) {
gr->gbm_bo = NULL;
gbm_bo_destroy(bo);
@@ -6661,19 +6869,12 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
#endif
}
-static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
- void *image_oes)
+static enum virgl_formats vrend_resource_fixup_emulated_bgra(struct vrend_resource *gr,
+ bool imported)
{
- uint level;
- GLenum internalformat, glformat, gltype;
- enum virgl_formats format = gr->base.format;
- struct vrend_texture *gt = (struct vrend_texture *)gr;
- struct pipe_resource *pr = &gr->base;
-
- if (pr->width0 == 0)
- return EINVAL;
-
- bool format_can_texture_storage = has_feature(feat_texture_storage) &&
+ const struct pipe_resource *pr = &gr->base;
+ const enum virgl_formats format = pr->format;
+ const bool format_can_texture_storage = has_feature(feat_texture_storage) &&
(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE);
/* On GLES there is no support for glTexImage*DMultisample and
@@ -6685,7 +6886,7 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
gr->base.bind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
}
- if (image_oes && !has_feature(feat_egl_image_storage))
+ if (imported && !has_feature(feat_egl_image_storage))
gr->base.bind &= ~VIRGL_BIND_PREFER_EMULATED_BGRA;
#ifdef ENABLE_MINIGBM_ALLOCATION
@@ -6694,8 +6895,19 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
gr->base.bind &= ~VIRGL_BIND_PREFER_EMULATED_BGRA;
#endif
- format = vrend_format_replace_emulated(gr->base.bind, gr->base.format);
- format_can_texture_storage = has_feature(feat_texture_storage) &&
+ return vrend_format_replace_emulated(gr->base.bind, format);
+}
+
+static int vrend_resource_alloc_texture(struct vrend_resource *gr,
+ enum virgl_formats format,
+ void *image_oes)
+{
+ uint level;
+ GLenum internalformat, glformat, gltype;
+ struct vrend_texture *gt = (struct vrend_texture *)gr;
+ struct pipe_resource *pr = &gr->base;
+
+ const bool format_can_texture_storage = has_feature(feat_texture_storage) &&
(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE);
if (format_can_texture_storage)
@@ -6855,21 +7067,21 @@ static int vrend_renderer_resource_allocate_texture(struct vrend_resource *gr,
!vrend_format_can_texture_view(gr->base.format)) {
for (int i = 0; i < gbm_bo_get_plane_count(gr->gbm_bo); i++) {
gr->aux_plane_egl_image[i] =
- virgl_egl_aux_plane_image_from_dmabuf(egl, gr->gbm_bo, i);
+ virgl_egl_aux_plane_image_from_gbm_bo(egl, gr->gbm_bo, i);
}
}
#endif
}
gt->state.max_lod = -1;
- gt->cur_swizzle_r = gt->cur_swizzle_g = gt->cur_swizzle_b = gt->cur_swizzle_a = -1;
+ gt->cur_swizzle[0] = gt->cur_swizzle[1] = gt->cur_swizzle[2] = gt->cur_swizzle[3] = -1;
gt->cur_base = -1;
gt->cur_max = 10000;
return 0;
}
-struct pipe_resource *
-vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, void *image_oes)
+static struct vrend_resource *
+vrend_resource_create(const struct vrend_renderer_resource_create_args *args)
{
struct vrend_resource *gr;
int ret;
@@ -6893,65 +7105,31 @@ vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args,
pipe_reference_init(&gr->base.reference, 1);
- if (args->target == PIPE_BUFFER) {
- if (args->bind == VIRGL_BIND_CUSTOM) {
- /* use iovec directly when attached */
- gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
- gr->ptr = malloc(args->width);
- if (!gr->ptr) {
- FREE(gr);
- return NULL;
- }
- } else if (args->bind == VIRGL_BIND_STAGING) {
- /* staging buffers only use guest memory -- nothing to do. */
- } else if (args->bind == VIRGL_BIND_INDEX_BUFFER) {
- gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_STREAM_OUTPUT) {
- gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_VERTEX_BUFFER) {
- gr->target = GL_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_CONSTANT_BUFFER) {
- gr->target = GL_UNIFORM_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_QUERY_BUFFER) {
- gr->target = GL_QUERY_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == VIRGL_BIND_COMMAND_ARGS) {
- gr->target = GL_DRAW_INDIRECT_BUFFER;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind == 0 || args->bind == VIRGL_BIND_SHADER_BUFFER) {
- gr->target = GL_ARRAY_BUFFER_ARB;
- vrend_create_buffer(gr, args->width, args->flags);
- } else if (args->bind & VIRGL_BIND_SAMPLER_VIEW) {
- /*
- * On Desktop we use GL_ARB_texture_buffer_object on GLES we use
- * GL_EXT_texture_buffer (it is in the ANDRIOD extension pack).
- */
-#if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT
-#error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
-#endif
+ return gr;
+}
- /* need to check GL version here */
- if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
- gr->target = GL_TEXTURE_BUFFER;
- } else {
- gr->target = GL_PIXEL_PACK_BUFFER_ARB;
- }
- vrend_create_buffer(gr, args->width, args->flags);
- } else {
- vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, args->bind);
- FREE(gr);
- return NULL;
- }
+struct pipe_resource *
+vrend_renderer_resource_create(const struct vrend_renderer_resource_create_args *args,
+ void *image_oes)
+{
+ struct vrend_resource *gr;
+ int ret;
+
+ gr = vrend_resource_create(args);
+ if (!gr)
+ return NULL;
+
+ if (args->target == PIPE_BUFFER) {
+ ret = vrend_resource_alloc_buffer(gr, args->flags);
} else {
- int r = vrend_renderer_resource_allocate_texture(gr, image_oes);
- if (r) {
- FREE(gr);
- return NULL;
- }
+ const enum virgl_formats format =
+ vrend_resource_fixup_emulated_bgra(gr, image_oes);
+ ret = vrend_resource_alloc_texture(gr, format, image_oes);
+ }
+
+ if (ret) {
+ FREE(gr);
+ return NULL;
}
return &gr->base;
@@ -6959,9 +7137,6 @@ vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args,
void vrend_renderer_resource_destroy(struct vrend_resource *res)
{
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
if (has_bit(res->storage_bits, VREND_STORAGE_GL_TEXTURE)) {
glDeleteTextures(1, &res->id);
} else if (has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER)) {
@@ -6976,7 +7151,7 @@ void vrend_renderer_resource_destroy(struct vrend_resource *res)
glDeleteMemoryObjectsEXT(1, &res->memobj);
}
-#ifdef ENABLE_MINIGBM_ALLOCATION
+#if HAVE_EPOXY_EGL_H
if (res->egl_image) {
virgl_egl_image_destroy(egl, res->egl_image);
for (unsigned i = 0; i < ARRAY_SIZE(res->aux_plane_egl_image); i++) {
@@ -6985,6 +7160,8 @@ void vrend_renderer_resource_destroy(struct vrend_resource *res)
}
}
}
+#endif
+#ifdef ENABLE_MINIGBM_ALLOCATION
if (res->gbm_bo)
gbm_bo_destroy(res->gbm_bo);
#endif
@@ -7274,8 +7451,8 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
{
void *data;
- if (is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) ||
- (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY) && res->iov)) {
+ if ((is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) ||
+ has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) && res->iov) {
return vrend_copy_iovec(iov, num_iovs, info->offset,
res->iov, res->num_iovs, info->box->x,
info->box->width, res->ptr);
@@ -7321,7 +7498,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
uint32_t layer_stride = info->layer_stride;
if (ctx)
- vrend_use_program(ctx, 0);
+ vrend_use_program(ctx->sub, 0);
else
glUseProgram(0);
@@ -7393,21 +7570,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if ((!vrend_state.use_core_profile) && (res->y_0_top)) {
GLuint buffers;
+ GLuint fb_id;
- if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level) {
- GLuint fb_id;
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
- vrend_fb_bind_texture(res, 0, info->level, 0);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = info->level;
- } else {
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
- }
+ glGenFramebuffers(1, &fb_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
+ vrend_fb_bind_texture(res, 0, info->level, 0);
buffers = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, &buffers);
@@ -7415,7 +7582,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if (ctx) {
vrend_depth_test_enable(ctx, false);
vrend_alpha_test_enable(ctx, false);
- vrend_stencil_test_enable(ctx, false);
+ vrend_stencil_test_enable(ctx->sub, false);
} else {
glDisable(GL_DEPTH_TEST);
glDisable(GL_ALPHA_TEST);
@@ -7425,6 +7592,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
glWindowPos2i(info->box->x, res->y_0_top ? (int)res->base.height0 - info->box->y : info->box->y);
glDrawPixels(info->box->width, info->box->height, glformat, gltype,
data);
+ glDeleteFramebuffers(1, &fb_id);
} else {
uint32_t comp_size;
GLint old_tex = 0;
@@ -7624,17 +7792,28 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
return 0;
}
-static void do_readpixels(GLint x, GLint y,
+static void do_readpixels(struct vrend_resource *res,
+ int idx, uint32_t level, uint32_t layer,
+ GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
GLsizei bufSize, void *data)
{
+ GLuint fb_id;
+
+ glGenFramebuffers(1, &fb_id);
+ glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
+
+ vrend_fb_bind_texture(res, idx, level, layer);
+
if (has_feature(feat_arb_robustness))
glReadnPixelsARB(x, y, width, height, format, type, bufSize, data);
else if (has_feature(feat_gles_khr_robustness))
glReadnPixelsKHR(x, y, width, height, format, type, bufSize, data);
else
glReadPixels(x, y, width, height, format, type, data);
+
+ glDeleteFramebuffers(1, &fb_id);
}
static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
@@ -7644,7 +7823,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
{
char *myptr = (char*)iov[0].iov_base + info->offset;
int need_temp = 0;
- GLuint fb_id;
char *data;
bool actually_invert, separate_invert = false;
GLenum format, type;
@@ -7657,7 +7835,7 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
GLint old_fbo;
if (ctx)
- vrend_use_program(ctx, 0);
+ vrend_use_program(ctx->sub, 0);
else
glUseProgram(0);
@@ -7705,22 +7883,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &old_fbo);
- if (res->readback_fb_id == 0 || (int)res->readback_fb_level != info->level ||
- (int)res->readback_fb_z != info->box->z) {
-
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
-
- vrend_fb_bind_texture(res, 0, info->level, info->box->z);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = info->level;
- res->readback_fb_z = info->box->z;
- } else
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
if (actually_invert)
y1 = h - info->box->y - info->box->height;
else
@@ -7728,8 +7890,6 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
if (has_feature(feat_mesa_invert) && actually_invert)
glPixelStorei(GL_PACK_INVERT_MESA, 1);
- if (!vrend_format_is_ds(res->base.format))
- glReadBuffer(GL_COLOR_ATTACHMENT0);
if (!need_temp && row_stride)
glPixelStorei(GL_PACK_ROW_LENGTH, row_stride);
@@ -7787,7 +7947,8 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
}
}
- do_readpixels(info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data);
+ do_readpixels(res, 0, info->level, info->box->z, info->box->x, y1,
+ info->box->width, info->box->height, format, type, send_size, data);
if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) {
if (!vrend_state.use_core_profile)
@@ -8812,7 +8973,8 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
args.array_size = src_res->base.array_size;
intermediate_copy = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
vrend_renderer_resource_copy_args(&args, intermediate_copy);
- MAYBE_UNUSED int r = vrend_renderer_resource_allocate_texture(intermediate_copy, NULL);
+ /* this is PIPE_MASK_ZS and bgra fixup is not needed */
+ MAYBE_UNUSED int r = vrend_resource_alloc_texture(intermediate_copy, args.format, NULL);
assert(!r);
glGenFramebuffers(1, &intermediate_fbo);
@@ -9003,16 +9165,32 @@ void vrend_renderer_blit(struct vrend_context *ctx,
vrend_pause_render_condition(ctx, false);
}
-int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
+void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
+ vrend_context_fence_retire retire,
+ void *retire_data)
+{
+ assert(ctx->ctx_id);
+ ctx->fence_retire = retire;
+ ctx->fence_retire_data = retire_data;
+}
+
+int vrend_renderer_create_fence(struct vrend_context *ctx,
+ uint32_t flags,
+ void *fence_cookie)
{
struct vrend_fence *fence;
+ if (!ctx)
+ return EINVAL;
+
fence = malloc(sizeof(struct vrend_fence));
if (!fence)
return ENOMEM;
- fence->ctx_id = ctx_id;
- fence->fence_id = client_fence_id;
+ fence->ctx = ctx;
+ fence->flags = flags;
+ fence->fence_cookie = fence_cookie;
+
#ifdef HAVE_EPOXY_EGL_H
if (vrend_state.use_egl_fence) {
fence->eglsyncobj = virgl_egl_fence_create(egl);
@@ -9043,18 +9221,51 @@ int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
static void vrend_renderer_check_queries(void);
+static bool need_fence_retire_signal_locked(struct vrend_fence *fence)
+{
+ struct vrend_fence *next;
+
+ /* last fence */
+ if (fence->fences.next == &vrend_state.fence_list)
+ return true;
+
+ /* next fence belongs to a different context */
+ next = LIST_ENTRY(struct vrend_fence, fence->fences.next, fences);
+ if (next->ctx != fence->ctx)
+ return true;
+
+ /* not mergeable */
+ if (!(fence->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
+ return true;
+
+ return false;
+}
+
void vrend_renderer_check_fences(void)
{
+ struct list_head retired_fences;
struct vrend_fence *fence, *stor;
- uint32_t latest_id = 0;
+
+ list_inithead(&retired_fences);
if (vrend_state.sync_thread) {
flush_eventfd(vrend_state.eventfd);
pipe_mutex_lock(vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
- if (fence->fence_id > latest_id)
- latest_id = fence->fence_id;
- free_fence_locked(fence);
+ /* vrend_free_fences_for_context might have marked the fence invalid
+ * by setting fence->ctx to NULL
+ */
+ if (!fence->ctx) {
+ free_fence_locked(fence);
+ continue;
+ }
+
+ if (need_fence_retire_signal_locked(fence)) {
+ list_del(&fence->fences);
+ list_addtail(&fence->fences, &retired_fences);
+ } else {
+ free_fence_locked(fence);
+ }
}
pipe_mutex_unlock(vrend_state.fence_mutex);
} else {
@@ -9062,8 +9273,12 @@ void vrend_renderer_check_fences(void)
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
if (do_wait(fence, /* can_block */ false)) {
- latest_id = fence->fence_id;
- free_fence_locked(fence);
+ if (need_fence_retire_signal_locked(fence)) {
+ list_del(&fence->fences);
+ list_addtail(&fence->fences, &retired_fences);
+ } else {
+ free_fence_locked(fence);
+ }
} else {
/* don't bother checking any subsequent ones */
break;
@@ -9071,12 +9286,17 @@ void vrend_renderer_check_fences(void)
}
}
- if (latest_id == 0)
+ if (LIST_IS_EMPTY(&retired_fences))
return;
vrend_renderer_check_queries();
- vrend_clicbs->write_fence(latest_id);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) {
+ struct vrend_context *ctx = fence->ctx;
+ ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data);
+
+ free_fence_locked(fence);
+ }
}
static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result)
@@ -9387,7 +9607,11 @@ void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
if (buf) memcpy(buf, &value, size); \
glUnmapBuffer(GL_QUERY_BUFFER);
-#define BUFFER_OFFSET(i) ((void *)((char *)NULL + i))
+static inline void *buffer_offset(intptr_t i)
+{
+ return (void *)i;
+}
+
void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle,
uint32_t qbo_handle,
uint32_t wait, uint32_t result_type, uint32_t offset,
@@ -9422,16 +9646,16 @@ void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle,
glBindBuffer(GL_QUERY_BUFFER, res->id);
switch ((enum pipe_query_value_type)result_type) {
case PIPE_QUERY_TYPE_I32:
- glGetQueryObjectiv(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectiv(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_U32:
- glGetQueryObjectuiv(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectuiv(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_I64:
- glGetQueryObjecti64v(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjecti64v(q->id, qtype, buffer_offset(offset));
break;
case PIPE_QUERY_TYPE_U64:
- glGetQueryObjectui64v(q->id, qtype, BUFFER_OFFSET(offset));
+ glGetQueryObjectui64v(q->id, qtype, buffer_offset(offset));
break;
}
} else {
@@ -9877,6 +10101,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
{
GLint max;
GLfloat range[2];
+ uint32_t video_memory;
/* Count this up when you add a feature flag that is used to set a CAP in
* the guest that was set unconditionally before. Then check that flag and
@@ -9990,7 +10215,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
else
caps->v2.max_vertex_attrib_stride = 2048;
- if (has_feature(feat_compute_shader)) {
+ if (has_feature(feat_compute_shader) && (vrend_state.use_gles || gl_ver >= 33)) {
glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, (GLint*)&caps->v2.max_compute_work_group_invocations);
glGetIntegerv(GL_MAX_COMPUTE_SHARED_MEMORY_SIZE, (GLint*)&caps->v2.max_compute_shared_memory_size);
glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0, (GLint*)&caps->v2.max_compute_grid_size[0]);
@@ -10177,6 +10402,22 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (has_feature(feat_blend_equation_advanced))
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_BLEND_EQUATION;
+
+#ifdef HAVE_EPOXY_EGL_H
+ if (egl)
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_UNTYPED_RESOURCE;
+#endif
+
+ video_memory = vrend_winsys_query_video_memory();
+ if (video_memory) {
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_VIDEO_MEMORY;
+ caps->v2.max_video_memory = video_memory;
+ }
+
+ if (has_feature(feat_ati_meminfo) || has_feature(feat_nvx_gpu_memory_info)) {
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_MEMINFO;
+ }
+
}
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
@@ -10279,26 +10520,7 @@ void *vrend_renderer_get_cursor_contents(struct pipe_resource *pres,
glBindTexture(res->target, res->id);
glGetnTexImageARB(res->target, 0, format, type, size, data);
} else if (vrend_state.use_gles) {
- GLuint fb_id;
-
- if (res->readback_fb_id == 0 || res->readback_fb_level != 0 || res->readback_fb_z != 0) {
-
- if (res->readback_fb_id)
- glDeleteFramebuffers(1, &res->readback_fb_id);
-
- glGenFramebuffers(1, &fb_id);
- glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
-
- vrend_fb_bind_texture(res, 0, 0, 0);
-
- res->readback_fb_id = fb_id;
- res->readback_fb_level = 0;
- res->readback_fb_z = 0;
- } else {
- glBindFramebuffer(GL_FRAMEBUFFER, res->readback_fb_id);
- }
-
- do_readpixels(0, 0, *width, *height, format, type, size, data);
+ do_readpixels(res, 0, 0, 0, 0, 0, *width, *height, format, type, size, data);
} else {
glBindTexture(res->target, res->id);
glGetTexImage(res->target, 0, format, type, data);
@@ -10355,17 +10577,52 @@ void vrend_renderer_get_rect(struct pipe_resource *pres,
}
void vrend_renderer_attach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id,
- struct pipe_resource *pres)
-{
- struct vrend_resource *res = (struct vrend_resource *)pres;
- vrend_ctx_resource_insert(ctx->res_hash, res_id, res);
+ struct virgl_resource *res)
+{
+ if (!res->pipe_resource) {
+ /* move the last untyped resource from cache to list */
+ if (unlikely(ctx->untyped_resource_cache)) {
+ struct virgl_resource *last = ctx->untyped_resource_cache;
+ struct vrend_untyped_resource *wrapper = malloc(sizeof(*wrapper));
+ if (wrapper) {
+ wrapper->resource = last;
+ list_add(&wrapper->head, &ctx->untyped_resources);
+ } else {
+ vrend_printf("dropping attached resource %d due to OOM\n", last->res_id);
+ }
+ }
+
+ ctx->untyped_resource_cache = res;
+ /* defer to vrend_renderer_pipe_resource_set_type */
+ return;
+ }
+
+ vrend_ctx_resource_insert(ctx->res_hash,
+ res->res_id,
+ (struct vrend_resource *)res->pipe_resource);
}
void vrend_renderer_detach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id)
+ struct virgl_resource *res)
{
- vrend_ctx_resource_remove(ctx->res_hash, res_id);
+ if (!res->pipe_resource) {
+ if (ctx->untyped_resource_cache == res) {
+ ctx->untyped_resource_cache = NULL;
+ } else {
+ struct vrend_untyped_resource *iter;
+ LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) {
+ if (iter->resource == res) {
+ list_del(&iter->head);
+ free(iter);
+ break;
+ }
+ }
+ }
+
+ return;
+ }
+
+ vrend_ctx_resource_remove(ctx->res_hash, res->res_id);
}
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
@@ -10438,6 +10695,7 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
+ sub->parent = ctx;
vrend_clicbs->make_current(sub->gl_context);
/* enable if vrend_renderer_init function has done it as well */
@@ -10463,7 +10721,9 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
glBindFramebuffer(GL_FRAMEBUFFER, sub->fb_id);
glGenFramebuffers(2, sub->blit_fb_ids);
- list_inithead(&sub->programs);
+ for (int i = 0; i < VREND_PROGRAM_NQUEUES; ++i)
+ list_inithead(&sub->gl_programs[i]);
+ list_inithead(&sub->cs_programs);
list_inithead(&sub->streamout_list);
sub->object_hash = vrend_object_init_ctx_table();
@@ -10571,6 +10831,7 @@ int vrend_renderer_export_query(struct pipe_resource *pres,
*/
export_query->out_num_fds = 0;
export_query->out_fourcc = 0;
+ export_query->out_modifier = DRM_FORMAT_MOD_INVALID;
if (export_query->in_export_fds)
return -EINVAL;
@@ -10578,7 +10839,7 @@ int vrend_renderer_export_query(struct pipe_resource *pres,
}
int vrend_renderer_pipe_resource_create(struct vrend_context *ctx, uint32_t blob_id,
- struct vrend_renderer_resource_create_args *args)
+ const struct vrend_renderer_resource_create_args *args)
{
struct vrend_resource *res;
res = (struct vrend_resource *)vrend_renderer_resource_create(args, NULL);
@@ -10608,16 +10869,128 @@ struct pipe_resource *vrend_get_blob_pipe(struct vrend_context *ctx, uint64_t bl
return NULL;
}
-int vrend_renderer_resource_get_map_info(struct pipe_resource *pres, uint32_t *map_info)
+int
+vrend_renderer_pipe_resource_set_type(struct vrend_context *ctx,
+ uint32_t res_id,
+ const struct vrend_renderer_resource_set_type_args *args)
{
- struct vrend_resource *res = (struct vrend_resource *)pres;
- if (!res->map_info)
- return -EINVAL;
+ struct virgl_resource *res = NULL;
+
+ /* look up the untyped resource */
+ if (ctx->untyped_resource_cache &&
+ ctx->untyped_resource_cache->res_id == res_id) {
+ res = ctx->untyped_resource_cache;
+ ctx->untyped_resource_cache = NULL;
+ } else {
+ /* cache miss */
+ struct vrend_untyped_resource *iter;
+ LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) {
+ if (iter->resource->res_id == res_id) {
+ res = iter->resource;
+ list_del(&iter->head);
+ free(iter);
+ break;
+ }
+ }
+ }
+
+ /* either a bad res_id or the resource is already typed */
+ if (!res) {
+ if (vrend_renderer_ctx_res_lookup(ctx, res_id))
+ return 0;
+
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_id);
+ return EINVAL;
+ }
+
+ /* resource is still untyped */
+ if (!res->pipe_resource) {
+#ifdef HAVE_EPOXY_EGL_H
+ const struct vrend_renderer_resource_create_args create_args = {
+ .target = PIPE_TEXTURE_2D,
+ .format = args->format,
+ .bind = args->bind,
+ .width = args->width,
+ .height = args->height,
+ .depth = 1,
+ .array_size = 1,
+ .last_level = 0,
+ .nr_samples = 0,
+ .flags = 0,
+ };
+ int plane_fds[VIRGL_GBM_MAX_PLANES];
+ struct vrend_resource *gr;
+ uint32_t virgl_format;
+ uint32_t drm_format;
+ int ret;
+
+ if (res->fd_type != VIRGL_RESOURCE_FD_DMABUF)
+ return EINVAL;
+
+ for (uint32_t i = 0; i < args->plane_count; i++)
+ plane_fds[i] = res->fd;
+
+ gr = vrend_resource_create(&create_args);
+ if (!gr)
+ return ENOMEM;
+
+ virgl_format = vrend_resource_fixup_emulated_bgra(gr, true);
+ drm_format = 0;
+ if (virgl_gbm_convert_format(&virgl_format, &drm_format)) {
+ vrend_printf("%s: unsupported format %d\n", __func__, virgl_format);
+ FREE(gr);
+ return EINVAL;
+ }
+
+ gr->egl_image = virgl_egl_image_from_dmabuf(egl,
+ args->width,
+ args->height,
+ drm_format,
+ args->modifier,
+ args->plane_count,
+ plane_fds,
+ args->plane_strides,
+ args->plane_offsets);
+ if (!gr->egl_image) {
+ vrend_printf("%s: failed to create egl image\n", __func__);
+ FREE(gr);
+ return EINVAL;
+ }
+
+ gr->storage_bits |= VREND_STORAGE_EGL_IMAGE;
+
+ ret = vrend_resource_alloc_texture(gr, virgl_format, gr->egl_image);
+ if (ret) {
+ virgl_egl_image_destroy(egl, gr->egl_image);
+ FREE(gr);
+ return ret;
+ }
+
+ /* "promote" the fd to pipe_resource */
+ close(res->fd);
+ res->fd = -1;
+ res->fd_type = VIRGL_RESOURCE_FD_INVALID;
+ res->pipe_resource = &gr->base;
+#else /* HAVE_EPOXY_EGL_H */
+ (void)args;
+ vrend_printf("%s: no EGL support \n", __func__);
+ return EINVAL;
+#endif /* HAVE_EPOXY_EGL_H */
+ }
+
+ vrend_ctx_resource_insert(ctx->res_hash,
+ res->res_id,
+ (struct vrend_resource *)res->pipe_resource);
- *map_info = res->map_info;
return 0;
}
+uint32_t vrend_renderer_resource_get_map_info(struct pipe_resource *pres)
+{
+ struct vrend_resource *res = (struct vrend_resource *)pres;
+ return res->map_info;
+}
+
int vrend_renderer_resource_map(struct pipe_resource *pres, void **map, uint64_t *out_size)
{
struct vrend_resource *res = (struct vrend_resource *)pres;
@@ -10646,7 +11019,41 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres)
return 0;
}
-int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
+int vrend_renderer_create_ctx0_fence(uint32_t fence_id)
+{
+ void *fence_cookie = (void *)(uintptr_t)fence_id;
+ return vrend_renderer_create_fence(vrend_state.ctx0,
+ VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_cookie);
+}
+
+static bool find_ctx0_fence_locked(struct list_head *fence_list,
+ void *fence_cookie,
+ bool *seen_first,
+ struct vrend_fence **fence)
+{
+ struct vrend_fence *iter;
+
+ LIST_FOR_EACH_ENTRY(iter, fence_list, fences) {
+ /* only consider ctx0 fences */
+ if (iter->ctx != vrend_state.ctx0)
+ continue;
+
+ if (iter->fence_cookie == fence_cookie) {
+ *fence = iter;
+ return true;
+ }
+
+ if (!*seen_first) {
+ if (fence_cookie < iter->fence_cookie)
+ return true;
+ *seen_first = true;
+ }
+ }
+
+ return false;
+}
+
+int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd) {
#ifdef HAVE_EPOXY_EGL_H
if (!vrend_state.use_egl_fence) {
return -EINVAL;
@@ -10655,45 +11062,61 @@ int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
if (vrend_state.sync_thread)
pipe_mutex_lock(vrend_state.fence_mutex);
+ void *fence_cookie = (void *)(uintptr_t)fence_id;
+ bool seen_first = false;
struct vrend_fence *fence = NULL;
- struct vrend_fence *iter;
- uint32_t min_fence_id = UINT_MAX;
-
- if (!LIST_IS_EMPTY(&vrend_state.fence_list)) {
- min_fence_id = LIST_ENTRY(struct vrend_fence, vrend_state.fence_list.next, fences)->fence_id;
- } else if (!LIST_IS_EMPTY(&vrend_state.fence_wait_list)) {
- min_fence_id =
- LIST_ENTRY(struct vrend_fence, vrend_state.fence_wait_list.next, fences)->fence_id;
- }
-
- if (fence_id < min_fence_id) {
- if (vrend_state.sync_thread)
- pipe_mutex_unlock(vrend_state.fence_mutex);
- return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
- }
-
- LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_list, fences) {
- if (iter->fence_id == fence_id) {
- fence = iter;
- break;
- }
- }
-
- if (!fence) {
- LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_wait_list, fences) {
- if (iter->fence_id == fence_id) {
- fence = iter;
- break;
- }
- }
+ bool found = find_ctx0_fence_locked(&vrend_state.fence_list,
+ fence_cookie,
+ &seen_first,
+ &fence);
+ if (!found) {
+ found = find_ctx0_fence_locked(&vrend_state.fence_wait_list,
+ fence_cookie,
+ &seen_first,
+ &fence);
+ /* consider signaled when no active ctx0 fence at all */
+ if (!found && !seen_first)
+ found = true;
}
if (vrend_state.sync_thread)
pipe_mutex_unlock(vrend_state.fence_mutex);
- if (fence && virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd)) {
- return 0;
+ if (found) {
+ if (fence)
+ return virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd) ? 0 : -EINVAL;
+ else
+ return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
}
#endif
return -EINVAL;
}
+
+void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle)
+{
+ struct vrend_resource *res;
+ struct virgl_memory_info *info;
+
+ res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
+
+ info = (struct virgl_memory_info *)res->iov->iov_base;
+
+ if (has_feature(feat_nvx_gpu_memory_info)) {
+ int i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &i);
+ info->total_device_memory = i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &i);
+ info->total_staging_memory = i - info->total_device_memory;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX, &i);
+ info->nr_device_memory_evictions = i;
+ glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX, &i);
+ info->device_memory_evicted = i;
+ }
+
+ if (has_feature(feat_ati_meminfo)) {
+ int i[4];
+ glGetIntegerv(GL_VBO_FREE_MEMORY_ATI, i);
+ info->avail_device_memory = i[0];
+ info->avail_staging_memory = i[2];
+ }
+}
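
The fence rework above makes fences per-context: each fence carries an opaque cookie and is reported back through the context's retire callback instead of the old global write_fence(id) hook. Below is a minimal usage sketch under that model; the my_* names are hypothetical and not part of virglrenderer, while the vrend_renderer_* entry points, the retire typedef, and VIRGL_RENDERER_FENCE_FLAG_MERGEABLE are taken from this change.

#include <stdint.h>
#include "virglrenderer.h"
#include "vrend_renderer.h"

/* hypothetical frontend hook, not part of virglrenderer */
void my_frontend_signal(void *frontend, uint32_t seqno);

static void my_fence_retired(void *fence_cookie, void *retire_data)
{
   /* cookie and retire_data come back exactly as they were passed in */
   my_frontend_signal(retire_data, (uint32_t)(uintptr_t)fence_cookie);
}

static void my_ctx_setup_fencing(struct vrend_context *ctx, void *frontend)
{
   /* registered once per context; ctx0 keeps using vrend_renderer_create_ctx0_fence() */
   vrend_renderer_set_fence_retire(ctx, my_fence_retired, frontend);
}

static int my_ctx_emit_fence(struct vrend_context *ctx, uint32_t seqno)
{
   /* MERGEABLE lets consecutive fences of one context be coalesced, so only
    * the last fence of a run is pushed through the retire callback */
   return vrend_renderer_create_fence(ctx, VIRGL_RENDERER_FENCE_FLAG_MERGEABLE,
                                      (void *)(uintptr_t)seqno);
}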
diff --git a/src/vrend_renderer.h b/src/vrend_renderer.h
index da4ffe9c..297fc5c9 100644
--- a/src/vrend_renderer.h
+++ b/src/vrend_renderer.h
@@ -46,6 +46,7 @@ struct virgl_gl_ctx_param {
};
struct virgl_context;
+struct virgl_resource;
struct vrend_context;
/* Number of mipmap levels for which to keep the backing iov offsets.
@@ -70,11 +71,6 @@ struct vrend_resource {
GLuint id;
GLenum target;
- /* fb id if we need to readback this resource */
- GLuint readback_fb_id;
- GLuint readback_fb_level;
- GLuint readback_fb_z;
-
GLuint tbo_tex_id;/* tbos have two ids to track */
bool y_0_top;
@@ -111,8 +107,11 @@ struct vrend_format_table {
uint32_t flags;
};
+typedef void (*vrend_context_fence_retire)(void *fence_cookie,
+ void *retire_data);
+
struct vrend_if_cbs {
- void (*write_fence)(unsigned fence_id);
+ vrend_context_fence_retire ctx0_fence_retire;
virgl_gl_context (*create_gl_context)(int scanout, struct virgl_gl_ctx_param *params);
void (*destroy_gl_context)(virgl_gl_context ctx);
@@ -185,8 +184,22 @@ struct vrend_renderer_resource_create_args {
uint32_t flags;
};
+/* set the type info of an untyped blob resource */
+struct vrend_renderer_resource_set_type_args {
+ uint32_t format;
+ uint32_t bind;
+ uint32_t width;
+ uint32_t height;
+ uint32_t usage;
+ uint64_t modifier;
+ uint32_t plane_count;
+ uint32_t plane_strides[VIRGL_GBM_MAX_PLANES];
+ uint32_t plane_offsets[VIRGL_GBM_MAX_PLANES];
+};
+
struct pipe_resource *
-vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, void *image_eos);
+vrend_renderer_resource_create(const struct vrend_renderer_resource_create_args *args,
+ void *image_eos);
int vrend_create_surface(struct vrend_context *ctx,
uint32_t handle,
@@ -326,7 +339,6 @@ void vrend_set_min_samples(struct vrend_context *ctx, unsigned min_samples);
void vrend_set_constants(struct vrend_context *ctx,
uint32_t shader,
- uint32_t index,
uint32_t num_constant,
const float *data);
@@ -343,11 +355,18 @@ void vrend_set_tess_state(struct vrend_context *ctx, const float tess_factors[6]
void vrend_renderer_fini(void);
-int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id);
+void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
+ vrend_context_fence_retire retire,
+ void *retire_data);
+
+int vrend_renderer_create_fence(struct vrend_context *ctx,
+ uint32_t flags,
+ void *fence_cookie);
void vrend_renderer_check_fences(void);
-int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd);
+int vrend_renderer_create_ctx0_fence(uint32_t fence_id);
+int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd);
bool vrend_hw_switch_context(struct vrend_context *ctx, bool now);
uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
@@ -405,10 +424,9 @@ void vrend_renderer_get_rect(struct pipe_resource *pres,
int x, int y, int width, int height);
void vrend_renderer_attach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id,
- struct pipe_resource *pres);
+ struct virgl_resource *res);
void vrend_renderer_detach_res_ctx(struct vrend_context *ctx,
- uint32_t res_id);
+ struct virgl_resource *res);
struct vrend_context_tweaks *vrend_get_context_tweaks(struct vrend_context *ctx);
@@ -487,14 +505,20 @@ void vrend_sync_make_current(virgl_gl_context);
int
vrend_renderer_pipe_resource_create(struct vrend_context *ctx, uint32_t blob_id,
- struct vrend_renderer_resource_create_args *args);
+ const struct vrend_renderer_resource_create_args *args);
struct pipe_resource *vrend_get_blob_pipe(struct vrend_context *ctx, uint64_t blob_id);
-int vrend_renderer_resource_get_map_info(struct pipe_resource *pres, uint32_t *map_info);
+int
+vrend_renderer_pipe_resource_set_type(struct vrend_context *ctx,
+ uint32_t res_id,
+ const struct vrend_renderer_resource_set_type_args *args);
+
+uint32_t vrend_renderer_resource_get_map_info(struct pipe_resource *pres);
int vrend_renderer_resource_map(struct pipe_resource *pres, void **map, uint64_t *out_size);
int vrend_renderer_resource_unmap(struct pipe_resource *pres);
+void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle);
#endif
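
vrend_renderer_pipe_resource_set_type() gives an untyped blob resource, attached with only a dmabuf fd, its format, size and plane layout; the renderer then imports the dmabuf as an EGL image and inserts the now-typed resource into the context's resource table. A sketch of filling the argument struct for a single-plane linear BGRA image follows; the dimensions, stride and format choice are illustrative only, and the format/bind enums come from virgl_hw.h.

#include "virgl_hw.h"
#include "vrend_renderer.h"

static int my_type_linear_bgra(struct vrend_context *ctx, uint32_t res_id)
{
   /* illustrative values: a 256x256 single-plane linear BGRA image */
   const struct vrend_renderer_resource_set_type_args args = {
      .format = VIRGL_FORMAT_B8G8R8A8_UNORM,
      .bind = VIRGL_BIND_SAMPLER_VIEW,
      .width = 256,
      .height = 256,
      .usage = 0,
      .modifier = 0,                     /* linear layout */
      .plane_count = 1,
      .plane_strides = { 256 * 4 },
      .plane_offsets = { 0 },
   };

   /* returns 0 on success, or a positive errno value (e.g. EINVAL when the
    * resource is unknown or does not wrap a dmabuf) */
   return vrend_renderer_pipe_resource_set_type(ctx, res_id, &args);
}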
diff --git a/src/vrend_winsys.c b/src/vrend_winsys.c
index 4e4bbc10..43f2e4e6 100644
--- a/src/vrend_winsys.c
+++ b/src/vrend_winsys.c
@@ -198,3 +198,12 @@ int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int
return -1;
#endif
}
+
+uint32_t vrend_winsys_query_video_memory(void)
+{
+#ifdef HAVE_EPOXY_GLX_H
+ return virgl_glx_query_video_memory(glx_info);
+#else
+ return 0;
+#endif
+}
\ No newline at end of file
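
vrend_winsys_query_video_memory() currently only has a GLX backend (GLX_MESA_query_renderer); EGL-only builds return 0, so the VIRGL_CAP_V2_VIDEO_MEMORY bit stays unset and guests have to treat the value as optional. A sketch of the consuming side, assuming the helper name; max_video_memory holds whatever the host query returned (megabytes for GLX_RENDERER_VIDEO_MEMORY_MESA).

#include "virgl_hw.h"

/* sketch: returns 0 when the host did not report video memory */
static uint32_t my_query_video_memory(const union virgl_caps *caps)
{
   if (!(caps->v2.capability_bits_v2 & VIRGL_CAP_V2_VIDEO_MEMORY))
      return 0;
   return caps->v2.max_video_memory;
}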
diff --git a/src/vrend_winsys.h b/src/vrend_winsys.h
index 24b1e5bb..5be90ea0 100644
--- a/src/vrend_winsys.h
+++ b/src/vrend_winsys.h
@@ -34,6 +34,10 @@
#include "virglrenderer.h"
+#ifndef DRM_FORMAT_MOD_INVALID
+#define DRM_FORMAT_MOD_INVALID 0x00ffffffffffffffULL
+#endif
+
struct virgl_gl_ctx_param;
#ifdef HAVE_EPOXY_EGL_H
@@ -54,4 +58,6 @@ int vrend_winsys_get_fourcc_for_texture(uint32_t tex_id, uint32_t format, int *f
int vrend_winsys_get_fd_for_texture(uint32_t tex_id, int *fd);
int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset);
+uint32_t vrend_winsys_query_video_memory(void);
+
#endif /* VREND_WINSYS_H */
diff --git a/src/vrend_winsys_egl.c b/src/vrend_winsys_egl.c
index 6a6e7f77..de116d5a 100644
--- a/src/vrend_winsys_egl.c
+++ b/src/vrend_winsys_egl.c
@@ -39,6 +39,7 @@
#include "util/u_memory.h"
#include "virglrenderer.h"
+#include "vrend_winsys.h"
#include "vrend_winsys_egl.h"
#include "virgl_hw.h"
#include "vrend_winsys_gbm.h"
@@ -405,121 +406,148 @@ bool virgl_has_egl_khr_gl_colorspace(struct virgl_egl *egl)
return has_bit(egl->extension_bits, EGL_KHR_GL_COLORSPACE);
}
+void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl,
+ uint32_t width,
+ uint32_t height,
+ uint32_t drm_format,
+ uint64_t drm_modifier,
+ uint32_t plane_count,
+ const int *plane_fds,
+ const uint32_t *plane_strides,
+ const uint32_t *plane_offsets)
+{
+ EGLint attrs[6 + VIRGL_GBM_MAX_PLANES * 10 + 1];
+ uint32_t count;
+
+ assert(VIRGL_GBM_MAX_PLANES <= 4);
+ assert(plane_count && plane_count <= VIRGL_GBM_MAX_PLANES);
+
+ count = 0;
+ attrs[count++] = EGL_WIDTH;
+ attrs[count++] = width;
+ attrs[count++] = EGL_HEIGHT;
+ attrs[count++] = height;
+ attrs[count++] = EGL_LINUX_DRM_FOURCC_EXT;
+ attrs[count++] = drm_format;
+ for (uint32_t i = 0; i < plane_count; i++) {
+ if (i < 3) {
+ attrs[count++] = EGL_DMA_BUF_PLANE0_FD_EXT + i * 3;
+ attrs[count++] = plane_fds[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE0_PITCH_EXT + i * 3;
+ attrs[count++] = plane_strides[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT + i * 3;
+ attrs[count++] = plane_offsets[i];
+ }
+
+ if (has_bit(egl->extension_bits, EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
+ if (i == 3) {
+ attrs[count++] = EGL_DMA_BUF_PLANE3_FD_EXT;
+ attrs[count++] = plane_fds[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
+ attrs[count++] = plane_strides[i];
+ attrs[count++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
+ attrs[count++] = plane_offsets[i];
+ }
+
+ if (drm_modifier != DRM_FORMAT_MOD_INVALID) {
+ attrs[count++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT + i * 2;
+ attrs[count++] = (uint32_t)drm_modifier;
+ attrs[count++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT + i * 2;
+ attrs[count++] = (uint32_t)(drm_modifier >> 32);
+ }
+ }
+ }
+ attrs[count++] = EGL_NONE;
+ assert(count <= ARRAY_SIZE(attrs));
+
+ return (void *)eglCreateImageKHR(egl->egl_display,
+ EGL_NO_CONTEXT,
+ EGL_LINUX_DMA_BUF_EXT,
+ (EGLClientBuffer)NULL,
+ attrs);
+}
+
+void virgl_egl_image_destroy(struct virgl_egl *egl, void *image)
+{
+ eglDestroyImageKHR(egl->egl_display, image);
+}
+
#ifdef ENABLE_MINIGBM_ALLOCATION
-void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo)
+void *virgl_egl_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo)
{
int ret;
- EGLImageKHR image;
+ void *image = NULL;
int fds[VIRGL_GBM_MAX_PLANES] = {-1, -1, -1, -1};
+ uint32_t strides[VIRGL_GBM_MAX_PLANES];
+ uint32_t offsets[VIRGL_GBM_MAX_PLANES];
int num_planes = gbm_bo_get_plane_count(bo);
- // When the bo has 3 planes with modifier support, it requires 37 components.
- EGLint khr_image_attrs[37] = {
- EGL_WIDTH,
- gbm_bo_get_width(bo),
- EGL_HEIGHT,
- gbm_bo_get_height(bo),
- EGL_LINUX_DRM_FOURCC_EXT,
- (int)gbm_bo_get_format(bo),
- EGL_NONE,
- };
if (num_planes < 0 || num_planes > VIRGL_GBM_MAX_PLANES)
- return (void *)EGL_NO_IMAGE_KHR;
+ return NULL;
for (int plane = 0; plane < num_planes; plane++) {
uint32_t handle = gbm_bo_get_handle_for_plane(bo, plane).u32;
ret = virgl_gbm_export_fd(egl->gbm->device, handle, &fds[plane]);
if (ret < 0) {
vrend_printf( "failed to export plane handle\n");
- image = (void *)EGL_NO_IMAGE_KHR;
goto out_close;
}
- }
- size_t attrs_index = 6;
- for (int plane = 0; plane < num_planes; plane++) {
- khr_image_attrs[attrs_index++] = EGL_DMA_BUF_PLANE0_FD_EXT + plane * 3;
- khr_image_attrs[attrs_index++] = fds[plane];
- khr_image_attrs[attrs_index++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT + plane * 3;
- khr_image_attrs[attrs_index++] = gbm_bo_get_offset(bo, plane);
- khr_image_attrs[attrs_index++] = EGL_DMA_BUF_PLANE0_PITCH_EXT + plane * 3;
- khr_image_attrs[attrs_index++] = gbm_bo_get_stride_for_plane(bo, plane);
- if (has_bit(egl->extension_bits, EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
- const uint64_t modifier = gbm_bo_get_modifier(bo);
- khr_image_attrs[attrs_index++] =
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT + plane * 2;
- khr_image_attrs[attrs_index++] = modifier & 0xfffffffful;
- khr_image_attrs[attrs_index++] =
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT + plane * 2;
- khr_image_attrs[attrs_index++] = modifier >> 32;
- }
+ strides[plane] = gbm_bo_get_stride_for_plane(bo, plane);
+ offsets[plane] = gbm_bo_get_offset(bo, plane);
}
- khr_image_attrs[attrs_index++] = EGL_NONE;
- image = eglCreateImageKHR(egl->egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL,
- khr_image_attrs);
+ image = virgl_egl_image_from_dmabuf(egl,
+ gbm_bo_get_width(bo),
+ gbm_bo_get_height(bo),
+ gbm_bo_get_format(bo),
+ gbm_bo_get_modifier(bo),
+ num_planes,
+ fds,
+ strides,
+ offsets);
out_close:
for (int plane = 0; plane < num_planes; plane++)
close(fds[plane]);
- return (void*)image;
+ return image;
}
-void *virgl_egl_aux_plane_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo, int plane)
+void *virgl_egl_aux_plane_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo, int plane)
{
int ret;
- EGLImageKHR image = EGL_NO_IMAGE_KHR;
+ void *image = NULL;
int fd = -1;
int bytes_per_pixel = virgl_gbm_get_plane_bytes_per_pixel(bo, plane);
if (bytes_per_pixel != 1 && bytes_per_pixel != 2)
- return (void *)EGL_NO_IMAGE_KHR;
+ return NULL;
uint32_t handle = gbm_bo_get_handle_for_plane(bo, plane).u32;
ret = drmPrimeHandleToFD(gbm_device_get_fd(egl->gbm->device), handle, DRM_CLOEXEC, &fd);
if (ret < 0) {
vrend_printf("failed to export plane handle %d\n", errno);
- return (void *)EGL_NO_IMAGE_KHR;
- }
-
- EGLint khr_image_attrs[17] = {
- EGL_WIDTH,
- virgl_gbm_get_plane_width(bo, plane),
- EGL_HEIGHT,
- virgl_gbm_get_plane_height(bo, plane),
- EGL_LINUX_DRM_FOURCC_EXT,
- (int) (bytes_per_pixel == 1 ? GBM_FORMAT_R8 : GBM_FORMAT_GR88),
- EGL_DMA_BUF_PLANE0_FD_EXT,
- fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT,
- gbm_bo_get_offset(bo, plane),
- EGL_DMA_BUF_PLANE0_PITCH_EXT,
- gbm_bo_get_stride_for_plane(bo, plane),
- };
-
- if (has_bit(egl->extension_bits, EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
- const uint64_t modifier = gbm_bo_get_modifier(bo);
- khr_image_attrs[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
- khr_image_attrs[13] = modifier & 0xfffffffful;
- khr_image_attrs[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
- khr_image_attrs[15] = modifier >> 32;
- khr_image_attrs[16] = EGL_NONE;
- } else {
- khr_image_attrs[12] = EGL_NONE;
+ return NULL;
}
- image = eglCreateImageKHR(egl->egl_display, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, khr_image_attrs);
-
+ const uint32_t format = bytes_per_pixel == 1 ? GBM_FORMAT_R8 : GBM_FORMAT_GR88;
+ const uint32_t stride = gbm_bo_get_stride_for_plane(bo, plane);
+ const uint32_t offset = gbm_bo_get_offset(bo, plane);
+ image = virgl_egl_image_from_dmabuf(egl,
+ virgl_gbm_get_plane_width(bo, plane),
+ virgl_gbm_get_plane_height(bo, plane),
+ format,
+ gbm_bo_get_modifier(bo),
+ 1,
+ &fd,
+ &stride,
+ &offset);
close(fd);
- return (void*)image;
-}
-void virgl_egl_image_destroy(struct virgl_egl *egl, void *image)
-{
- eglDestroyImageKHR(egl->egl_display, image);
+ return image;
}
-#endif
+#endif /* ENABLE_MINIGBM_ALLOCATION */
bool virgl_egl_supports_fences(struct virgl_egl *egl)
{
diff --git a/src/vrend_winsys_egl.h b/src/vrend_winsys_egl.h
index 1fb0ccbc..b4c9b21e 100644
--- a/src/vrend_winsys_egl.h
+++ b/src/vrend_winsys_egl.h
@@ -56,10 +56,20 @@ int virgl_egl_get_fd_for_texture(struct virgl_egl *egl, uint32_t tex_id, int *fd
int virgl_egl_get_fd_for_texture2(struct virgl_egl *egl, uint32_t tex_id, int *fd, int *stride,
int *offset);
-void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo);
-void *virgl_egl_aux_plane_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo, int plane);
+void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl,
+ uint32_t width,
+ uint32_t height,
+ uint32_t drm_format,
+ uint64_t drm_modifier,
+ uint32_t plane_count,
+ const int *plane_fds,
+ const uint32_t *plane_strides,
+ const uint32_t *plane_offsets);
void virgl_egl_image_destroy(struct virgl_egl *egl, void *image);
+void *virgl_egl_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo);
+void *virgl_egl_aux_plane_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo *bo, int plane);
+
bool virgl_egl_supports_fences(struct virgl_egl *egl);
EGLSyncKHR virgl_egl_fence_create(struct virgl_egl *egl);
void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence);
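
virgl_egl_image_from_dmabuf() is now the generic import path; the GBM-based helpers are thin wrappers that extract per-plane fds, strides and offsets from a gbm_bo and forward them. A minimal sketch of a direct single-plane import, with the fourcc, stride and sizes supplied by the (hypothetical) caller; passing DRM_FORMAT_MOD_INVALID skips the per-plane modifier attributes.

#include <stdint.h>
#include "vrend_winsys.h"       /* provides the DRM_FORMAT_MOD_INVALID fallback */
#include "vrend_winsys_egl.h"

static void *my_import_single_plane(struct virgl_egl *egl, int dmabuf_fd,
                                    uint32_t width, uint32_t height,
                                    uint32_t stride, uint32_t drm_fourcc)
{
   const int fds[1] = { dmabuf_fd };
   const uint32_t strides[1] = { stride };
   const uint32_t offsets[1] = { 0 };

   /* returns NULL on failure; release with virgl_egl_image_destroy() */
   return virgl_egl_image_from_dmabuf(egl, width, height, drm_fourcc,
                                      DRM_FORMAT_MOD_INVALID,
                                      1, fds, strides, offsets);
}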
diff --git a/src/vrend_winsys_gbm.c b/src/vrend_winsys_gbm.c
index 578086d4..65197b4a 100644
--- a/src/vrend_winsys_gbm.c
+++ b/src/vrend_winsys_gbm.c
@@ -38,6 +38,7 @@
#include "util/u_memory.h"
#include "pipe/p_state.h"
+#include "vrend_winsys.h"
#include "vrend_winsys_gbm.h"
#include "virgl_hw.h"
#include "vrend_debug.h"
@@ -461,7 +462,7 @@ int virgl_gbm_export_query(struct gbm_bo *bo, struct virgl_renderer_export_query
query->out_num_fds = 0;
query->out_fourcc = 0;
- query->out_modifier = 0;
+ query->out_modifier = DRM_FORMAT_MOD_INVALID;
for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
query->out_fds[plane] = -1;
query->out_strides[plane] = 0;
diff --git a/src/vrend_winsys_glx.c b/src/vrend_winsys_glx.c
index 23bb9834..5b907ad6 100644
--- a/src/vrend_winsys_glx.c
+++ b/src/vrend_winsys_glx.c
@@ -102,3 +102,15 @@ int virgl_glx_make_context_current(struct virgl_glx *d, virgl_renderer_gl_contex
{
return glXMakeContextCurrent(d->display, d->pbuffer, d->pbuffer, virglctx);
}
+
+uint32_t virgl_glx_query_video_memory(struct virgl_glx *d)
+{
+ uint32_t video_memory = 0;
+ if (d) {
+ if (epoxy_has_glx_extension(d->display, DefaultScreen(d->display), "GLX_MESA_query_renderer")) {
+ glXQueryCurrentRendererIntegerMESA(GLX_RENDERER_VIDEO_MEMORY_MESA, &video_memory);
+ }
+ }
+
+ return video_memory;
+}
\ No newline at end of file
diff --git a/src/vrend_winsys_glx.h b/src/vrend_winsys_glx.h
index e5cecbac..e8f7697a 100644
--- a/src/vrend_winsys_glx.h
+++ b/src/vrend_winsys_glx.h
@@ -33,5 +33,6 @@ void virgl_glx_destroy(struct virgl_glx *ve);
virgl_renderer_gl_context virgl_glx_create_context(struct virgl_glx *ve, struct virgl_gl_ctx_param *vparams);
void virgl_glx_destroy_context(struct virgl_glx *ve, virgl_renderer_gl_context virglctx);
int virgl_glx_make_context_current(struct virgl_glx *ve, virgl_renderer_gl_context virglctx);
+uint32_t virgl_glx_query_video_memory(struct virgl_glx *ve);
#endif
diff --git a/tests/meson.build b/tests/meson.build
index 66b11575..01649ebf 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -42,6 +42,7 @@ libvrtest = static_library(
tests = [
['test_virgl_init', 'test_virgl_init.c'],
+ ['test_virgl_fence', 'test_virgl_fence.c'],
['test_virgl_resource', 'test_virgl_resource.c'],
['test_virgl_transfer', 'test_virgl_transfer.c'],
['test_virgl_cmd', 'test_virgl_cmd.c'],
diff --git a/tests/test_virgl_cmd.c b/tests/test_virgl_cmd.c
index 91f25821..d845ef50 100644
--- a/tests/test_virgl_cmd.c
+++ b/tests/test_virgl_cmd.c
@@ -1048,6 +1048,8 @@ int main(void)
if (getenv("VRENDTEST_USE_EGL_SURFACELESS"))
context_flags |= VIRGL_RENDERER_USE_SURFACELESS;
+ if (getenv("VRENDTEST_USE_EGL_GLES"))
+ context_flags |= VIRGL_RENDERER_USE_GLES;
s = virgl_init_suite();
sr = srunner_create(s);
diff --git a/tests/test_virgl_fence.c b/tests/test_virgl_fence.c
new file mode 100644
index 00000000..9e498ce4
--- /dev/null
+++ b/tests/test_virgl_fence.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright 2020 Google LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/*
+ * fence creation and polling tests, plus sync-file
+ * export tests where fence export is supported.
+ */
+
+#include <check.h>
+#include <errno.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <virglrenderer.h>
+
+#include "testvirgl.h"
+
+START_TEST(virgl_fence_create)
+{
+ int ret;
+ ret = testvirgl_init_single_ctx();
+ ck_assert_int_eq(ret, 0);
+
+ testvirgl_reset_fence();
+ ret = virgl_renderer_create_fence(1, 0);
+ ck_assert_int_eq(ret, 0);
+
+ testvirgl_fini_single_ctx();
+}
+END_TEST
+
+START_TEST(virgl_fence_poll)
+{
+ const int target_seqno = 50;
+ int ret;
+ ret = testvirgl_init_single_ctx();
+ ck_assert_int_eq(ret, 0);
+
+ testvirgl_reset_fence();
+ ret = virgl_renderer_create_fence(target_seqno, 0);
+ ck_assert_int_eq(ret, 0);
+
+ do {
+ int seqno;
+
+ virgl_renderer_poll();
+ seqno = testvirgl_get_last_fence();
+ if (seqno == target_seqno)
+ break;
+
+ ck_assert_int_eq(seqno, 0);
+ usleep(1000);
+ } while(1);
+
+ testvirgl_fini_single_ctx();
+}
+END_TEST
+
+START_TEST(virgl_fence_poll_many)
+{
+ const int fence_count = 100;
+ const int base_seqno = 50;
+ const int target_seqno = base_seqno + fence_count - 1;
+ int last_seqno;
+ int ret;
+ int i;
+
+ ret = testvirgl_init_single_ctx();
+ ck_assert_int_eq(ret, 0);
+
+ testvirgl_reset_fence();
+ last_seqno = 0;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = virgl_renderer_create_fence(base_seqno + i, 0);
+ ck_assert_int_eq(ret, 0);
+ }
+
+ do {
+ int seqno;
+
+ virgl_renderer_poll();
+ seqno = testvirgl_get_last_fence();
+ if (seqno == target_seqno)
+ break;
+
+ ck_assert(seqno == 0 || (seqno >= base_seqno && seqno < target_seqno));
+
+ /* monotonic increasing */
+ ck_assert_int_ge(seqno, last_seqno);
+ last_seqno = seqno;
+
+ usleep(1000);
+ } while(1);
+
+ testvirgl_fini_single_ctx();
+}
+END_TEST
+
+static int
+wait_sync_fd(int fd, int timeout)
+{
+ struct pollfd pollfd = {
+ .fd = fd,
+ .events = POLLIN,
+ };
+ int ret;
+ do {
+ ret = poll(&pollfd, 1, timeout);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ if (ret < 0)
+ return -errno;
+ else if (ret > 0 && !(pollfd.revents & POLLIN))
+ return -EINVAL;
+
+ return ret ? 0 : -ETIME;
+}
+
+START_TEST(virgl_fence_export)
+{
+ const int target_seqno = 50;
+ int fd;
+ int ret;
+
+ ret = testvirgl_init_single_ctx();
+ ck_assert_int_eq(ret, 0);
+
+ testvirgl_reset_fence();
+ ret = virgl_renderer_create_fence(target_seqno, 0);
+ ck_assert_int_eq(ret, 0);
+
+ ret = virgl_renderer_export_fence(target_seqno, &fd);
+ ck_assert_int_eq(ret, 0);
+
+ ret = wait_sync_fd(fd, -1);
+ ck_assert_int_eq(ret, 0);
+
+ virgl_renderer_poll();
+ ck_assert_int_eq(testvirgl_get_last_fence(), target_seqno);
+
+ close(fd);
+
+ testvirgl_fini_single_ctx();
+}
+END_TEST
+
+START_TEST(virgl_fence_export_signaled)
+{
+ const int target_seqno = 50;
+ const int test_range = 10;
+ int fd;
+ int ret;
+ int i;
+
+ ret = testvirgl_init_single_ctx();
+ ck_assert_int_eq(ret, 0);
+
+ /* when there is no active fence, a signaled fd is always returned */
+ for (i = 0; i < test_range; i++) {
+ ret = virgl_renderer_export_fence(target_seqno + 1 + i, &fd);
+ ck_assert_int_eq(ret, 0);
+
+ ret = wait_sync_fd(fd, 0);
+ ck_assert_int_eq(ret, 0);
+
+ close(fd);
+ }
+
+ ret = virgl_renderer_create_fence(target_seqno, 0);
+ ck_assert_int_eq(ret, 0);
+
+ /* when there is any active fence, a signaled fd is returned when the
+ * requested seqno is smaller than the first active fence
+ */
+ for (i = 0; i < test_range; i++) {
+ ret = virgl_renderer_export_fence(target_seqno - 1 - i, &fd);
+ ck_assert_int_eq(ret, 0);
+
+ ret = wait_sync_fd(fd, 0);
+ ck_assert_int_eq(ret, 0);
+
+ close(fd);
+ }
+
+ testvirgl_fini_single_ctx();
+}
+END_TEST
+
+START_TEST(virgl_fence_export_invalid)
+{
+ const int target_seqno = 50;
+ const int target_seqno2 = 55;
+ int seqno;
+ int fd;
+ int ret;
+
+ ret = testvirgl_init_single_ctx();
+ ck_assert_int_eq(ret, 0);
+
+ ret = virgl_renderer_create_fence(target_seqno, 0);
+ ck_assert_int_eq(ret, 0);
+ ret = virgl_renderer_create_fence(target_seqno2, 0);
+ ck_assert_int_eq(ret, 0);
+
+ for (seqno = target_seqno; seqno <= target_seqno2 + 1; seqno++) {
+ ret = virgl_renderer_export_fence(seqno, &fd);
+ if (seqno == target_seqno || seqno == target_seqno2) {
+ ck_assert_int_eq(ret, 0);
+ close(fd);
+ } else {
+ ck_assert_int_eq(ret, -EINVAL);
+ }
+ }
+
+ testvirgl_fini_single_ctx();
+}
+END_TEST
+
+static Suite *virgl_init_suite(bool include_fence_export)
+{
+ Suite *s;
+ TCase *tc_core;
+
+ s = suite_create("virgl_fence");
+ tc_core = tcase_create("fence");
+
+ tcase_add_test(tc_core, virgl_fence_create);
+ tcase_add_test(tc_core, virgl_fence_poll);
+ tcase_add_test(tc_core, virgl_fence_poll_many);
+
+ if (include_fence_export) {
+ tcase_add_test(tc_core, virgl_fence_export);
+ tcase_add_test(tc_core, virgl_fence_export_signaled);
+ tcase_add_test(tc_core, virgl_fence_export_invalid);
+ }
+
+ suite_add_tcase(s, tc_core);
+
+ return s;
+}
+
+static bool detect_fence_export_support(void)
+{
+ int dummy_cookie;
+ struct virgl_renderer_callbacks dummy_cbs;
+ int fd;
+ int ret;
+
+ memset(&dummy_cbs, 0, sizeof(dummy_cbs));
+ dummy_cbs.version = 1;
+
+ ret = virgl_renderer_init(&dummy_cookie, context_flags, &dummy_cbs);
+ if (ret)
+ return false;
+
+ ret = virgl_renderer_export_fence(0, &fd);
+ if (ret) {
+ virgl_renderer_cleanup(&dummy_cookie);
+ return false;
+ }
+
+ close(fd);
+ virgl_renderer_cleanup(&dummy_cookie);
+ return true;
+}
+
+int main(void)
+{
+ Suite *s;
+ SRunner *sr;
+ int number_failed;
+ bool include_fence_export = false;
+
+ if (getenv("VRENDTEST_USE_EGL_SURFACELESS"))
+ context_flags |= VIRGL_RENDERER_USE_SURFACELESS;
+ if (getenv("VRENDTEST_USE_EGL_GLES")) {
+ context_flags |= VIRGL_RENDERER_USE_GLES;
+ include_fence_export = detect_fence_export_support();
+ }
+
+ s = virgl_init_suite(include_fence_export);
+ sr = srunner_create(s);
+
+ srunner_run_all(sr, CK_NORMAL);
+ number_failed = srunner_ntests_failed(sr);
+ srunner_free(sr);
+
+ return number_failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/tests/test_virgl_init.c b/tests/test_virgl_init.c
index e61502c1..fd5cfd81 100644
--- a/tests/test_virgl_init.c
+++ b/tests/test_virgl_init.c
@@ -551,6 +551,8 @@ int main(void)
if (getenv("VRENDTEST_USE_EGL_SURFACELESS"))
context_flags |= VIRGL_RENDERER_USE_SURFACELESS;
+ if (getenv("VRENDTEST_USE_EGL_GLES"))
+ context_flags |= VIRGL_RENDERER_USE_GLES;
s = virgl_init_suite();
sr = srunner_create(s);
diff --git a/tests/test_virgl_resource.c b/tests/test_virgl_resource.c
index 2f81693e..7fd0698e 100644
--- a/tests/test_virgl_resource.c
+++ b/tests/test_virgl_resource.c
@@ -329,6 +329,8 @@ int main(void)
if (getenv("VRENDTEST_USE_EGL_SURFACELESS"))
context_flags |= VIRGL_RENDERER_USE_SURFACELESS;
+ if (getenv("VRENDTEST_USE_EGL_GLES"))
+ context_flags |= VIRGL_RENDERER_USE_GLES;
s = virgl_init_suite();
sr = srunner_create(s);
diff --git a/tests/test_virgl_transfer.c b/tests/test_virgl_transfer.c
index 2c8669ae..bf7f4381 100644
--- a/tests/test_virgl_transfer.c
+++ b/tests/test_virgl_transfer.c
@@ -1023,6 +1023,8 @@ int main(void)
if (getenv("VRENDTEST_USE_EGL_SURFACELESS"))
context_flags |= VIRGL_RENDERER_USE_SURFACELESS;
+ if (getenv("VRENDTEST_USE_EGL_GLES"))
+ context_flags |= VIRGL_RENDERER_USE_GLES;
s = virgl_init_suite();
sr = srunner_create(s);
diff --git a/vtest/vtest.h b/vtest/vtest.h
index d8f5a414..43f030e0 100644
--- a/vtest/vtest.h
+++ b/vtest/vtest.h
@@ -47,6 +47,7 @@ void vtest_cleanup_renderer(void);
int vtest_create_context(struct vtest_input *input, int out_fd,
uint32_t length_dw, struct vtest_context **out_ctx);
+int vtest_lazy_init_context(struct vtest_context *ctx);
void vtest_destroy_context(struct vtest_context *ctx);
void vtest_set_current_context(struct vtest_context *ctx);
@@ -77,6 +78,11 @@ int vtest_poll(void);
int vtest_ping_protocol_version(uint32_t length_dw);
int vtest_protocol_version(uint32_t length_dw);
+/* since protocol version 3 */
+int vtest_get_param(uint32_t length_dw);
+int vtest_get_capset(uint32_t length_dw);
+int vtest_context_init(uint32_t length_dw);
+
void vtest_set_max_length(uint32_t length);
#endif
diff --git a/vtest/vtest_fuzzer.c b/vtest/vtest_fuzzer.c
index 68d1b147..9fe09586 100644
--- a/vtest/vtest_fuzzer.c
+++ b/vtest/vtest_fuzzer.c
@@ -109,6 +109,9 @@ static void vtest_fuzzer_run_renderer(int out_fd, struct vtest_input *input,
if (ret >= 0) {
ret = vtest_create_context(input, out_fd, header[0], &context);
}
+ if (ret >= 0) {
+ ret = vtest_lazy_init_context(context);
+ }
if (ret < 0) {
break;
}
diff --git a/vtest/vtest_protocol.h b/vtest/vtest_protocol.h
index 68090f21..81e1fbff 100644
--- a/vtest/vtest_protocol.h
+++ b/vtest/vtest_protocol.h
@@ -64,6 +64,13 @@
#define VCMD_TRANSFER_GET2 13
#define VCMD_TRANSFER_PUT2 14
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+/* since protocol version 3 */
+#define VCMD_GET_PARAM 15
+#define VCMD_GET_CAPSET 16
+#define VCMD_CONTEXT_INIT 17
+#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
+
#define VCMD_RES_CREATE_SIZE 10
#define VCMD_RES_CREATE_RES_HANDLE 0
#define VCMD_RES_CREATE_TARGET 1
@@ -128,4 +135,20 @@
#define VCMD_PROTOCOL_VERSION_SIZE 1
#define VCMD_PROTOCOL_VERSION_VERSION 0
-#endif
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+
+#define VCMD_GET_PARAM_SIZE 1
+#define VCMD_GET_PARAM_PARAM 0
+/* resp param validity and value */
+
+#define VCMD_GET_CAPSET_SIZE 2
+#define VCMD_GET_CAPSET_ID 0
+#define VCMD_GET_CAPSET_VERSION 1
+/* resp capset validity and contents */
+
+#define VCMD_CONTEXT_INIT_SIZE 1
+#define VCMD_CONTEXT_INIT_CAPSET_ID 0
+
+#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
+
+#endif /* VTEST_PROTOCOL */
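
VCMD_GET_CAPSET is a two-dword request (capset id, then version) answered with a validity dword followed by the capset contents; like the other protocol-version-3 commands it sits behind VIRGL_RENDERER_UNSTABLE_APIS. A sketch of the client-side encoding using the header's existing macros (VTEST_HDR_SIZE, VTEST_CMD_LEN, VTEST_CMD_ID); write_all() and my_send_get_capset() are hypothetical helpers.

#include <stdint.h>
#include <unistd.h>
#include "vtest_protocol.h"

/* hypothetical helper: write the whole buffer, retrying short writes */
static int write_all(int fd, const void *data, size_t size)
{
   const uint8_t *ptr = data;
   while (size) {
      ssize_t r = write(fd, ptr, size);
      if (r <= 0)
         return -1;
      ptr += r;
      size -= (size_t)r;
   }
   return 0;
}

static int my_send_get_capset(int fd, uint32_t capset_id, uint32_t version)
{
   uint32_t buf[VTEST_HDR_SIZE + VCMD_GET_CAPSET_SIZE];

   buf[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
   buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   buf[VTEST_HDR_SIZE + VCMD_GET_CAPSET_ID] = capset_id;
   buf[VTEST_HDR_SIZE + VCMD_GET_CAPSET_VERSION] = version;

   /* the reply is read back as: validity dword, then the capset blob */
   return write_all(fd, buf, sizeof(buf));
}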
diff --git a/vtest/vtest_renderer.c b/vtest/vtest_renderer.c
index 58631c97..61e81e4b 100644
--- a/vtest/vtest_renderer.c
+++ b/vtest/vtest_renderer.c
@@ -61,7 +61,11 @@ struct vtest_context {
struct vtest_input *input;
int out_fd;
+ char *debug_name;
+
unsigned protocol_version;
+ unsigned capset_id;
+ bool context_initialized;
struct util_hash_table *resource_table;
};
@@ -278,9 +282,7 @@ void vtest_cleanup_renderer(void)
struct vtest_context *ctx, *tmp;
LIST_FOR_EACH_ENTRY_SAFE(ctx, tmp, &renderer.active_contexts, head) {
- virgl_renderer_context_destroy(ctx->ctx_id);
- util_hash_table_clear(ctx->resource_table);
- vtest_free_context(ctx, true);
+ vtest_destroy_context(ctx);
}
LIST_FOR_EACH_ENTRY_SAFE(ctx, tmp, &renderer.free_contexts, head) {
vtest_free_context(ctx, true);
@@ -323,8 +325,11 @@ static struct vtest_context *vtest_new_context(struct vtest_input *input,
ctx->input = input;
ctx->out_fd = out_fd;
+ ctx->debug_name = NULL;
/* By default we support version 0 unless VCMD_PROTOCOL_VERSION is sent */
ctx->protocol_version = 0;
+ ctx->capset_id = 0;
+ ctx->context_initialized = false;
return ctx;
}
@@ -358,26 +363,46 @@ int vtest_create_context(struct vtest_input *input, int out_fd,
vtestname = calloc(1, length + 1);
if (!vtestname) {
ret = -1;
- goto end;
+ goto err;
}
ret = ctx->input->read(ctx->input, vtestname, length);
if (ret != (int)length) {
ret = -1;
- goto end;
+ goto err;
}
- ret = virgl_renderer_context_create(ctx->ctx_id, strlen(vtestname), vtestname);
+ ctx->debug_name = vtestname;
-end:
+ list_addtail(&ctx->head, &renderer.active_contexts);
+ *out_ctx = ctx;
+
+ return 0;
+
+err:
free(vtestname);
+ vtest_free_context(ctx, false);
+ return ret;
+}
- if (ret) {
- vtest_free_context(ctx, false);
+int vtest_lazy_init_context(struct vtest_context *ctx)
+{
+ int ret;
+
+ if (ctx->context_initialized)
+ return 0;
+
+ if (ctx->capset_id) {
+ ret = virgl_renderer_context_create_with_flags(ctx->ctx_id,
+ ctx->capset_id,
+ strlen(ctx->debug_name),
+ ctx->debug_name);
} else {
- list_addtail(&ctx->head, &renderer.active_contexts);
- *out_ctx = ctx;
+ ret = virgl_renderer_context_create(ctx->ctx_id,
+ strlen(ctx->debug_name),
+ ctx->debug_name);
}
+ ctx->context_initialized = (ret == 0);
return ret;
}
@@ -389,7 +414,9 @@ void vtest_destroy_context(struct vtest_context *ctx)
}
list_del(&ctx->head);
- virgl_renderer_context_destroy(ctx->ctx_id);
+ free(ctx->debug_name);
+ if (ctx->context_initialized)
+ virgl_renderer_context_destroy(ctx->ctx_id);
util_hash_table_clear(ctx->resource_table);
vtest_free_context(ctx, false);
}
@@ -471,6 +498,111 @@ int vtest_protocol_version(UNUSED uint32_t length_dw)
return 0;
}
+int vtest_get_param(UNUSED uint32_t length_dw)
+{
+ struct vtest_context *ctx = vtest_get_current_context();
+ uint32_t get_param_buf[VCMD_GET_PARAM_SIZE];
+ uint32_t resp_buf[VTEST_HDR_SIZE + 2];
+ uint32_t param;
+ uint32_t *resp;
+ int ret;
+
+ ret = ctx->input->read(ctx->input, get_param_buf, sizeof(get_param_buf));
+ if (ret != sizeof(get_param_buf))
+ return -1;
+
+ param = get_param_buf[VCMD_GET_PARAM_PARAM];
+
+ resp_buf[VTEST_CMD_LEN] = 2;
+ resp_buf[VTEST_CMD_ID] = VCMD_GET_PARAM;
+ resp = &resp_buf[VTEST_CMD_DATA_START];
+ switch (param) {
+ default:
+ resp[0] = false;
+ resp[1] = 0;
+ break;
+ }
+
+ ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+int vtest_get_capset(UNUSED uint32_t length_dw)
+{
+ struct vtest_context *ctx = vtest_get_current_context();
+ uint32_t get_capset_buf[VCMD_GET_CAPSET_SIZE];
+ uint32_t resp_buf[VTEST_HDR_SIZE + 1];
+ uint32_t id;
+ uint32_t version;
+ uint32_t max_version;
+ uint32_t max_size;
+ void *caps;
+ int ret;
+
+ ret = ctx->input->read(ctx->input, get_capset_buf, sizeof(get_capset_buf));
+ if (ret != sizeof(get_capset_buf))
+ return -1;
+
+ id = get_capset_buf[VCMD_GET_CAPSET_ID];
+ version = get_capset_buf[VCMD_GET_CAPSET_VERSION];
+
+ virgl_renderer_get_cap_set(id, &max_version, &max_size);
+
+ /* unsupported id or version */
+ if ((!max_version && !max_size) || version > max_version) {
+ resp_buf[VTEST_CMD_LEN] = 1;
+ resp_buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
+ resp_buf[VTEST_CMD_DATA_START] = false;
+ return vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
+ }
+
+ if (max_size % 4)
+ return -EINVAL;
+
+ caps = malloc(max_size);
+ if (!caps)
+ return -ENOMEM;
+
+ virgl_renderer_fill_caps(id, version, caps);
+
+ resp_buf[VTEST_CMD_LEN] = 1 + max_size / 4;
+ resp_buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
+ resp_buf[VTEST_CMD_DATA_START] = true;
+ ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
+ if (ret >= 0)
+ ret = vtest_block_write(ctx->out_fd, caps, max_size);
+
+ free(caps);
+ return ret >= 0 ? 0 : ret;
+}
+
+int vtest_context_init(UNUSED uint32_t length_dw)
+{
+ struct vtest_context *ctx = vtest_get_current_context();
+ uint32_t context_init_buf[VCMD_CONTEXT_INIT_SIZE];
+ uint32_t capset_id;
+ int ret;
+
+ ret = ctx->input->read(ctx->input, context_init_buf, sizeof(context_init_buf));
+ if (ret != sizeof(context_init_buf))
+ return -1;
+
+ capset_id = context_init_buf[VCMD_CONTEXT_INIT_CAPSET_ID];
+ if (!capset_id)
+ return -EINVAL;
+
+ if (ctx->context_initialized) {
+ return ctx->capset_id == capset_id ? 0 : -EINVAL;
+ }
+
+ ctx->capset_id = capset_id;
+
+ return vtest_lazy_init_context(ctx);
+}
+
int vtest_send_caps2(UNUSED uint32_t length_dw)
{
struct vtest_context *ctx = vtest_get_current_context();
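A matching client-side sketch for VCMD_GET_CAPSET (not part of the patch),
mirroring the response layout written by vtest_get_capset() above: a two-dword
header, one validity dword, then max_size bytes of caps. The write_all() and
read_all() helpers are hypothetical and simply loop until the whole buffer has
been transferred:

    #define VIRGL_RENDERER_UNSTABLE_APIS
    #include <stdint.h>
    #include <unistd.h>
    #include "vtest_protocol.h"

    static int write_all(int fd, const void *buf, size_t len)
    {
       const uint8_t *p = buf;
       while (len) {
          ssize_t n = write(fd, p, len);
          if (n <= 0)
             return -1;
          p += n;
          len -= n;
       }
       return 0;
    }

    static int read_all(int fd, void *buf, size_t len)
    {
       uint8_t *p = buf;
       while (len) {
          ssize_t n = read(fd, p, len);
          if (n <= 0)
             return -1;
          p += n;
          len -= n;
       }
       return 0;
    }

    static int vtest_client_get_capset(int fd, uint32_t id, uint32_t version,
                                       void *caps, size_t caps_size)
    {
       uint32_t req[VTEST_HDR_SIZE + VCMD_GET_CAPSET_SIZE];
       uint32_t hdr[VTEST_HDR_SIZE];
       uint32_t valid;
       size_t payload;

       req[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
       req[VTEST_CMD_ID] = VCMD_GET_CAPSET;
       req[VTEST_HDR_SIZE + VCMD_GET_CAPSET_ID] = id;
       req[VTEST_HDR_SIZE + VCMD_GET_CAPSET_VERSION] = version;
       if (write_all(fd, req, sizeof(req)))
          return -1;

       /* response: header, validity dword, then the capset contents */
       if (read_all(fd, hdr, sizeof(hdr)) || read_all(fd, &valid, sizeof(valid)))
          return -1;
       if (!valid)
          return -1;

       payload = (hdr[VTEST_CMD_LEN] - 1) * 4;
       if (payload > caps_size)
          return -1;   /* a real client would still drain the socket */

       return read_all(fd, caps, payload);
    }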
@@ -1054,6 +1186,14 @@ int vtest_resource_busy_wait(UNUSED uint32_t length_dw)
return -1;
}
+ /* clients often send VCMD_PING_PROTOCOL_VERSION followed by
+ * VCMD_RESOURCE_BUSY_WAIT with handle 0 to figure out if
+ * VCMD_PING_PROTOCOL_VERSION is supported. We need to make a special case
+ * for that.
+ */
+ if (!ctx->context_initialized && bw_buf[VCMD_BUSY_WAIT_HANDLE])
+ return -1;
+
/* handle = bw_buf[VCMD_BUSY_WAIT_HANDLE]; unused as of now */
flags = bw_buf[VCMD_BUSY_WAIT_FLAGS];
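To illustrate the probe described in the comment above, a rough client-side
sketch (not part of the patch): the client sends VCMD_PING_PROTOCOL_VERSION
immediately followed by a VCMD_RESOURCE_BUSY_WAIT on handle 0 and checks which
reply arrives first. It assumes the ping carries no payload and is answered
with a header-only reply; short read/write handling and draining of the
remaining replies are omitted:

    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>
    #include "vtest_protocol.h"

    static bool vtest_client_ping_supported(int fd)
    {
       uint32_t ping[VTEST_HDR_SIZE];
       uint32_t busy[VTEST_HDR_SIZE + 2];
       uint32_t reply[VTEST_HDR_SIZE];

       ping[VTEST_CMD_LEN] = 0;
       ping[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

       busy[VTEST_CMD_LEN] = 2;
       busy[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
       busy[VTEST_HDR_SIZE + VCMD_BUSY_WAIT_HANDLE] = 0;
       busy[VTEST_HDR_SIZE + VCMD_BUSY_WAIT_FLAGS] = 0;

       (void)write(fd, ping, sizeof(ping));
       (void)write(fd, busy, sizeof(busy));

       /* an old server only answers the busy-wait */
       if (read(fd, reply, sizeof(reply)) != (ssize_t)sizeof(reply))
          return false;
       return reply[VTEST_CMD_ID] == VCMD_PING_PROTOCOL_VERSION;
    }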
diff --git a/vtest/vtest_server.c b/vtest/vtest_server.c
index 821577d0..835c359b 100644
--- a/vtest/vtest_server.c
+++ b/vtest/vtest_server.c
@@ -580,28 +580,39 @@ static void vtest_server_run(void)
vtest_server_close_socket();
}
-typedef int (*vtest_cmd_fptr_t)(uint32_t);
-
-static const vtest_cmd_fptr_t vtest_commands[] = {
- NULL /* CMD ids starts at 1 */,
- vtest_send_caps,
- vtest_create_resource,
- vtest_resource_unref,
- vtest_transfer_get,
- vtest_transfer_put,
- vtest_submit_cmd,
- vtest_resource_busy_wait,
- NULL, /* VCMD_CREATE_RENDERER is a specific case */
- vtest_send_caps2,
- vtest_ping_protocol_version,
- vtest_protocol_version,
- vtest_create_resource2,
- vtest_transfer_get2,
- vtest_transfer_put2,
+static const struct vtest_command {
+ int (*dispatch)(uint32_t);
+ bool init_context;
+} vtest_commands[] = {
+ /* CMD ids start at 1 */
+ [0] = { NULL, false },
+ [VCMD_GET_CAPS] = { vtest_send_caps, false },
+ [VCMD_RESOURCE_CREATE] = { vtest_create_resource, true },
+ [VCMD_RESOURCE_UNREF] = { vtest_resource_unref, true },
+ [VCMD_TRANSFER_GET] = { vtest_transfer_get, true },
+ [VCMD_TRANSFER_PUT] = { vtest_transfer_put, true },
+ [VCMD_SUBMIT_CMD] = { vtest_submit_cmd, true },
+ [VCMD_RESOURCE_BUSY_WAIT] = { vtest_resource_busy_wait, false },
+ /* VCMD_CREATE_RENDERER is a special case */
+ [VCMD_CREATE_RENDERER] = { NULL, false },
+ [VCMD_GET_CAPS2] = { vtest_send_caps2, false },
+ [VCMD_PING_PROTOCOL_VERSION] = { vtest_ping_protocol_version, false },
+ [VCMD_PROTOCOL_VERSION] = { vtest_protocol_version, false },
+
+ /* since protocol version 2 */
+ [VCMD_RESOURCE_CREATE2] = { vtest_create_resource2, true },
+ [VCMD_TRANSFER_GET2] = { vtest_transfer_get2, true },
+ [VCMD_TRANSFER_PUT2] = { vtest_transfer_put2, true },
+
+ /* since protocol version 3 */
+ [VCMD_GET_PARAM] = { vtest_get_param, false },
+ [VCMD_GET_CAPSET] = { vtest_get_capset, false },
+ [VCMD_CONTEXT_INIT] = { vtest_context_init, false },
};
static int vtest_client_dispatch_commands(struct vtest_client *client)
{
+ const struct vtest_command *cmd;
int ret;
uint32_t header[VTEST_HDR_SIZE];
@@ -632,13 +643,22 @@ static int vtest_client_dispatch_commands(struct vtest_client *client)
return VTEST_CLIENT_ERROR_COMMAND_ID;
}
- if (vtest_commands[header[1]] == NULL) {
+ cmd = &vtest_commands[header[1]];
+ if (cmd->dispatch == NULL) {
return VTEST_CLIENT_ERROR_COMMAND_UNEXPECTED;
}
+ /* we should consider a per-context dispatch table to get rid of these ifs */
+ if (cmd->init_context) {
+ ret = vtest_lazy_init_context(client->context);
+ if (ret) {
+ return VTEST_CLIENT_ERROR_CONTEXT_FAILED;
+ }
+ }
+
vtest_set_current_context(client->context);
- ret = vtest_commands[header[1]](header[0]);
+ ret = cmd->dispatch(header[0]);
if (ret < 0) {
return VTEST_CLIENT_ERROR_COMMAND_DISPATCH;
}
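Regarding the per-context dispatch table suggested in the comment above, one
possible shape (purely speculative, not part of this change) is to route
commands for a not-yet-initialized context through a wrapper that performs the
lazy init and then forwards, so the init_context branch drops out of the
common path:

    /* Speculative sketch: an entry in an "uninitialized context" dispatch
     * table; once vtest_lazy_init_context() succeeds, the context would be
     * switched over to a table of the plain handlers. */
    static int vtest_dispatch_lazy(struct vtest_client *client,
                                   int (*fn)(uint32_t), uint32_t length_dw)
    {
       int ret = vtest_lazy_init_context(client->context);
       if (ret)
          return ret;
       return fn(length_dw);
    }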