aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2023-07-07 05:16:44 +0000
committerAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2023-07-07 05:16:44 +0000
commit633e59bba2f8ff3d9b4c77b7b4fb7140e5679405 (patch)
treef077cb6e83127e951533857ea5d6f241363b0edb
parent00d361e7138fec6beeed6a18968d1aecf40e2b86 (diff)
parent0a702ccae60f64c78e9b39c3767ecaaf51835726 (diff)
downloadvirglrenderer-android14-mainline-sdkext-release.tar.gz
Snap for 10453563 from 0a702ccae60f64c78e9b39c3767ecaaf51835726 to mainline-sdkext-releaseaml_sdk_341510000aml_sdk_341410000aml_sdk_341110080aml_sdk_341110000aml_sdk_341010000aml_sdk_340912010android14-mainline-sdkext-release
Change-Id: I9729dacb12b3822b3cd2fcea0206592c260c5d28
-rw-r--r--.editorconfig31
-rw-r--r--.gitignore1
-rw-r--r--.gitlab-ci.yml349
-rwxr-xr-x.gitlab-ci/container/debian/x86_test.sh104
-rw-r--r--.gitlab-ci/expectations/host/deqp-virgl-gl.toml51
-rw-r--r--.gitlab-ci/expectations/host/deqp-virgl-gles.toml36
-rw-r--r--.gitlab-ci/expectations/host/virgl-gl-fails.txt546
-rw-r--r--.gitlab-ci/expectations/host/virgl-gl-flakes.txt392
-rw-r--r--.gitlab-ci/expectations/host/virgl-gl-skips.txt49
-rw-r--r--.gitlab-ci/expectations/host/virgl-gles-fails.txt3173
-rw-r--r--.gitlab-ci/expectations/host/virgl-gles-flakes.txt49
-rw-r--r--.gitlab-ci/expectations/host/virgl-gles-skips.txt170
-rw-r--r--.gitlab-ci/expectations/virt/deqp-venus.toml6
-rw-r--r--.gitlab-ci/expectations/virt/deqp-virgl-gl.toml57
-rw-r--r--.gitlab-ci/expectations/virt/deqp-virgl-gles.toml57
-rw-r--r--.gitlab-ci/expectations/virt/traces-virgl.yml311
-rw-r--r--.gitlab-ci/expectations/virt/venus-fails.txt15
-rw-r--r--.gitlab-ci/expectations/virt/venus-flakes.txt8
-rw-r--r--.gitlab-ci/expectations/virt/venus-skips.txt2
-rw-r--r--.gitlab-ci/expectations/virt/virgl-gl-fails.txt543
-rw-r--r--.gitlab-ci/expectations/virt/virgl-gl-flakes.txt81
-rw-r--r--.gitlab-ci/expectations/virt/virgl-gl-skips.txt75
-rw-r--r--.gitlab-ci/expectations/virt/virgl-gles-fails.txt3225
-rw-r--r--.gitlab-ci/expectations/virt/virgl-gles-flakes.txt158
-rw-r--r--.gitlab-ci/expectations/virt/virgl-gles-skips.txt181
-rwxr-xr-x.gitlab-ci/meson/build.sh96
-rwxr-xr-x.gitlab-ci/meson/time-strace.sh27
-rwxr-xr-x.gitlab-ci/meson/time.sh17
-rw-r--r--Android.bp50
-rw-r--r--METADATA19
-rw-r--r--ci/.gitlab-ci.yml125
-rw-r--r--config.h.meson42
-rw-r--r--meson.build191
-rw-r--r--meson_options.txt39
-rw-r--r--prebuilt-intermediates/config.h83
-rw-r--r--prebuilt-intermediates/src/u_format_table.c684
l---------server/.clang-format1
-rw-r--r--server/main.c40
-rw-r--r--server/meson.build33
-rw-r--r--server/render_client.c325
-rw-r--r--server/render_client.h29
-rw-r--r--server/render_common.c26
-rw-r--r--server/render_common.h38
-rw-r--r--server/render_context.c474
-rw-r--r--server/render_context.h52
-rw-r--r--server/render_protocol.h226
-rw-r--r--server/render_server.c220
-rw-r--r--server/render_server.h37
-rw-r--r--server/render_socket.c262
-rw-r--r--server/render_socket.h55
-rw-r--r--server/render_virgl.c162
-rw-r--r--server/render_virgl.h70
-rw-r--r--server/render_worker.c426
-rw-r--r--server/render_worker.h50
-rw-r--r--src/drm/.clang-format137
-rw-r--r--src/drm/drm-uapi/msm_drm.h382
-rw-r--r--src/drm/drm_fence.c169
-rw-r--r--src/drm/drm_fence.h52
-rw-r--r--src/drm/drm_renderer.c137
-rw-r--r--src/drm/drm_renderer.h60
-rw-r--r--src/drm/drm_util.c48
-rw-r--r--src/drm/drm_util.h34
-rw-r--r--src/drm/linux/overflow.h250
-rw-r--r--src/drm/msm/msm_proto.h350
-rw-r--r--src/drm/msm/msm_renderer.c1286
-rw-r--r--src/drm/msm/msm_renderer.h25
-rw-r--r--src/drm_hw.h33
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_cache.h1
-rw-r--r--src/gallium/auxiliary/os/os_memory_debug.h92
-rw-r--r--src/gallium/auxiliary/os/os_mman.h87
-rw-r--r--src/gallium/auxiliary/os/os_thread.h312
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_build.c2
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_dump.c20
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_info.c4
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h218
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_parse.c28
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_scan.c6
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_strings.c23
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_strings.h2
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_text.c60
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_transform.c250
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_transform.h96
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_ureg.c1736
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_ureg.h1206
-rw-r--r--src/gallium/auxiliary/util/rgtc.c61
-rw-r--r--src/gallium/auxiliary/util/rgtc.h39
-rw-r--r--src/gallium/auxiliary/util/u_atomic.h349
-rw-r--r--src/gallium/auxiliary/util/u_bitmask.c328
-rw-r--r--src/gallium/auxiliary/util/u_bitmask.h117
-rw-r--r--src/gallium/auxiliary/util/u_box.h80
-rw-r--r--src/gallium/auxiliary/util/u_cpu_detect.c458
-rw-r--r--src/gallium/auxiliary/util/u_debug_describe.c20
-rw-r--r--src/gallium/auxiliary/util/u_double_list.h116
-rw-r--r--src/gallium/auxiliary/util/u_format.c1
-rw-r--r--src/gallium/auxiliary/util/u_format.csv1
-rwxr-xr-xsrc/gallium/auxiliary/util/u_format_table.py2
-rw-r--r--src/gallium/auxiliary/util/u_hash_table.c319
-rw-r--r--src/gallium/auxiliary/util/u_hash_table.h25
-rw-r--r--src/gallium/auxiliary/util/u_inlines.h401
-rw-r--r--src/gallium/auxiliary/util/u_math.c139
-rw-r--r--src/gallium/auxiliary/util/u_pack_color.h87
-rw-r--r--src/gallium/auxiliary/util/u_rect.h104
-rw-r--r--src/gallium/auxiliary/util/u_string.h232
-rw-r--r--src/gallium/auxiliary/util/u_surface.c462
-rw-r--r--src/gallium/auxiliary/util/u_surface.h111
-rw-r--r--src/gallium/include/pipe/p_context.h546
-rw-r--r--src/gallium/include/pipe/p_defines.h334
-rw-r--r--src/gallium/include/pipe/p_format.h5
-rw-r--r--src/gallium/include/pipe/p_screen.h230
-rw-r--r--src/gallium/include/pipe/p_shader_tokens.h753
-rw-r--r--src/gallium/include/pipe/p_state.h4
-rw-r--r--src/gallium/include/pipe/p_video_enums.h75
-rw-r--r--src/gallium/include/pipe/p_video_state.h67
-rw-r--r--src/gallium/meson.build47
-rw-r--r--src/mesa/compat/c11/threads.h (renamed from src/gallium/include/c11/threads.h)6
-rw-r--r--src/mesa/compat/c11/threads_posix.h (renamed from src/gallium/include/c11/threads_posix.h)74
-rw-r--r--src/mesa/compat/c11/threads_win32.h (renamed from src/gallium/include/c11/threads_win32.h)258
-rw-r--r--src/mesa/compat/c11_compat.h27
-rw-r--r--src/mesa/compat/c99_compat.h (renamed from src/gallium/include/c99_compat.h)10
-rw-r--r--src/mesa/compat/c99_math.h211
-rw-r--r--src/mesa/compat/no_extern_c.h (renamed from src/gallium/include/no_extern_c.h)2
-rw-r--r--src/mesa/meson.build34
-rw-r--r--src/mesa/pipe/p_compiler.h (renamed from src/gallium/include/pipe/p_compiler.h)85
-rw-r--r--src/mesa/pipe/p_config.h (renamed from src/gallium/include/pipe/p_config.h)92
-rw-r--r--src/mesa/util/anon_file.c166
-rw-r--r--src/mesa/util/anon_file.h33
-rw-r--r--src/mesa/util/bitscan.c (renamed from src/gallium/auxiliary/os/os_misc.c)97
-rw-r--r--src/mesa/util/bitscan.h356
-rw-r--r--src/mesa/util/compiler.h89
-rw-r--r--src/mesa/util/detect_os.h131
-rw-r--r--src/mesa/util/fast_urem_by_const.h74
-rw-r--r--src/mesa/util/futex.h120
-rw-r--r--src/mesa/util/hash_table.c906
-rw-r--r--src/mesa/util/hash_table.h197
-rw-r--r--src/mesa/util/list.h270
-rw-r--r--src/mesa/util/macros.h480
-rw-r--r--src/mesa/util/os_file.c227
-rw-r--r--src/mesa/util/os_file.h59
-rw-r--r--src/mesa/util/os_memory.h (renamed from src/gallium/auxiliary/os/os_memory.h)16
-rw-r--r--src/mesa/util/os_memory_aligned.h128
-rw-r--r--src/mesa/util/os_memory_stdc.h (renamed from src/gallium/auxiliary/os/os_memory_stdc.h)20
-rw-r--r--src/mesa/util/os_misc.c361
-rw-r--r--src/mesa/util/os_misc.h (renamed from src/gallium/auxiliary/os/os_misc.h)42
-rw-r--r--src/mesa/util/ralloc.c936
-rw-r--r--src/mesa/util/ralloc.h604
-rw-r--r--src/mesa/util/simple_mtx.h170
-rw-r--r--src/mesa/util/u_atomic.h272
-rw-r--r--src/mesa/util/u_cpu_detect.c868
-rw-r--r--src/mesa/util/u_cpu_detect.h (renamed from src/gallium/auxiliary/util/u_cpu_detect.h)71
-rw-r--r--src/mesa/util/u_debug.c (renamed from src/gallium/auxiliary/util/u_debug.c)344
-rw-r--r--src/mesa/util/u_debug.h (renamed from src/gallium/auxiliary/util/u_debug.h)227
-rw-r--r--src/mesa/util/u_endian.h (renamed from src/gallium/auxiliary/os/os_memory_aligned.h)52
-rw-r--r--src/mesa/util/u_math.c311
-rw-r--r--src/mesa/util/u_math.h (renamed from src/gallium/auxiliary/util/u_math.h)672
-rw-r--r--src/mesa/util/u_memory.h (renamed from src/gallium/auxiliary/util/u_memory.h)27
-rw-r--r--src/mesa/util/u_string.h131
-rw-r--r--src/mesa/util/u_thread.h396
-rw-r--r--src/mesa/util/xxhash.h (renamed from src/gallium/auxiliary/util/xxhash.h)74
-rw-r--r--src/meson.build63
l---------src/proxy/.clang-format1
-rw-r--r--src/proxy/proxy_client.c115
-rw-r--r--src/proxy/proxy_client.h34
-rw-r--r--src/proxy/proxy_common.c47
-rw-r--r--src/proxy/proxy_common.h46
-rw-r--r--src/proxy/proxy_context.c683
-rw-r--r--src/proxy/proxy_context.h64
-rw-r--r--src/proxy/proxy_renderer.c50
-rw-r--r--src/proxy/proxy_renderer.h66
-rw-r--r--src/proxy/proxy_server.c125
-rw-r--r--src/proxy/proxy_server.h27
-rw-r--r--src/proxy/proxy_socket.c258
-rw-r--r--src/proxy/proxy_socket.h51
-rw-r--r--src/venus/.clang-format8
-rw-r--r--src/venus/venus-protocol/vk_platform.h4
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer.h4
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_buffer.h198
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_command_buffer.h3527
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_defines.h512
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_descriptor_pool.h95
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_descriptor_set.h143
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_descriptor_set_layout.h11
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_device.h6818
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_device_memory.h255
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_dispatches.h110
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_fence.h52
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_handles.h41
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_image.h320
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_image_view.h68
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_info.h363
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_pipeline.h817
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_private_data_slot.h324
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_queue.h295
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_render_pass.h29
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_sampler.h70
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_semaphore.h186
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_shader_module.h69
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_structs.h497
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_transport.h212
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_types.h276
-rw-r--r--src/venus/venus-protocol/vn_protocol_renderer_util.h703
-rw-r--r--src/venus/venus-protocol/vulkan.h3
-rw-r--r--src/venus/venus-protocol/vulkan_core.h3524
-rw-r--r--src/venus/vkr_allocator.c283
-rw-r--r--src/venus/vkr_allocator.h75
-rw-r--r--src/venus/vkr_buffer.c90
-rw-r--r--src/venus/vkr_command_buffer.c660
-rw-r--r--src/venus/vkr_command_buffer.h8
-rw-r--r--src/venus/vkr_common.c140
-rw-r--r--src/venus/vkr_common.h65
-rw-r--r--src/venus/vkr_context.c405
-rw-r--r--src/venus/vkr_context.h84
-rw-r--r--src/venus/vkr_cs.c25
-rw-r--r--src/venus/vkr_cs.h39
-rw-r--r--src/venus/vkr_descriptor_set.c24
-rw-r--r--src/venus/vkr_descriptor_set.h8
-rw-r--r--src/venus/vkr_device.c214
-rw-r--r--src/venus/vkr_device.h28
-rw-r--r--src/venus/vkr_device_memory.c317
-rw-r--r--src/venus/vkr_device_memory.h20
-rw-r--r--src/venus/vkr_device_object.py51
-rw-r--r--src/venus/vkr_image.c126
-rw-r--r--src/venus/vkr_instance.c16
-rw-r--r--src/venus/vkr_instance.h3
-rw-r--r--src/venus/vkr_physical_device.c198
-rw-r--r--src/venus/vkr_physical_device.h11
-rw-r--r--src/venus/vkr_pipeline.c22
-rw-r--r--src/venus/vkr_query_pool.c12
-rw-r--r--src/venus/vkr_queue.c239
-rw-r--r--src/venus/vkr_queue.h12
-rw-r--r--src/venus/vkr_render_pass.c10
-rw-r--r--src/venus/vkr_renderer.c25
-rw-r--r--src/venus/vkr_renderer.h6
-rw-r--r--src/venus/vkr_ring.c53
-rw-r--r--src/venus/vkr_ring.h4
-rw-r--r--src/venus/vkr_transport.c60
-rw-r--r--src/venus_hw.h32
-rw-r--r--src/virgl_context.c5
-rw-r--r--src/virgl_context.h22
-rw-r--r--src/virgl_hw.h59
-rw-r--r--src/virgl_protocol.h87
-rw-r--r--src/virgl_resource.c69
-rw-r--r--src/virgl_resource.h45
-rw-r--r--src/virgl_util.c20
-rw-r--r--src/virgl_util.h10
-rw-r--r--src/virgl_video.c2347
-rw-r--r--src/virgl_video.h161
-rw-r--r--src/virgl_video_hw.h585
-rw-r--r--src/virglrenderer.c262
-rw-r--r--src/virglrenderer.h77
-rw-r--r--src/virglrenderer_hw.h3
-rw-r--r--src/vrend_blitter.c523
-rw-r--r--src/vrend_blitter.h139
-rw-r--r--src/vrend_debug.c12
-rw-r--r--src/vrend_debug.h19
-rw-r--r--src/vrend_decode.c293
-rw-r--r--src/vrend_formats.c187
-rw-r--r--src/vrend_object.c4
-rw-r--r--src/vrend_renderer.c3022
-rw-r--r--src/vrend_renderer.h62
-rw-r--r--src/vrend_shader.c3473
-rw-r--r--src/vrend_shader.h125
-rw-r--r--src/vrend_strbuf.h41
-rw-r--r--src/vrend_video.c771
-rw-r--r--src/vrend_video.h95
-rw-r--r--src/vrend_winsys.c46
-rw-r--r--src/vrend_winsys.h2
-rw-r--r--src/vrend_winsys_egl.c120
-rw-r--r--src/vrend_winsys_egl.h5
-rw-r--r--src/vrend_winsys_gbm.c4
-rw-r--r--src/vrend_winsys_glx.c4
-rw-r--r--tests/fuzzer/meson.build8
-rw-r--r--tests/fuzzer/virgl_drm_fuzzer.c (renamed from tests/fuzzer/virgl_venus_fuzzer.c)36
-rw-r--r--tests/fuzzer/virgl_fuzzer.c9
-rw-r--r--tests/meson.build39
-rw-r--r--tests/test_fuzzer_formats.c58
-rw-r--r--tests/test_virgl_cmd.c11
-rw-r--r--tests/test_virgl_strbuf.c18
-rw-r--r--tests/test_virgl_transfer.c53
-rw-r--r--tests/testvirgl_encode.c12
-rw-r--r--tests/testvirgl_encode.h1
-rw-r--r--virglrenderer.pc.in10
-rw-r--r--vtest/vtest_fuzzer.c2
-rw-r--r--vtest/vtest_protocol.h13
-rw-r--r--vtest/vtest_renderer.c138
-rw-r--r--vtest/vtest_server.c21
285 files changed, 57343 insertions, 16228 deletions
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..5ceb0aee
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,31 @@
+# To use this config on you editor, follow the instructions at:
+# http://editorconfig.org
+
+root = true
+
+[*]
+charset = utf-8
+insert_final_newline = true
+tab_width = 3
+
+[*.{c,h,cpp,hpp,cc,hh}]
+indent_style = space
+indent_size = 3
+max_line_length = 90
+
+[{*.py,SCons*}]
+indent_style = space
+indent_size = 4
+
+[*.yml]
+indent_style = space
+indent_size = 2
+
+[*.rst]
+indent_style = space
+indent_size = 3
+
+[{meson.build,meson_options.txt}]
+indent_style = space
+indent_size = 3
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..bee8a64b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 00000000..3955e386
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,349 @@
+variables:
+ FDO_UPSTREAM_REPO: "virgl/virglrenderer"
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ MESA_BASE_TAG: ${DEBIAN_BASE_TAG}
+ #
+ # IMPORTANT!
+ #
+ # Use the Pipeline ID corresponding to the Mesa Git rev provided below.
+ # If multiple pipeline runs are available, please choose the one having
+ # at least the 'debian-testing' job in 'Build-x86_64' stage completed.
+ #
+ # Pick a pipeline on https://gitlab.freedesktop.org/mesa/mesa/-/pipelines/
+ #
+ MESA_PIPELINE_ID: 743439
+ MESA_PROJECT_PATH: mesa/mesa
+ STORAGE_HOST: s3.freedesktop.org
+ CI_REGISTRY_IMAGE: "registry.freedesktop.org/${MESA_PROJECT_PATH}"
+ # per-pipeline artifact storage on MinIO
+ PIPELINE_ARTIFACTS_BASE: ${STORAGE_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
+ # per-job artifact storage on MinIO
+ JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
+ # reference images stored for traces
+ PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${STORAGE_HOST}/mesa-tracie-results/${FDO_UPSTREAM_REPO}"
+
+
+include:
+ - project: 'freedesktop/ci-templates'
+ ref: 79c325922670137e8f0a4dc5f6f097e0eb57c1af
+ file:
+ - '/templates/ci-fairy.yml'
+ - project: 'freedesktop/ci-templates'
+ ref: *ci-templates-commit
+ file:
+ - '/templates/debian.yml'
+ - '/templates/fedora.yml'
+ - project: 'mesa/mesa'
+ # IMPORTANT: Use a recent Mesa Git revision
+ # The commit ref must be in sync with the pipeline picked above
+ # It can be found on the pipeline page below the commit message
+ ref: 1ec172646cd7f5b8c04173a6b45a871aa48aa12e
+ file:
+ - '/.gitlab-ci/image-tags.yml'
+
+# YAML anchors for rule conditions
+# --------------------------------
+.rules-anchors:
+ rules:
+ # Scheduled pipeline
+ - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
+ when: on_success
+ # Forked project branch / pre-merge pipeline not for Marge bot
+ - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "virgl" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
+ when: manual
+ # Pipeline runs for the main branch of the upstream virglrenderer project
+ - if: &is-virglrenderer-main '$CI_PROJECT_NAMESPACE == "virgl" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
+ when: always
+ # Post-merge pipeline
+ - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "virgl" && $CI_COMMIT_BRANCH'
+ when: on_success
+ # Pre-merge pipeline for Marge Bot
+ - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+
+stages:
+ - build
+ - sanity test
+ - test
+
+.set-image:
+ variables:
+ MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"
+ image: "$MESA_IMAGE"
+
+.set-image-base-tag:
+ extends:
+ - .set-image
+ variables:
+ MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
+
+debian/x86_build:
+ stage: build
+ extends:
+ - .set-image-base-tag
+ variables:
+ MESA_IMAGE_PATH: ${DEBIAN_X86_BUILD_IMAGE_PATH}
+ MESA_IMAGE_TAG: ${DEBIAN_BUILD_TAG}
+ MINIO_ARTIFACT_NAME: virgl-amd64
+ script:
+ - .gitlab-ci/meson/build.sh
+ artifacts:
+ name: "virgl_${CI_JOB_NAME}"
+ when: always
+ paths:
+ - install/
+ - results/
+ rules:
+ - if: *is-scheduled-pipeline
+ when: on_success
+ - if: *is-post-merge
+ when: never
+ - if: *is-forked-branch-or-pre-merge-not-for-marge
+ when: manual
+ - if: *is-pre-merge-for-marge
+ when: on_success
+ - when: never
+
+#
+# Sanity test jobs
+#
+
+.make_check_base:
+ stage: sanity test
+ extends: debian/x86_build
+ needs: []
+ artifacts:
+ when: always
+ paths:
+ - results/
+
+mesa check meson:
+ extends: .make_check_base
+ variables:
+ TEST_SUITE: make-check-meson
+
+make check clang-fuzzer:
+ extends: .make_check_base
+ variables:
+ TEST_SUITE: make-check-clang-fuzzer
+ CC: clang
+ EXTRA_OPTION: "-D fuzzer=true"
+
+make check trace-stderr:
+ extends: .make_check_base
+ variables:
+ TEST_SUITE: make-check-trace-stderr
+ EXTRA_OPTION: "-D tracing=stderr"
+
+make check venus:
+ extends: .make_check_base
+ variables:
+ TEST_SUITE: make-check-venus
+
+#
+# Piglit & dEQP test jobs
+#
+
+.use-gl-test-image:
+ stage: test
+ extends:
+ - .set-image-base-tag
+ before_script:
+ - echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}"
+ - unset CI_JOB_JWT
+ - export CI_JOB_NAME_SANITIZED="$(echo $CI_JOB_NAME | tr ' /' '--')"
+ variables:
+ CI_JOB_JWT_FILE: /minio_jwt
+ MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_GL_PATH}
+ MESA_IMAGE_TAG: ${DEBIAN_X86_TEST_GL_TAG}
+ script:
+ - export DEQP_RESULTS_DIR="results/${CI_JOB_NAME_SANITIZED}"
+ - export PIGLIT_RESULTS_DIR="results/${CI_JOB_NAME_SANITIZED}"
+ - .gitlab-ci/container/debian/x86_test.sh
+ artifacts:
+ name: "virgl-result"
+ when: always
+ paths:
+ - results/
+ reports:
+ junit: results/junit.xml
+ needs:
+ - job: debian/x86_build
+ artifacts: true
+ rules:
+ - if: *is-post-merge
+ when: never
+ - when: on_success
+
+.use-vk-test-image:
+ stage: test
+ extends:
+ - .set-image-base-tag
+ before_script:
+ - echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}"
+ - unset CI_JOB_JWT
+ - export CI_JOB_NAME_SANITIZED="$(echo $CI_JOB_NAME | tr ' /' '--')"
+ variables:
+ CI_JOB_JWT_FILE: /minio_jwt
+ MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_VK_PATH}
+ MESA_IMAGE_TAG: ${DEBIAN_X86_TEST_VK_TAG}
+ script:
+ - export DEQP_RESULTS_DIR="results/${CI_JOB_NAME_SANITIZED}"
+ - export PIGLIT_RESULTS_DIR="results/${CI_JOB_NAME_SANITIZED}"
+ - .gitlab-ci/container/debian/x86_test.sh
+ artifacts:
+ name: "venus-result"
+ when: always
+ paths:
+ - results/
+ reports:
+ junit: results/junit.xml
+ needs:
+ - job: debian/x86_build
+ artifacts: true
+ rules:
+ - if: *is-post-merge
+ when: never
+ - when: on_success
+
+.gl-host-test:
+ extends:
+ - .use-gl-test-image
+ variables:
+ GALLIUM_DRIVER: virpipe
+ GALLIVM_PERF: nopt
+
+.gl-virt-test:
+ extends:
+ - .use-gl-test-image
+ variables:
+ GALLIUM_DRIVER: virgl
+ CROSVM_GALLIUM_DRIVER: llvmpipe
+ GALLIVM_PERF: "nopt,no_quad_lod"
+
+.deqp-host:
+ extends:
+ - .gl-host-test
+
+.piglit-host:
+ extends:
+ - .gl-host-test
+ variables: &piglit-host-variables
+ PIGLIT_PLATFORM: surfaceless_egl
+ PIGLIT_NO_WINDOW: 1
+ PIGLIT_PROFILES: gpu
+
+.deqp-virt:
+ extends:
+ - .gl-virt-test
+ variables:
+ # There will be FDO_CI_CONCURRENT Crosvm processes, so each should use a single thread
+ LP_NUM_THREADS: 1
+
+.piglit-virt:
+ extends:
+ - .gl-virt-test
+ variables:
+ <<: *piglit-host-variables
+ # Use all threads for rendering and only run one job at a time
+ LP_NUM_THREADS: ${FDO_CI_CONCURRENT}
+ FORCE_FDO_CI_CONCURRENT: 1
+
+# Host runners (virpipe/vtest)
+
+deqp-gl-host:
+ extends:
+ - .deqp-host
+ variables:
+ DEQP_SUITE: virgl-gl
+ GPU_VERSION: virgl-gl
+
+deqp-gles-host:
+ extends:
+ - .deqp-host
+ variables:
+ VIRGL_HOST_API: GLES
+ DEQP_SUITE: virgl-gles
+ GPU_VERSION: virgl-gles
+
+piglit-gl-host:
+ extends:
+ - .piglit-host
+ variables:
+ GPU_VERSION: virgl-gl
+
+piglit-gles-host:
+ extends:
+ - .piglit-host
+ variables:
+ VIRGL_HOST_API: GLES
+ GPU_VERSION: virgl-gles
+
+# Virt runners (virgl/crosvm)
+
+deqp-gl-virt:
+ extends:
+ - .deqp-virt
+ variables:
+ DEQP_SUITE: virgl-gl
+ GPU_VERSION: virgl-gl
+ CROSVM_GPU_ARGS: &deqp-gl-crosvm-gpu-args "gles=false,backend=virglrenderer,egl=true,surfaceless=true,width=1024,height=768"
+
+deqp-gles-virt:
+ extends:
+ - .deqp-virt
+ variables:
+ VIRGL_HOST_API: GLES
+ DEQP_SUITE: virgl-gles
+ GPU_VERSION: virgl-gles
+ CROSVM_GPU_ARGS: &deqp-gles-crosvm-gpu-args "gles=true,backend=virglrenderer,egl=true,surfaceless=true,width=1024,height=768"
+
+piglit-gl-virt:
+ extends:
+ - .piglit-virt
+ parallel: 3
+ variables:
+ GPU_VERSION: virgl-gl
+ CROSVM_GPU_ARGS: *deqp-gl-crosvm-gpu-args
+
+piglit-gles-virt:
+ extends:
+ - .piglit-virt
+ parallel: 3
+ variables:
+ VIRGL_HOST_API: GLES
+ GPU_VERSION: virgl-gles
+ CROSVM_GPU_ARGS: *deqp-gles-crosvm-gpu-args
+
+virgl-traces:
+ extends:
+ - .piglit-virt
+ variables:
+ GPU_VERSION: virgl-gl
+ CROSVM_GPU_ARGS: *deqp-gl-crosvm-gpu-args
+ EGL_PLATFORM: "surfaceless"
+ PIGLIT_REPLAY_DESCRIPTION_FILE: "${CI_PROJECT_DIR}/install/traces-virgl.yml"
+ PIGLIT_REPLAY_DEVICE_NAME: "gl-virgl"
+ PIGLIT_RESULTS: "virgl-replay"
+
+.venus-lavapipe-test:
+ extends:
+ - .use-vk-test-image
+ variables:
+ VK_DRIVER: virtio
+ CROSVM_GALLIUM_DRIVER: "llvmpipe"
+ CROSVM_VK_DRIVER: "lvp"
+
+venus-lavapipe:
+ extends:
+ - .venus-lavapipe-test
+ variables:
+ DEQP_FRACTION: 15
+ DEQP_SUITE: venus
+ GPU_VERSION: venus
+ LP_NUM_THREADS: 1 # There will be FDO_CI_CONCURRENT Crosvm processes, so each should use a single thread
+ FDO_CI_CONCURRENT: 32 # Seems to be the fastest value, more gets actually slower
+ CROSVM_MEMORY: 3072
+ CROSVM_GPU_ARGS: "vulkan=true,gles=false,backend=virglrenderer,egl=true,surfaceless=true"
+ tags:
+ - mesa-swrast
diff --git a/.gitlab-ci/container/debian/x86_test.sh b/.gitlab-ci/container/debian/x86_test.sh
new file mode 100755
index 00000000..01488b8a
--- /dev/null
+++ b/.gitlab-ci/container/debian/x86_test.sh
@@ -0,0 +1,104 @@
+#!/bin/sh
+
+set -ex
+
+MESA_CI_PROJECT_DIR="/builds/${MESA_PROJECT_PATH}"
+mkdir -p ${MESA_CI_PROJECT_DIR}
+cd ${MESA_CI_PROJECT_DIR}
+
+# Deploy Mesa CI artifacts
+MESA_CI_ARTIFACTS_URL="https://${STORAGE_HOST}/artifacts/${MESA_PROJECT_PATH}/${MESA_PIPELINE_ID}/mesa-amd64.tar.zst"
+if wget -q --method=HEAD ${MESA_CI_ARTIFACTS_URL}; then
+ wget -S --progress=dot:giga -O- ${MESA_CI_ARTIFACTS_URL} | tar -xv --zstd
+else
+ echo -e "\e[31mThe Mesa artifacts has expired, please update to newer Mesa pipeline!\e[0m"
+ apt-get update && apt-get -y install jq
+ MESA_PROJECT_PATH_ESCAPED=$(echo "$MESA_PROJECT_PATH" | sed 's|/|%2F|')
+ MESA_PROJECT_ID=$(wget -cq "${CI_API_V4_URL}/projects/${MESA_PROJECT_PATH_ESCAPED}" -O - | jq -c '.id')
+ FALLBACK_PAGE=1
+ while :
+ do
+ MESA_JOB_ID=$(wget -cq "${CI_API_V4_URL}/projects/${MESA_PROJECT_ID}/pipelines/${MESA_PIPELINE_ID}/jobs?per_page=100&page=${FALLBACK_PAGE}&scope=success" -O - \
+ | jq -c '.[] | select(.name == "debian-testing") | .id')
+ if [ ! -z "${MESA_JOB_ID}" ]; then
+ break
+ fi
+ if [ $FALLBACK_PAGE -ge 10 ]; then
+ echo -e "\e[31mUnable to find the debian-testing job!\e[0m"
+ exit 1
+ fi
+ FALLBACK_PAGE=$((FALLBACK_PAGE+1))
+ done
+ MESA_CI_ARTIFACTS_URL="${CI_API_V4_URL}/projects/${MESA_PROJECT_ID}/jobs/${MESA_JOB_ID}/artifacts/artifacts/install.tar"
+ unset MESA_JOB_ID
+ wget -S --progress=dot:giga -O- ${MESA_CI_ARTIFACTS_URL} | tar -xv
+fi
+
+# Overwrite Mesa CI's virglrenderer binaries with self built versions
+cp -a ${CI_PROJECT_DIR}/install/bin/virgl_test_server /usr/local/bin/
+cp -a ${CI_PROJECT_DIR}/install/lib/libvirglrenderer.so* /usr/local/lib/
+
+if [ "${VK_DRIVER}" = "virtio" ] || [ "${GALLIUM_DRIVER}" = "virgl" ]; then
+ #
+ # Run the tests on virtual platform (virgl/crosvm)
+ #
+ cp -a ${CI_PROJECT_DIR}/.gitlab-ci/expectations/virt/*.txt install/
+ cp -a ${CI_PROJECT_DIR}/.gitlab-ci/expectations/virt/*.toml install/
+
+ #
+ # crosvm-runner.sh depends on resources from ${CI_PROJECT_DIR}/install,
+ # but their actual location is ${MESA_CI_PROJECT_DIR}/install, hence
+ # let's fix this using a bind mount.
+ #
+ mv ${CI_PROJECT_DIR}/install ${CI_PROJECT_DIR}/install-orig
+ mkdir ${CI_PROJECT_DIR}/install
+ mount --bind install ${CI_PROJECT_DIR}/install
+
+ export LD_LIBRARY_PATH="${CI_PROJECT_DIR}/install/lib"
+ set +e
+
+ if [ -z "${DEQP_SUITE}" ]; then
+ if [ -z "${PIGLIT_REPLAY_DESCRIPTION_FILE}" ]; then
+ FDO_CI_CONCURRENT=${FORCE_FDO_CI_CONCURRENT:-FDO_CI_CONCURRENT} \
+ install/crosvm-runner.sh install/piglit/piglit-runner.sh
+ else
+ FDO_CI_CONCURRENT=${FORCE_FDO_CI_CONCURRENT:-FDO_CI_CONCURRENT} \
+ install/crosvm-runner.sh install/piglit/piglit-traces.sh
+ fi
+ else
+ install/deqp-runner.sh
+ fi
+
+ RET=$?
+
+ # Cleanup
+ umount ${CI_PROJECT_DIR}/install && \
+ rmdir ${CI_PROJECT_DIR}/install && \
+ mv ${CI_PROJECT_DIR}/install-orig ${CI_PROJECT_DIR}/install
+else
+ #
+ # Run the tests on host platform (virpipe/vtest)
+ #
+ cp -a ${CI_PROJECT_DIR}/.gitlab-ci/expectations/host/*.txt install/
+ cp -a ${CI_PROJECT_DIR}/.gitlab-ci/expectations/host/*.toml install/
+
+ export LIBGL_ALWAYS_SOFTWARE="true"
+ set +e
+
+ if [ -z "${DEQP_SUITE}" ]; then
+ PIGLIT_RUNNER_OPTIONS="--timeout 180" \
+ install/piglit/piglit-runner.sh
+ else
+ DEQP_EXPECTED_RENDERER=virgl \
+ WAFFLE_PLATFORM="surfaceless_egl" \
+ SANITY_MESA_VERSION_CMD=wflinfo \
+ HANG_DETECTION_CMD= \
+ EGL_PLATFORM=surfaceless \
+ install/deqp-runner.sh
+ fi
+
+ RET=$?
+fi
+
+mv -f results ${CI_PROJECT_DIR}/
+exit ${RET}
diff --git a/.gitlab-ci/expectations/host/deqp-virgl-gl.toml b/.gitlab-ci/expectations/host/deqp-virgl-gl.toml
new file mode 100644
index 00000000..f65be42c
--- /dev/null
+++ b/.gitlab-ci/expectations/host/deqp-virgl-gl.toml
@@ -0,0 +1,51 @@
+[[deqp]]
+deqp = "/deqp/modules/gles2/deqp-gles2"
+caselists = ["/deqp/mustpass/gles2-master.txt"]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+version_check = "GL ES 3.2.*git"
+renderer_check = "virgl"
+
+[[deqp]]
+deqp = "/deqp/modules/gles3/deqp-gles3"
+caselists = ["/deqp/mustpass/gles3-master.txt"]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 180.0
+
+[[deqp]]
+deqp = "/deqp/modules/gles31/deqp-gles31"
+caselists = ["/deqp/mustpass/gles31-master.txt"]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 180.0
+
+[[deqp]]
+deqp = "/deqp/external/openglcts/modules/glcts"
+caselists = [
+ "/deqp/mustpass/gl30-master.txt",
+ "/deqp/mustpass/gl31-master.txt",
+ "/deqp/mustpass/gl32-master.txt",
+]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
diff --git a/.gitlab-ci/expectations/host/deqp-virgl-gles.toml b/.gitlab-ci/expectations/host/deqp-virgl-gles.toml
new file mode 100644
index 00000000..812f82ee
--- /dev/null
+++ b/.gitlab-ci/expectations/host/deqp-virgl-gles.toml
@@ -0,0 +1,36 @@
+[[deqp]]
+deqp = "/deqp/modules/gles2/deqp-gles2"
+caselists = ["/deqp/mustpass/gles2-master.txt"]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+version_check = "GL ES 3.2.*git"
+renderer_check = "virgl"
+
+[[deqp]]
+deqp = "/deqp/modules/gles3/deqp-gles3"
+caselists = ["/deqp/mustpass/gles3-master.txt"]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 180.0
+
+[[deqp]]
+deqp = "/deqp/modules/gles31/deqp-gles31"
+caselists = ["/deqp/mustpass/gles31-master.txt"]
+deqp_args = [
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 180.0
diff --git a/.gitlab-ci/expectations/host/virgl-gl-fails.txt b/.gitlab-ci/expectations/host/virgl-gl-fails.txt
new file mode 100644
index 00000000..3cca2064
--- /dev/null
+++ b/.gitlab-ci/expectations/host/virgl-gl-fails.txt
@@ -0,0 +1,546 @@
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES2.functional.clipping.point.wide_point_clip,Fail
+dEQP-GLES2.functional.clipping.point.wide_point_clip_viewport_center,Fail
+dEQP-GLES2.functional.clipping.point.wide_point_clip_viewport_corner,Fail
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.default_framebuffer_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.default_framebuffer_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.fbo_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.fbo_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.default_framebuffer_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.default_framebuffer_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.fbo_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.fbo_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_primitive.vertex_tessellation_fragment.default_framebuffer,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_primitive.vertex_tessellation_fragment.fbo,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_2,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES3.functional.clipping.point.wide_point_clip,Fail
+dEQP-GLES3.functional.clipping.point.wide_point_clip_viewport_center,Fail
+dEQP-GLES3.functional.clipping.point.wide_point_clip_viewport_corner,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_x,Fail
+# https://gitlab.khronos.org/Tracker/vk-gl-cts/-/issues/2892
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_mat4x2_vs,Crash
+KHR-GL30.transform_feedback.api_errors_test,Fail
+KHR-GL31.transform_feedback.capture_special_interleaved_test,Crash
+KHR-GL32.transform_feedback_overflow_query_ARB.advanced-single-stream-interleaved-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.advanced-single-stream-separate-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.basic-single-stream-interleaved-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.basic-single-stream-separate-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.multiple-streams-multiple-buffers-per-stream,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.multiple-streams-one-buffer-per-stream,Fail
+KHR-GL43.compute_shader.resource-subroutine,Fail
+KHR-GL43.map_buffer_alignment.functional,Crash
+KHR-GL43.shader_image_load_store.basic-allFormats-loadGeometryStages,Fail
+KHR-GL43.shader_image_load_store.basic-allFormats-storeGeometryStages,Fail
+KHR-GL43.shader_image_load_store.basic-allTargets-store,Fail
+KHR-GL43.shader_image_load_store.incomplete_textures,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-cs-float,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-cs-int,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-cs-uint,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-fs-float,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-fs-int,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-fs-uint,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-gs-float,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-gs-int,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-gs-uint,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-tcs-float,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-tcs-int,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-tcs-uint,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-tes-float,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-tes-int,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-tes-uint,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-vs-float,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-vs-int,Fail
+KHR-GL43.shader_image_size.advanced-nonMS-vs-uint,Fail
+KHR-GL43.shader_storage_buffer_object.advanced-indirectAddressing-case2,Fail
+KHR-GL43.shader_storage_buffer_object.advanced-usage-case1,Fail
+KHR-GL43.shader_storage_buffer_object.advanced-usage-sync,Fail
+KHR-GL43.shader_storage_buffer_object.basic-atomic-case1,Fail
+KHR-GL43.shader_storage_buffer_object.basic-atomic-case2,Fail
+KHR-GL43.shading_language_420pack.binding_images,Fail
+KHR-GL43.texture_view.view_sampling,Fail
+KHR-GL43.transform_feedback.capture_vertex_interleaved_test,Fail
+KHR-GL43.transform_feedback.capture_vertex_separate_test,Fail
+KHR-GL43.transform_feedback.discard_vertex_test,Fail
+KHR-GL43.transform_feedback.draw_xfb_instanced_test,Crash
+KHR-GL43.transform_feedback.draw_xfb_stream_instanced_test,Crash
+KHR-GL43.transform_feedback_overflow_query_ARB.advanced-single-stream-interleaved-attribs,Fail
+KHR-GL43.transform_feedback_overflow_query_ARB.advanced-single-stream-separate-attribs,Fail
+KHR-GL43.transform_feedback_overflow_query_ARB.basic-single-stream-interleaved-attribs,Fail
+KHR-GL43.transform_feedback_overflow_query_ARB.basic-single-stream-separate-attribs,Fail
+KHR-GL43.transform_feedback_overflow_query_ARB.multiple-streams-multiple-buffers-per-stream,Fail
+KHR-GL43.transform_feedback_overflow_query_ARB.multiple-streams-one-buffer-per-stream,Fail
+KHR-GL43.transform_feedback.query_vertex_interleaved_test,Fail
+KHR-GL43.transform_feedback.query_vertex_separate_test,Fail
+
+fast_color_clear@fcc-front-buffer-distraction,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges,Fail
+shaders@point-vertex-id divisor,Fail
+shaders@point-vertex-id gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_instanceid,Fail
+shaders@point-vertex-id gl_vertexid divisor,Fail
+shaders@point-vertex-id gl_vertexid,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid,Fail
+spec@arb_blend_func_extended@arb_blend_func_extended-fbo-extended-blend-pattern_gles2,Fail
+spec@arb_clear_texture@arb_clear_texture-depth,Fail
+spec@arb_copy_image@arb_copy_image-formats,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-copypixels,Fail
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-drawpixels-24_8,Fail
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-drawpixels-32f_24_8_rev,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH32F_STENCIL8- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH_COMPONENT32F- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH32F_STENCIL8- swizzled- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32F- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT16- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT24- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT32- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT16- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT24- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32- swizzled- border color only,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_1,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_2,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_3,Crash
+spec@arb_es2_compatibility@texwrap formats bordercolor,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor@GL_RGB565- border color only,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled@GL_RGB565- swizzled- border color only,Fail
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed,Crash
+spec@arb_occlusion_query@occlusion_query_conform,Fail
+spec@arb_occlusion_query@occlusion_query_conform@GetObjivAval_multi1,Fail
+spec@arb_occlusion_query@occlusion_query_meta_no_fragments,Fail
+spec@arb_occlusion_query@occlusion_query_meta_save,Fail
+spec@arb_point_sprite@arb_point_sprite-mipmap,Fail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex,Fail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex@'vs_input2[1][0]' on GL_PROGRAM_INPUT,Fail
+spec@arb_sample_shading@builtin-gl-sample-position 2,Fail
+spec@arb_shader_atomic_counter_ops@execution@add,Fail
+spec@arb_shader_image_load_store@early-z,Fail
+spec@arb_shader_image_load_store@early-z@occlusion query test/early-z pass,Fail
+spec@arb_shader_image_load_store@layer,Fail
+spec@arb_shader_image_load_store@layer@image2DMSArray/layered binding test,Fail
+spec@arb_shader_image_load_store@layer@image2DMSArray/non-layered binding test,Fail
+spec@arb_shader_image_load_store@layer@image2DMS/layered binding test,Fail
+spec@arb_shader_image_load_store@layer@image2DMS/non-layered binding test,Fail
+spec@arb_shader_image_load_store@max-images@Combined max image uniforms test,Fail
+spec@arb_shader_image_load_store@max-images,Fail
+spec@arb_shader_image_load_store@max-size,Fail
+spec@arb_shader_image_load_store@max-size@image2DMSArray max size test/4x8x8x2048,Fail
+spec@arb_shader_image_load_store@max-size@image2DMS max size test/4x16384x8x1,Fail
+spec@arb_shader_image_load_store@max-size@image2DMS max size test/4x8x16384x1,Fail
+spec@arb_shader_image_load_store@semantics,Fail
+spec@arb_shader_image_load_store@semantics@imageLoad/Vertex shader/rgba32f/image2DMSArray test,Fail
+spec@arb_shader_image_load_store@semantics@imageLoad/Vertex shader/rgba32f/image2DMS test,Fail
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomiccompswap-int,Fail
+spec@arb_shader_storage_buffer_object@layout-std140-write-shader,Fail
+spec@arb_shader_storage_buffer_object@maxblocks,Fail
+spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgrad,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_INTENSITY- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGBA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGB- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_INTENSITY- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB- swizzled- border color only,Fail
+spec@arb_texture_cube_map_array@fbo-generatemipmap-cubemap array s3tc_dxt1,Fail
+spec@arb_texture_float@fbo-blending-formats,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_RGB32F,Fail
+spec@arb_texture_float@fbo-clear-formats,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-fast-clear,Fail
+spec@arb_texture_float@multisample-fast-clear gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 2 gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 4 gl_arb_texture_float,Fail
+spec@arb_texture_float@texwrap formats bordercolor,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB32F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA32F- swizzled- border color only,Fail
+spec@arb_texture_rectangle@copyteximage rect,Fail
+spec@arb_texture_rectangle@copyteximage rect samples=2,Fail
+spec@arb_texture_rectangle@copyteximage rect samples=4,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor@GL_RGBA8- border color only,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R32F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG32F- swizzled- border color only,Fail
+spec@arb_texture_view@rendering-layers-image,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image1DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image2DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of imageCubeArray,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_0,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_2,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_any,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_single,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic,Fail
+spec@egl 1.4@eglterminate then unbind context,Fail
+spec@egl_ext_protected_content@conformance,Fail
+spec@egl_khr_gl_image@egl_khr_gl_renderbuffer_image-clear-shared-image gl_depth_component24,Fail
+spec@egl_khr_surfaceless_context@viewport,Fail
+spec@ext_framebuffer_multisample@alpha-blending-after-rendering 2,Fail
+spec@ext_framebuffer_multisample@blit-mismatched-formats,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-edges,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-edges,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth single,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 inverted,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 non-inverted,Fail
+spec@ext_framebuffer_object@fbo-blending-format-quirks,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT24/GL_FLOAT,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT/GL_FLOAT,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-clear-and-render,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-rendering,Fail
+spec@ext_packed_depth_stencil@fbo-depthstencil-gl_depth24_stencil8-copypixels,Fail
+spec@ext_packed_depth_stencil@fbo-depthstencil-gl_depth24_stencil8-drawpixels-24_8,Fail
+spec@ext_packed_depth_stencil@fbo-depthstencil-gl_depth24_stencil8-drawpixels-32f_24_8_rev,Fail
+spec@ext_packed_depth_stencil@readdrawpixels,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor@GL_DEPTH24_STENCIL8- border color only,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled@GL_DEPTH24_STENCIL8- swizzled- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor,Fail
+spec@ext_packed_float@texwrap formats bordercolor@GL_R11F_G11F_B10F- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled@GL_R11F_G11F_B10F- swizzled- border color only,Fail
+spec@ext_texture_array@fbo-generatemipmap-array s3tc_dxt1,Fail
+spec@ext_texture_array@gen-mipmap,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets 2d_array s3tc,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets cube_array s3tc,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@fbo-blending,Fail
+spec@ext_texture_integer@multisample-fast-clear gl_ext_texture_integer,Fail
+spec@ext_texture_integer@texwrap formats bordercolor,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor@GL_RGB9_E5- border color only,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled@GL_RGB9_E5- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB- swizzled- border color only,Fail
+spec@ext_transform_feedback@builtin-varyings gl_culldistance,Fail
+spec@glsl-1.10@execution@samplers@glsl-fs-shadow2d-clamp-z,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quads,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quad-strip,Fail
+spec@glsl-1.50@execution@variable-indexing@gs-input-array-float-index-rd,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp@sRGB decode full precision,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color,Fail
+spec@nv_copy_image@nv_copy_image-formats,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@nv_read_depth@read_depth_gles3,Fail
+spec@!opengl 1.0@depth-clear-precision-check@depth32,Fail
+spec@!opengl 1.0@depth-clear-precision-check,Fail
+spec@!opengl 1.0@gl-1.0-drawbuffer-modes,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag-quads,Fail
+spec@!opengl 1.0@gl-1.0-swapbuffers-behavior,Fail
+spec@!opengl 1.0@rasterpos,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_gs_linked,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_tes_linked,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=4,Fail
+spec@!opengl 1.1@linestipple@Factor 2x,Fail
+spec@!opengl 1.1@linestipple@Factor 3x,Fail
+spec@!opengl 1.1@linestipple,Fail
+spec@!opengl 1.1@linestipple@Line loop,Fail
+spec@!opengl 1.1@linestipple@Line strip,Fail
+spec@!opengl 1.1@linestipple@Restarting lines within a single Begin-End block,Fail
+spec@!opengl 1.1@point-line-no-cull,Fail
+spec@!opengl 1.1@polygon-mode-facing,Fail
+spec@!opengl 1.1@polygon-mode,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset,Fail
+spec@!opengl 1.1@read-front clear-front-first,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=2,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=4,Crash
+spec@!opengl 1.1@read-front,Fail
+spec@!opengl 1.1@read-front samples=2,Crash
+spec@!opengl 1.1@read-front samples=4,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE6_ALPHA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_R3_G3_B2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10_A2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5_A1- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE6_ALPHA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_R3_G3_B2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10_A2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5_A1- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA8- swizzled- border color only,Fail
+spec@!opengl 1.1@windowoverlap,Fail
+spec@!opengl 1.2@copyteximage 3d,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.5@depth-tex-compare,Fail
+spec@!opengl 1.5@draw-elements-user,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag-immediate,Fail
+spec@!opengl 3.2@layered-rendering@clear-color-mismatched-layer-count,Fail
+spec@!opengl es 3.0@gles-3.0-transform-feedback-uniform-buffer-object,Fail
diff --git a/.gitlab-ci/expectations/host/virgl-gl-flakes.txt b/.gitlab-ci/expectations/host/virgl-gl-flakes.txt
new file mode 100644
index 00000000..5836e9f0
--- /dev/null
+++ b/.gitlab-ci/expectations/host/virgl-gl-flakes.txt
@@ -0,0 +1,392 @@
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8
+dEQP-GLES31.functional.ssbo.layout.random.all_shared_buffer.36
+# https://gitlab.freedesktop.org/mesa/mesa/-/issues/4651
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_bool_mat3_vs
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_bool_mat4x3_vs
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_mat4x2_vs
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_mat4x3_vs
+KHR-GL31.transform_feedback.capture_special_interleaved_test
+KHR-GL43.blend_equation_advanced.blend_all.GL_COLORBURN_KHR_all_qualifier
+KHR-GL43.blend_equation_advanced.blend_specific.GL_HARDLIGHT_KHR
+KHR-GL43.blend_equation_advanced.blend_specific.GL_HSL_COLOR_KHR
+KHR-GL43.blend_equation_advanced.blend_specific.GL_SCREEN_KHR
+KHR-GL43.blend_equation_advanced.preprocessor.require
+KHR-GL43.compute_shader.pipeline-post-fs
+KHR-GL43.constant_expressions.array_abs_int_tess_eval
+KHR-GL43.constant_expressions.array_abs_ivec3_geometry
+KHR-GL43.constant_expressions.array_abs_ivec4_geometry
+KHR-GL43.constant_expressions.array_acos_float_fragment
+KHR-GL43.constant_expressions.array_acos_float_tess_eval
+KHR-GL43.constant_expressions.array_acos_vec2_geometry
+KHR-GL43.constant_expressions.array_acos_vec3_geometry
+KHR-GL43.constant_expressions.array_acos_vec4_geometry
+KHR-GL43.constant_expressions.array_asin_float_geometry
+KHR-GL43.constant_expressions.array_asin_vec2_geometry
+KHR-GL43.constant_expressions.array_asin_vec2_tess_control
+KHR-GL43.constant_expressions.array_asin_vec3_geometry
+KHR-GL43.constant_expressions.array_asin_vec4_geometry
+KHR-GL43.constant_expressions.array_ceil_float_tess_eval
+KHR-GL43.constant_expressions.array_ceil_vec3_tess_control
+KHR-GL43.constant_expressions.array_ceil_vec4_geometry
+KHR-GL43.constant_expressions.array_clamp_float_geometry
+KHR-GL43.constant_expressions.array_clamp_vec2_float_geometry
+KHR-GL43.constant_expressions.array_clamp_vec2_geometry
+KHR-GL43.constant_expressions.array_clamp_vec3_float_tess_control
+KHR-GL43.constant_expressions.array_clamp_vec3_tess_eval
+KHR-GL43.constant_expressions.array_clamp_vec4_float_geometry
+KHR-GL43.constant_expressions.array_cos_float_geometry
+KHR-GL43.constant_expressions.array_cos_vec2_geometry
+KHR-GL43.constant_expressions.array_cos_vec3_vertex
+KHR-GL43.constant_expressions.array_cos_vec4_tess_eval
+KHR-GL43.constant_expressions.array_degrees_vec2_tess_eval
+KHR-GL43.constant_expressions.array_degrees_vec3_compute
+KHR-GL43.constant_expressions.array_dot_float_tess_eval
+KHR-GL43.constant_expressions.array_dot_vec2_geometry
+KHR-GL43.constant_expressions.array_dot_vec3_geometry
+KHR-GL43.constant_expressions.array_dot_vec4_compute
+KHR-GL43.constant_expressions.array_dot_vec4_tess_eval
+KHR-GL43.constant_expressions.array_exp2_vec2_geometry
+KHR-GL43.constant_expressions.array_exp2_vec2_vertex
+KHR-GL43.constant_expressions.array_exp2_vec3_geometry
+KHR-GL43.constant_expressions.array_exp2_vec4_geometry
+KHR-GL43.constant_expressions.array_exp_float_tess_control
+KHR-GL43.constant_expressions.array_exp_float_tess_eval
+KHR-GL43.constant_expressions.array_exp_vec2_fragment
+KHR-GL43.constant_expressions.array_exp_vec2_tess_eval
+KHR-GL43.constant_expressions.array_exp_vec4_tess_eval
+KHR-GL43.constant_expressions.array_floor_float_geometry
+KHR-GL43.constant_expressions.array_floor_float_tess_control
+KHR-GL43.constant_expressions.array_floor_vec2_geometry
+KHR-GL43.constant_expressions.array_floor_vec4_tess_eval
+KHR-GL43.constant_expressions.array_inversesqrt_float_tess_eval
+KHR-GL43.constant_expressions.array_inversesqrt_vec2_geometry
+KHR-GL43.constant_expressions.array_inversesqrt_vec4_geometry
+KHR-GL43.constant_expressions.array_length_vec2_tess_eval
+KHR-GL43.constant_expressions.array_length_vec3_geometry
+KHR-GL43.constant_expressions.array_length_vec3_vertex
+KHR-GL43.constant_expressions.array_length_vec4_geometry
+KHR-GL43.constant_expressions.array_log2_vec2_compute
+KHR-GL43.constant_expressions.array_log2_vec3_compute
+KHR-GL43.constant_expressions.array_log2_vec4_geometry
+KHR-GL43.constant_expressions.array_log_float_compute
+KHR-GL43.constant_expressions.array_log_float_geometry
+KHR-GL43.constant_expressions.array_log_float_tess_eval
+KHR-GL43.constant_expressions.array_log_vec2_tess_eval
+KHR-GL43.constant_expressions.array_max_float_geometry
+KHR-GL43.constant_expressions.array_max_vec2_float_geometry
+KHR-GL43.constant_expressions.array_max_vec2_geometry
+KHR-GL43.constant_expressions.array_max_vec3_float_tess_control
+KHR-GL43.constant_expressions.array_max_vec4_geometry
+KHR-GL43.constant_expressions.array_max_vec4_tess_control
+KHR-GL43.constant_expressions.array_min_float_compute
+KHR-GL43.constant_expressions.array_min_float_geometry
+KHR-GL43.constant_expressions.array_min_float_vertex
+KHR-GL43.constant_expressions.array_min_vec2_float_geometry
+KHR-GL43.constant_expressions.array_min_vec2_tess_eval
+KHR-GL43.constant_expressions.array_min_vec3_float_geometry
+KHR-GL43.constant_expressions.array_min_vec3_float_tess_eval
+KHR-GL43.constant_expressions.array_min_vec3_fragment
+KHR-GL43.constant_expressions.array_min_vec3_tess_eval
+KHR-GL43.constant_expressions.array_min_vec4_float_tess_control
+KHR-GL43.constant_expressions.array_mod_float_tess_eval
+KHR-GL43.constant_expressions.array_mod_vec2_float_tess_eval
+KHR-GL43.constant_expressions.array_mod_vec2_fragment
+KHR-GL43.constant_expressions.array_mod_vec2_tess_eval
+KHR-GL43.constant_expressions.array_mod_vec3_float_geometry
+KHR-GL43.constant_expressions.array_mod_vec3_float_tess_eval
+KHR-GL43.constant_expressions.array_mod_vec3_tess_eval
+KHR-GL43.constant_expressions.array_mod_vec4_float_tess_control
+KHR-GL43.constant_expressions.array_mod_vec4_float_tess_eval
+KHR-GL43.constant_expressions.array_mod_vec4_geometry
+KHR-GL43.constant_expressions.array_normalize_vec2_geometry
+KHR-GL43.constant_expressions.array_normalize_vec3_tess_eval
+KHR-GL43.constant_expressions.array_normalize_vec3_vertex
+KHR-GL43.constant_expressions.array_normalize_vec4_tess_eval
+KHR-GL43.constant_expressions.array_pow_float_fragment
+KHR-GL43.constant_expressions.array_pow_float_geometry
+KHR-GL43.constant_expressions.array_pow_float_vertex
+KHR-GL43.constant_expressions.array_pow_vec2_geometry
+KHR-GL43.constant_expressions.array_pow_vec3_tess_eval
+KHR-GL43.constant_expressions.array_pow_vec4_tess_eval
+KHR-GL43.constant_expressions.array_radians_float_geometry
+KHR-GL43.constant_expressions.array_radians_vec2_tess_eval
+KHR-GL43.constant_expressions.array_radians_vec2_vertex
+KHR-GL43.constant_expressions.array_radians_vec3_geometry
+KHR-GL43.constant_expressions.array_radians_vec4_tess_eval
+KHR-GL43.constant_expressions.array_round_vec2_geometry
+KHR-GL43.constant_expressions.array_round_vec3_geometry
+KHR-GL43.constant_expressions.array_round_vec4_vertex
+KHR-GL43.constant_expressions.array_sign_float_tess_eval
+KHR-GL43.constant_expressions.array_sign_vec2_tess_control
+KHR-GL43.constant_expressions.array_sign_vec2_tess_eval
+KHR-GL43.constant_expressions.array_sign_vec3_geometry
+KHR-GL43.constant_expressions.array_sign_vec3_vertex
+KHR-GL43.constant_expressions.array_sign_vec4_tess_eval
+KHR-GL43.constant_expressions.array_sin_float_tess_eval
+KHR-GL43.constant_expressions.array_sin_vec2_tess_control
+KHR-GL43.constant_expressions.array_sin_vec3_fragment
+KHR-GL43.constant_expressions.array_sin_vec3_tess_eval
+KHR-GL43.constant_expressions.array_sin_vec4_tess_eval
+KHR-GL43.constant_expressions.array_sqrt_float_vertex
+KHR-GL43.constant_expressions.array_sqrt_vec4_geometry
+KHR-GL43.constant_expressions.array_trunc_float_tess_eval
+KHR-GL43.constant_expressions.array_trunc_vec2_compute
+KHR-GL43.constant_expressions.array_trunc_vec2_tess_control
+KHR-GL43.constant_expressions.array_trunc_vec2_tess_eval
+KHR-GL43.constant_expressions.array_trunc_vec2_vertex
+KHR-GL43.constant_expressions.basic_abs_int_geometry
+KHR-GL43.constant_expressions.basic_abs_ivec2_geometry
+KHR-GL43.constant_expressions.basic_abs_ivec3_tess_eval
+KHR-GL43.constant_expressions.basic_abs_ivec4_tess_eval
+KHR-GL43.constant_expressions.basic_ceil_float_geometry
+KHR-GL43.constant_expressions.basic_ceil_float_tess_eval
+KHR-GL43.constant_expressions.basic_ceil_vec3_geometry
+KHR-GL43.constant_expressions.basic_ceil_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_ceil_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_clamp_vec2_float_fragment
+KHR-GL43.constant_expressions.basic_clamp_vec2_float_tess_eval
+KHR-GL43.constant_expressions.basic_clamp_vec2_fragment
+KHR-GL43.constant_expressions.basic_clamp_vec2_tess_control
+KHR-GL43.constant_expressions.basic_clamp_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_clamp_vec3_float_compute
+KHR-GL43.constant_expressions.basic_clamp_vec3_tess_control
+KHR-GL43.constant_expressions.basic_clamp_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_clamp_vec3_vertex
+KHR-GL43.constant_expressions.basic_clamp_vec4_geometry
+KHR-GL43.constant_expressions.basic_clamp_vec4_vertex
+KHR-GL43.constant_expressions.basic_cos_float_tess_control
+KHR-GL43.constant_expressions.basic_cos_vec2_tess_control
+KHR-GL43.constant_expressions.basic_cos_vec3_geometry
+KHR-GL43.constant_expressions.basic_cos_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_degrees_float_compute
+KHR-GL43.constant_expressions.basic_degrees_float_tess_eval
+KHR-GL43.constant_expressions.basic_degrees_vec2_geometry
+KHR-GL43.constant_expressions.basic_degrees_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_dot_float_geometry
+KHR-GL43.constant_expressions.basic_dot_vec2_vertex
+KHR-GL43.constant_expressions.basic_dot_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_dot_vec4_compute
+KHR-GL43.constant_expressions.basic_dot_vec4_geometry
+KHR-GL43.constant_expressions.basic_exp2_float_compute
+KHR-GL43.constant_expressions.basic_exp2_float_geometry
+KHR-GL43.constant_expressions.basic_exp2_vec2_geometry
+KHR-GL43.constant_expressions.basic_exp2_vec2_tess_control
+KHR-GL43.constant_expressions.basic_exp2_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_exp_float_compute
+KHR-GL43.constant_expressions.basic_exp_float_geometry
+KHR-GL43.constant_expressions.basic_exp_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_floor_float_geometry
+KHR-GL43.constant_expressions.basic_floor_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_floor_vec4_compute
+KHR-GL43.constant_expressions.basic_floor_vec4_geometry
+KHR-GL43.constant_expressions.basic_inversesqrt_float_fragment
+KHR-GL43.constant_expressions.basic_inversesqrt_float_geometry
+KHR-GL43.constant_expressions.basic_inversesqrt_vec2_geometry
+KHR-GL43.constant_expressions.basic_inversesqrt_vec3_geometry
+KHR-GL43.constant_expressions.basic_length_float_tess_eval
+KHR-GL43.constant_expressions.basic_length_vec3_compute
+KHR-GL43.constant_expressions.basic_length_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_log2_float_geometry
+KHR-GL43.constant_expressions.basic_log2_float_tess_control
+KHR-GL43.constant_expressions.basic_log2_vec2_geometry
+KHR-GL43.constant_expressions.basic_log2_vec2_tess_control
+KHR-GL43.constant_expressions.basic_log2_vec3_geometry
+KHR-GL43.constant_expressions.basic_log2_vec3_tess_control
+KHR-GL43.constant_expressions.basic_log2_vec4_fragment
+KHR-GL43.constant_expressions.basic_log2_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_log_float_geometry
+KHR-GL43.constant_expressions.basic_log_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_log_vec3_geometry
+KHR-GL43.constant_expressions.basic_log_vec3_vertex
+KHR-GL43.constant_expressions.basic_log_vec4_geometry
+KHR-GL43.constant_expressions.basic_log_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_max_float_geometry
+KHR-GL43.constant_expressions.basic_max_vec2_float_fragment
+KHR-GL43.constant_expressions.basic_max_vec2_float_tess_eval
+KHR-GL43.constant_expressions.basic_max_vec2_geometry
+KHR-GL43.constant_expressions.basic_max_vec3_fragment
+KHR-GL43.constant_expressions.basic_max_vec4_float_geometry
+KHR-GL43.constant_expressions.basic_max_vec4_fragment
+KHR-GL43.constant_expressions.basic_max_vec4_geometry
+KHR-GL43.constant_expressions.basic_max_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_min_float_tess_control
+KHR-GL43.constant_expressions.basic_min_float_vertex
+KHR-GL43.constant_expressions.basic_min_vec2_float_compute
+KHR-GL43.constant_expressions.basic_min_vec2_float_geometry
+KHR-GL43.constant_expressions.basic_min_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_min_vec4_float_compute
+KHR-GL43.constant_expressions.basic_min_vec4_float_geometry
+KHR-GL43.constant_expressions.basic_min_vec4_tess_control
+KHR-GL43.constant_expressions.basic_min_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_mod_float_geometry
+KHR-GL43.constant_expressions.basic_mod_vec2_float_geometry
+KHR-GL43.constant_expressions.basic_mod_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_mod_vec3_float_tess_eval
+KHR-GL43.constant_expressions.basic_mod_vec3_fragment
+KHR-GL43.constant_expressions.basic_mod_vec3_geometry
+KHR-GL43.constant_expressions.basic_mod_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_mod_vec4_float_compute
+KHR-GL43.constant_expressions.basic_mod_vec4_float_tess_eval
+KHR-GL43.constant_expressions.basic_mod_vec4_geometry
+KHR-GL43.constant_expressions.basic_normalize_float_compute
+KHR-GL43.constant_expressions.basic_normalize_float_geometry
+KHR-GL43.constant_expressions.basic_normalize_float_vertex
+KHR-GL43.constant_expressions.basic_normalize_vec3_geometry
+KHR-GL43.constant_expressions.basic_normalize_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_normalize_vec4_compute
+KHR-GL43.constant_expressions.basic_pow_float_geometry
+KHR-GL43.constant_expressions.basic_pow_float_tess_eval
+KHR-GL43.constant_expressions.basic_pow_vec2_geometry
+KHR-GL43.constant_expressions.basic_pow_vec2_vertex
+KHR-GL43.constant_expressions.basic_pow_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_radians_float_tess_eval
+KHR-GL43.constant_expressions.basic_radians_vec2_tess_control
+KHR-GL43.constant_expressions.basic_radians_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_radians_vec4_vertex
+KHR-GL43.constant_expressions.basic_round_float_tess_eval
+KHR-GL43.constant_expressions.basic_round_vec4_geometry
+KHR-GL43.constant_expressions.basic_round_vec4_vertex
+KHR-GL43.constant_expressions.basic_sign_vec2_geometry
+KHR-GL43.constant_expressions.basic_sign_vec4_tess_eval
+KHR-GL43.constant_expressions.basic_sin_float_geometry
+KHR-GL43.constant_expressions.basic_sin_vec2_geometry
+KHR-GL43.constant_expressions.basic_sin_vec3_fragment
+KHR-GL43.constant_expressions.basic_sin_vec3_geometry
+KHR-GL43.constant_expressions.basic_sin_vec3_tess_control
+KHR-GL43.constant_expressions.basic_sqrt_float_geometry
+KHR-GL43.constant_expressions.basic_sqrt_vec2_tess_control
+KHR-GL43.constant_expressions.basic_sqrt_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_sqrt_vec3_geometry
+KHR-GL43.constant_expressions.basic_sqrt_vec3_tess_eval
+KHR-GL43.constant_expressions.basic_sqrt_vec4_geometry
+KHR-GL43.constant_expressions.basic_sqrt_vec4_vertex
+KHR-GL43.constant_expressions.basic_trunc_float_fragment
+KHR-GL43.constant_expressions.basic_trunc_float_geometry
+KHR-GL43.constant_expressions.basic_trunc_vec2_tess_eval
+KHR-GL43.constant_expressions.basic_trunc_vec3_geometry
+KHR-GL43.constant_expressions.basic_trunc_vec4_compute
+KHR-GL43.constant_expressions.basic_trunc_vec4_geometry
+KHR-GL43.constant_expressions.basic_trunc_vec4_tess_eval
+KHR-GL43.copy_image.smoke_test
+KHR-GL43.explicit_uniform_location.subroutine-index-multiple-uniforms
+KHR-GL43.explicit_uniform_location.uniform-loc
+KHR-GL43.explicit_uniform_location.uniform-loc-arrays-nonspaced
+KHR-GL43.explicit_uniform_location.uniform-loc-implicit-in-some-stages
+KHR-GL43.explicit_uniform_location.uniform-loc-mix-with-implicit2
+KHR-GL43.indirect_parameters_tests.MultiDrawElementsIndirectCount
+KHR-GL43.packed_pixels.varied_rectangle.compressed_red
+KHR-GL43.packed_pixels.varied_rectangle.compressed_red_rgtc1
+KHR-GL43.packed_pixels.varied_rectangle.compressed_rg
+KHR-GL43.packed_pixels.varied_rectangle.compressed_rgba
+KHR-GL43.packed_pixels.varied_rectangle.compressed_signed_red_rgtc1
+KHR-GL43.packed_pixels.varied_rectangle.compressed_signed_rg_rgtc2
+KHR-GL43.packed_pixels.varied_rectangle.compressed_srgb
+KHR-GL43.packed_pixels.varied_rectangle.compressed_srgb_alpha
+KHR-GL43.packed_pixels.varied_rectangle.r8i
+KHR-GL43.packed_pixels.varied_rectangle.rg16f
+KHR-GL43.packed_pixels.varied_rectangle.rg16ui
+KHR-GL43.packed_pixels.varied_rectangle.rg32f
+KHR-GL43.packed_pixels.varied_rectangle.rg8ui
+KHR-GL43.packed_pixels.varied_rectangle.rgb16
+KHR-GL43.packed_pixels.varied_rectangle.rgb16_snorm
+KHR-GL43.packed_pixels.varied_rectangle.rgb16ui
+KHR-GL43.packed_pixels.varied_rectangle.rgb5
+KHR-GL43.packed_pixels.varied_rectangle.rgb8ui
+KHR-GL43.packed_pixels.varied_rectangle.rgb9_e5
+KHR-GL43.packed_pixels.varied_rectangle.rgba16f
+KHR-GL43.packed_pixels.varied_rectangle.rgba16_snorm
+KHR-GL43.packed_pixels.varied_rectangle.rgba16ui
+KHR-GL43.packed_pixels.varied_rectangle.rgba8i
+KHR-GL43.packed_pixels.varied_rectangle.rgba8ui
+KHR-GL43.packed_pixels.varied_rectangle.srgb8_alpha8
+KHR-GL43.shader_atomic_counters.advanced-usage-ubo
+KHR-GL43.shader_bitfield_operation.findLSB.ivec4_0
+KHR-GL43.shader_bitfield_operation.findMSB.int_0
+KHR-GL43.shader_bitfield_operation.findMSB.ivec2_0
+KHR-GL43.shader_bitfield_operation.findMSB.ivec3_3
+KHR-GL43.shader_bitfield_operation.findMSB.uvec3_0
+KHR-GL43.shader_bitfield_operation.findMSB.uvec4_0
+KHR-GL43.shader_image_load_store.advanced-sync-imageAccess2
+KHR-GL43.shader_image_load_store.early-fragment-tests
+KHR-GL43.shader_image_size.advanced-ms-cs-float
+KHR-GL43.shader_image_size.advanced-ms-cs-uint
+KHR-GL43.shader_image_size.advanced-ms-gs-float
+KHR-GL43.shader_image_size.advanced-ms-tcs-float
+KHR-GL43.shader_image_size.advanced-ms-tes-uint
+KHR-GL43.shader_image_size.advanced-ms-vs-float
+KHR-GL43.shader_image_size.basic-ms-fs-uint
+KHR-GL43.shader_image_size.basic-nonMS-cs-int
+KHR-GL43.shader_image_size.basic-nonMS-tes-uint
+KHR-GL43.shader_storage_buffer_object.advanced-indirectAddressing-case1
+KHR-GL43.shader_storage_buffer_object.advanced-matrix
+KHR-GL43.shader_storage_buffer_object.advanced-switchBuffers-cs
+KHR-GL43.shader_storage_buffer_object.advanced-unsizedArrayLength-cs-std140-struct
+KHR-GL43.shader_storage_buffer_object.advanced-unsizedArrayLength-cs-std430-vec-bindrangeOffset
+KHR-GL43.shader_storage_buffer_object.advanced-unsizedArrayLength-cs-std430-vec-pad
+KHR-GL43.shader_storage_buffer_object.advanced-unsizedArrayLength-fs-std140-matR
+KHR-GL43.shader_storage_buffer_object.advanced-usage-operators
+KHR-GL43.shader_storage_buffer_object.advanced-write-geometry
+KHR-GL43.shader_storage_buffer_object.basic-atomic-case4-cs
+KHR-GL43.shader_storage_buffer_object.basic-basic
+KHR-GL43.shader_storage_buffer_object.basic-matrixOperations-case3-cs
+KHR-GL43.shader_storage_buffer_object.basic-matrixOperations-case7-vs
+KHR-GL43.shader_storage_buffer_object.basic-operations-case1-vs
+KHR-GL43.shader_storage_buffer_object.basic-std140Layout-case4-vs
+KHR-GL43.shader_storage_buffer_object.basic-stdLayout-case2-cs
+KHR-GL43.shader_storage_buffer_object.basic-stdLayout-case3-vs
+KHR-GL43.shader_storage_buffer_object.basic-stdLayout_UBO_SSBO-case2-vs
+KHR-GL43.shading_language_420pack.binding_image_single
+KHR-GL43.shading_language_420pack.binding_samplers
+KHR-GL43.shading_language_420pack.binding_sampler_single
+KHR-GL43.shading_language_420pack.binding_uniform_block_array
+KHR-GL43.shading_language_420pack.binding_uniform_single_block
+KHR-GL43.shading_language_420pack.qualifier_order_block
+KHR-GL43.shading_language_420pack.utf8_characters
+KHR-GL43.texture_size_promotion.functional
+KHR-GL43.vertex_attrib_binding.basic-input-case4
+KHR-GL43.vertex_attrib_binding.basic-input-case8
+KHR-GL43.vertex_attrib_binding.basic-inputI-case2
+
+shaders@glsl-uniform-interstage-limits@subdivide 5
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges
+spec@arb_fragment_layer_viewport@layer-gs-writes-in-range
+spec@arb_fragment_layer_viewport@viewport-gs-writes-in-range
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@Basic
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@MS4
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@Per-sample
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glScissor
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glViewport
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-roundup-samples
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed
+spec@arb_shader_atomic_counter_ops@execution@add
+spec@arb_shader_atomic_counter_ops@execution@atomic-counter-array-out-of-bounds-access
+spec@arb_shader_image_load_store@execution@image-array-out-of-bounds-access-store
+spec@arb_shader_image_load_store@execution@image-array-out-of-bounds-access-load
+spec@arb_shader_storage_buffer_object@execution@memory-layouts-struct-deref
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicadd-int
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicexchange-int
+spec@arb_shader_storage_buffer_object@layout-std140-write-shader
+spec@arb_timer_query@query gl_timestamp
+spec@arb_timer_query@timestamp-get
+spec@ext_timer_query@time-elapsed
+spec@ext_framebuffer_blit@fbo-blit-check-limits
+spec@ext_framebuffer_blit@fbo-sys-blit
+spec@ext_framebuffer_blit@fbo-sys-sub-blit
+spec@oes_viewport_array@viewport-gs-writes-in-range
+spec@!opengl 1.0@gl-1.0-drawbuffer-modes
+spec@!opengl 1.0@gl-1.0-front-invalidate-back
+spec@!opengl 1.0@gl-1.0-swapbuffers-behavior
+spec@!opengl 1.1@masked-clear
+spec@!opengl 1.1@ppgtt_memory_alignment
+spec@!opengl 1.1@read-front
+spec@!opengl 1.1@read-front clear-front-first
+spec@!opengl 1.1@read-front clear-front-first samples=2
+spec@!opengl 1.1@read-front clear-front-first samples=4
+spec@!opengl 1.1@read-front samples=2
+spec@!opengl 1.1@read-front samples=4
+spec@!opengl 2.0@vertex-program-two-side enabled front front2@tes-out and fs
+spec@!opengl 2.0@vertex-program-two-side enabled front front2@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front2 back2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front2 back2@vs- gs and fs
+spec@!opengl 3.0@gl30basic
diff --git a/.gitlab-ci/expectations/host/virgl-gl-skips.txt b/.gitlab-ci/expectations/host/virgl-gl-skips.txt
new file mode 100644
index 00000000..6c282d15
--- /dev/null
+++ b/.gitlab-ci/expectations/host/virgl-gl-skips.txt
@@ -0,0 +1,49 @@
+# Sometimes crashes, e.g. https://gitlab.freedesktop.org/kusma/mesa/-/jobs/4109419
+dEQP-GLES31.functional.compute.basic.empty
+
+# Take too long to run
+KHR-GL43.texture_swizzle.smoke
+KHR-GL43.texture_swizzle.functional
+KHR-GL43.copy_image.functional
+
+glx@.*
+
+# Skip because we don't care for fp64 for now
+spec@arb_gpu_shader_fp64@.*
+
+# Skip TS tests for now
+spec@arb_tessellation_shader@.*
+
+# Skip, this is expected
+# Refer to src/mesa/main/drawpix.c:100
+spec@ext_texture_integer@fbo-integer
+
+# Fails on iris too
+spec@arb_direct_state_access@gettextureimage-formats
+
+# Skip these as they get skipped with the Intel driver + vtest
+spec@arb_shader_texture_image_samples@builtin-image*
+
+# Skip for now
+spec@arb_vertex_attrib_64bit.*
+
+# Reported as crash, but no obvious crash
+spec@intel_shader_integer_functions2@execution@built-in-functions*
+spec@arb_vertex_program.*
+
+# Crashes when lowering GLSL to TGSI, but this is going away with the GLSL-NIR-TGSI lowering coming soon
+spec@glsl-4.00@execution@inout.*
+
+# Skip because they pass with the Intel driver
+spec@arb_shader_texture_image_samples@texturesamples@.*
+spec@nv_primitive_restart@primitive-restart-draw-mode-polygon
+spec@nv_primitive_restart@primitive-restart-draw-mode-quad_strip
+spec@nv_primitive_restart@primitive-restart-draw-mode-quads
+spec@glsl-4.00@execution@conversion.*
+spec@ext_framebuffer_multisample@clip-and-scissor-blit.*
+
+# Skip any fp64 tests; they are not working properly, and there is
+# no priority on fixing this
+spec@glsl-4.*@*dmat*
+spec@glsl-4.*@*dvec*
+spec@glsl-4.*@*double*
diff --git a/.gitlab-ci/expectations/host/virgl-gles-fails.txt b/.gitlab-ci/expectations/host/virgl-gles-fails.txt
new file mode 100644
index 00000000..e26206fe
--- /dev/null
+++ b/.gitlab-ci/expectations/host/virgl-gles-fails.txt
@@ -0,0 +1,3173 @@
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_2,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_x,Fail
+
+fast_color_clear@fcc-front-buffer-distraction,ExpectedFail
+shaders@glsl-fs-pointcoord,Fail
+shaders@glsl-novertexdata,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges,Fail
+shaders@point-vertex-id divisor,Fail
+shaders@point-vertex-id gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_instanceid,Fail
+shaders@point-vertex-id gl_vertexid divisor,Fail
+shaders@point-vertex-id gl_vertexid,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid,Fail
+spec@arb_clear_texture@arb_clear_texture-depth,Fail
+spec@arb_clear_texture@arb_clear_texture-sized-formats,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-clear,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-drawpixels,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-probepixel,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-readpixels,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-render,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-render-fog,Fail
+spec@arb_compute_shader@execution@min-dvec4-double-large-group-size,Fail
+spec@arb_copy_image@arb_copy_image-formats,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_ALPHA16/Destination: GL_ALPHA16,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RED_RGTC1/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RED_RGTC1/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_BPTC_UNORM/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RG_RGTC2/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RED_RGTC1/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RED_RGTC1/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RG_RGTC2/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16I/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16_SNORM/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16UI/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R32F/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8I/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8_SNORM/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8UI/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16I/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16_SNORM/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16UI/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8I/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8_SNORM/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8UI/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16I/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16_SNORM/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16UI/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8I/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8_SNORM/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8UI/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8I/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8_SNORM/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8UI/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH32F_STENCIL8- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH_COMPONENT32F- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH32F_STENCIL8- swizzled- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32F- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT16- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT24- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT32- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT16- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT24- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32- swizzled- border color only,Fail
+spec@arb_draw_indirect@arb_draw_indirect-draw-elements-prim-restart-ugly,Fail
+spec@arb_enhanced_layouts@linker@component-layout@intrastage-vs,Fail
+spec@arb_enhanced_layouts@linker@component-layout@vs-to-fs,Fail
+spec@arb_enhanced_layouts@matching_basic_types_3_loc_1,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_1,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_1_loc_1,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_2,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_2_loc_1,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_3,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_3_loc_1,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor@GL_RGB565- border color only,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled@GL_RGB565- swizzled- border color only,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@Basic,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@discard,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@fb resize,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@glScissor,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@glViewport,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@MS4,Fail
+spec@arb_framebuffer_object@fbo-blit-scaled-linear,ExpectedFail
+spec@arb_framebuffer_object@fbo-gl_pointcoord,Fail
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed,Crash
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-get,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-emitstreamvertex_nodraw,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-minmax,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-tf-wrong-stream-value,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-xfb-streams,Fail
+spec@arb_gpu_shader5@linker@stream-different-zero-gs-fs,Fail
+spec@arb_gpu_shader5@linker@stream-invalid-prim-output,Fail
+spec@arb_occlusion_query@occlusion_query_conform,Fail
+spec@arb_occlusion_query@occlusion_query_conform@GetObjivAval_multi1,Fail
+spec@arb_occlusion_query@occlusion_query_conform@GetObjivAval_multi2,Fail
+spec@arb_occlusion_query@occlusion_query,Fail
+spec@arb_occlusion_query@occlusion_query_meta_fragments,Fail
+spec@arb_occlusion_query@occlusion_query_meta_no_fragments,Fail
+spec@arb_occlusion_query@occlusion_query_meta_save,Fail
+spec@arb_point_parameters@arb_point_parameters-point-attenuation@Aliased combinations,Fail
+spec@arb_point_parameters@arb_point_parameters-point-attenuation@Antialiased combinations,Fail
+spec@arb_point_parameters@arb_point_parameters-point-attenuation,Fail
+spec@arb_point_sprite@arb_point_sprite-checkerboard,Fail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex,ExpectedFail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex@'vs_input2[1][0]' on GL_PROGRAM_INPUT,ExpectedFail
+spec@arb_provoking_vertex@arb-provoking-vertex-render,Fail
+spec@arb_provoking_vertex@clipped-strip-first,Fail
+spec@arb_sample_shading@samplemask 2@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all,Fail
+spec@arb_sample_shading@samplemask 2 all@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all@noms partition,Fail
+spec@arb_sample_shading@samplemask 2 all@sample mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2,Fail
+spec@arb_sample_shading@samplemask 2@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2@noms partition,Fail
+spec@arb_sample_shading@samplemask 2@sample mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@0.250000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@0.250000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all,Fail
+spec@arb_sample_shading@samplemask 4 all@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@noms partition,Fail
+spec@arb_sample_shading@samplemask 4 all@sample mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4,Fail
+spec@arb_sample_shading@samplemask 4@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@noms partition,Fail
+spec@arb_sample_shading@samplemask 4@sample mask_in_one,Fail
+spec@arb_seamless_cube_map@arb_seamless_cubemap,Fail
+spec@arb_shader_image_load_store@early-z,ExpectedFail
+spec@arb_shader_image_load_store@early-z@occlusion query test/early-z pass,ExpectedFail
+spec@arb_shader_image_load_store@early-z@occlusion query test/late-z pass,Fail
+spec@arb_shader_image_load_store@execution@disable_early_z,Fail
+spec@arb_shader_image_load_store@max-images@Combined max image uniforms test,Fail
+spec@arb_shader_image_load_store@max-images,Fail
+spec@arb_shader_image_load_store@max-size,Fail
+spec@arb_shader_image_load_store@max-size@image1DArray max size test/16384x8x1x1,Fail
+spec@arb_shader_image_load_store@max-size@image1DArray max size test/8x2048x1x1,Fail
+spec@arb_shader_image_load_store@max-size@image1D max size test/16384x1x1x1,Fail
+spec@arb_shader_image_load_store@restrict,Fail
+spec@arb_shader_image_load_store@restrict@no qualifier image aliasing test,Fail
+spec@arb_shader_image_load_store@semantics,Fail
+spec@arb_shader_image_load_store@semantics@imageStore/Vertex shader/rgba32f/image1D test,Fail
+spec@arb_shader_storage_buffer_object@execution@indirect,Fail
+spec@arb_shader_storage_buffer_object@execution@memory-layouts-struct-deref,Fail
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomiccompswap-int,ExpectedFail
+spec@arb_shader_storage_buffer_object@layout-std140-write-shader,Fail
+spec@arb_shader_storage_buffer_object@maxblocks,Fail
+spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgradcube,ExpectedFail
+spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgrad,ExpectedFail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 3d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb cube,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 3d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod cube,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 1d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 2d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 3d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 1d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 2d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 3d,Fail
+spec@arb_texture_compression_bptc@compressedteximage gl_compressed_rgb_bptc_signed_float,Fail
+spec@arb_texture_compression_bptc@compressedteximage gl_compressed_rgb_bptc_unsigned_float,Fail
+spec@arb_texture_compression_bptc@compressedteximage gl_compressed_srgb_alpha_bptc_unorm,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_INTENSITY- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGBA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGB- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_INTENSITY- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB- swizzled- border color only,Fail
+spec@arb_texture_cube_map_array@arb_texture_cube_map_array-sampler-cube-array-shadow,Fail
+spec@arb_texture_cube_map_array@fbo-generatemipmap-cubemap array s3tc_dxt1,Fail
+spec@arb_texture_float@fbo-blending-formats,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_RGB32F,Fail
+spec@arb_texture_float@fbo-clear-formats,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-fast-clear,Fail
+spec@arb_texture_float@multisample-fast-clear gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 2 gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 4 gl_arb_texture_float,Fail
+spec@arb_texture_float@texwrap formats bordercolor,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB32F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA32F- swizzled- border color only,Fail
+spec@arb_texture_multisample@arb_texture_multisample-dsa-texelfetch,Fail
+spec@arb_texture_multisample@arb_texture_multisample-dsa-texelfetch@Texture type: GL_RGB9_E5,Fail
+spec@arb_texture_query_lod@execution@fs-texturequerylod-nearest-biased,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor@GL_RGBA8- border color only,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R32F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG32F- swizzled- border color only,Fail
+spec@arb_texture_view@rendering-layers-image,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image1DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image2DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of imageCubeArray,Fail
+spec@arb_vertex_attrib_64bit@arb_vertex_attrib_64bit-overlapping-locations api,Fail
+spec@arb_vertex_attrib_64bit@arb_vertex_attrib_64bit-overlapping-locations shader,Fail
+spec@arb_vertex_attrib_64bit@execution@vs-fp64-input-trunc,Fail
+spec@arb_vertex_attrib_64bit@execution@vs-fs-pass-vertex-attrib,Fail
+spec@arb_vertex_attrib_64bit@execution@vs-test-attrib-location,Fail
+spec@arb_vertex_buffer_object@pos-array,Fail
+spec@egl 1.4@eglterminate then unbind context,ExpectedFail
+spec@egl_ext_protected_content@conformance,ExpectedFail
+spec@egl_khr_gl_image@egl_khr_gl_renderbuffer_image-clear-shared-image gl_depth_component24,ExpectedFail
+spec@egl_khr_surfaceless_context@viewport,ExpectedFail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float,Fail
+spec@ext_direct_state_access@renderbuffer,Fail
+spec@ext_direct_state_access@renderbuffer@GetNamedRenderbufferParameterivEXT,Fail
+spec@ext_framebuffer_multisample@alpha-blending-after-rendering 2,Fail
+spec@ext_framebuffer_multisample@alpha-to-coverage-no-draw-buffer-zero 2,Fail
+spec@ext_framebuffer_multisample@alpha-to-one-dual-src-blend 2,Fail
+spec@ext_framebuffer_multisample@alpha-to-one-dual-src-blend 4,Fail
+spec@ext_framebuffer_multisample@blit-flipped 2 x,Fail
+spec@ext_framebuffer_multisample@blit-flipped 2 y,Fail
+spec@ext_framebuffer_multisample@blit-mismatched-formats,Fail
+spec@ext_framebuffer_multisample@draw-buffers-alpha-to-coverage 2,Fail
+spec@ext_framebuffer_multisample@draw-buffers-alpha-to-one 2,Fail
+spec@ext_framebuffer_multisample@draw-buffers-alpha-to-one 4,Fail
+spec@ext_framebuffer_multisample@enable-flag,Fail
+spec@ext_framebuffer_multisample@fast-clear,Fail
+spec@ext_framebuffer_multisample@formats 2,Fail
+spec@ext_framebuffer_multisample@formats 4,Fail
+spec@ext_framebuffer_multisample@formats all_samples,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-edges,ExpectedFail
+spec@ext_framebuffer_multisample@interpolation 2 non-centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 2 non-centroid-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-edges,ExpectedFail
+spec@ext_framebuffer_multisample@interpolation 4 non-centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 non-centroid-disabled,Fail
+spec@ext_framebuffer_multisample@line-smooth 2,Fail
+spec@ext_framebuffer_multisample@multisample-blit 2 color,Fail
+spec@ext_framebuffer_multisample@multisample-blit 2 depth,Fail
+spec@ext_framebuffer_multisample@multisample-blit 2 stencil,Fail
+spec@ext_framebuffer_multisample@multisample-blit 4 depth,Fail
+spec@ext_framebuffer_multisample@multisample-blit 4 stencil,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth combined,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth-computed combined,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth single,Fail
+spec@ext_framebuffer_multisample@no-color 2 stencil combined,Fail
+spec@ext_framebuffer_multisample@no-color 2 stencil single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth combined,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth-computed combined,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth single,Fail
+spec@ext_framebuffer_multisample@no-color 4 stencil combined,Fail
+spec@ext_framebuffer_multisample@no-color 4 stencil single,Fail
+spec@ext_framebuffer_multisample@point-smooth 2,Fail
+spec@ext_framebuffer_multisample@polygon-smooth 2,Fail
+spec@ext_framebuffer_multisample@sample-alpha-to-one 2,Fail
+spec@ext_framebuffer_multisample@sample-alpha-to-one 4,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 inverted,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 non-inverted,Fail
+spec@ext_framebuffer_multisample@unaligned-blit 2 color downsample,Fail
+spec@ext_framebuffer_multisample@unaligned-blit 2 color msaa,Fail
+spec@ext_framebuffer_multisample@upsample 2 stencil,Fail
+spec@ext_framebuffer_multisample@upsample 4 stencil,Fail
+spec@ext_framebuffer_object@fbo-blending-formats,Fail
+spec@ext_framebuffer_object@fbo-blending-formats@GL_ALPHA12,Fail
+spec@ext_framebuffer_object@fbo-blending-formats@GL_ALPHA16,Fail
+spec@ext_framebuffer_object@fbo-blending-snorm,Fail
+spec@ext_framebuffer_object@fbo-clear-formats,Fail
+spec@ext_framebuffer_object@fbo-clear-formats@GL_ALPHA12,Fail
+spec@ext_framebuffer_object@fbo-clear-formats@GL_ALPHA16,Fail
+spec@ext_framebuffer_object@fbo-colormask-formats,Fail
+spec@ext_framebuffer_object@fbo-colormask-formats@GL_ALPHA12,Fail
+spec@ext_framebuffer_object@fbo-colormask-formats@GL_ALPHA16,Fail
+spec@ext_framebuffer_object@fbo-fast-clear,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT24/GL_FLOAT,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT32/GL_UNSIGNED_INT,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT/GL_FLOAT,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-clear-and-render,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-rendering,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor@GL_DEPTH24_STENCIL8- border color only,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled@GL_DEPTH24_STENCIL8- swizzled- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor,Fail
+spec@ext_packed_float@texwrap formats bordercolor@GL_R11F_G11F_B10F- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled@GL_R11F_G11F_B10F- swizzled- border color only,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-dlist@call,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-dlist@compile and execute,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-dlist,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw_gles2,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw_gles2@negative clamp,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw_gles2@positive clamp,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw@negative clamp,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw@positive clamp,Fail
+spec@ext_provoking_vertex@provoking-vertex,Fail
+spec@ext_texture_array@fbo-generatemipmap-array rgb9_e5,Fail
+spec@ext_texture_array@fbo-generatemipmap-array s3tc_dxt1,Fail
+spec@ext_texture_array@gen-mipmap,Fail
+spec@ext_texture_compression_rgtc@fbo-generatemipmap-formats-signed,Fail
+spec@ext_texture_compression_rgtc@fbo-generatemipmap-formats-signed@GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@ext_texture_compression_rgtc@fbo-generatemipmap-formats-signed@GL_COMPRESSED_SIGNED_RED_RGTC1 NPOT,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets 2d_array s3tc,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets cube_array s3tc,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@fbo-blending,Fail
+spec@ext_texture_integer@multisample-fast-clear gl_ext_texture_integer,Fail
+spec@ext_texture_integer@texwrap formats bordercolor,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_lod_bias@lodbias,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor@GL_RGB9_E5- border color only,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled@GL_RGB9_E5- swizzled- border color only,Fail
+spec@ext_texture_snorm@multisample-formats 2 gl_ext_texture_snorm,Fail
+spec@ext_texture_snorm@multisample-formats 4 gl_ext_texture_snorm,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB- swizzled- border color only,Fail
+spec@ext_transform_feedback@builtin-varyings gl_culldistance,Fail
+spec@ext_transform_feedback@immediate-reuse-index-buffer,Fail
+spec@ext_transform_feedback@immediate-reuse-uniform-buffer,Fail
+spec@glsl-1.10@execution@samplers@glsl-fs-shadow2d-clamp-z,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 1d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 1dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 2d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 2dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 3d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() cube,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 1d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 1d_projvec4,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 1dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 2d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 2d_projvec4,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 2dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 3d,Fail
+spec@glsl-1.30@execution@fs-texturelod-miplevels-biased,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cubearray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cubearrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cube,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cubeshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad cubearray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad cube,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad cubeshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod cubearray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod cube,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 3d,Fail
+spec@glsl-1.30@execution@vs-texturelod-miplevels-biased,Fail
+spec@glsl-1.50@built-in constants,Fail
+spec@glsl-1.50@built-in constants@gl_MaxGeometryOutputComponents,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_line_loop other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_line_strip_adjacency other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_line_strip other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_points other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_triangle_fan other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_triangle_strip other,Fail
+spec@glsl-1.50@execution@geometry@tri-strip-ordering-with-prim-restart gl_triangle_strip_adjacency other,Fail
+spec@glsl-1.50@execution@geometry@tri-strip-ordering-with-prim-restart gl_triangle_strip other,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-first-vertex,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quads,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quad-strip,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-strip-first-vertex,Fail
+spec@glsl-3.30@built-in constants,Fail
+spec@glsl-3.30@built-in constants@gl_MaxGeometryOutputComponents,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec2-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec3-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec4-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-cross-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-determinant-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-determinant-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-determinant-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-equal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-equal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-equal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-faceforward-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inverse-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inverse-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inverse-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-double-double-bool,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec2-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec3-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec4-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-notequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-notequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-notequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec2-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec3-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec4-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-cross-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-determinant-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-determinant-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-determinant-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-equal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-equal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-equal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-faceforward-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inverse-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inverse-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inverse-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-double-double-bool,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec3-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec4-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-notequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-notequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-notequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec2-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec3-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec4-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-cross-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-determinant-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-determinant-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-determinant-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-equal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-equal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-equal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-faceforward-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inverse-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inverse-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inverse-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-double-double-bool,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec2-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec3-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec4-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-notequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-notequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-notequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bool-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat2-mat2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat2x3-mat2x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat2x4-mat2x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat3-mat3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat3x2-mat3x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat3x4-mat3x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat4-mat4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat4x2-mat4x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat4x3-mat4x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-bool,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-float,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-int,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-uint,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-ivec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-uvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-vec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-ivec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-uvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-vec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-ivec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-uvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-vec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-float-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-int-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-ivec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-ivec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-ivec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat2-dmat2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat3-dmat3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat4-dmat4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uint-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-vec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-vec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-vec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bool-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat2-mat2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat2x3-mat2x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat2x4-mat2x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat3-mat3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat3x2-mat3x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat3x4-mat3x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat4-mat4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat4x2-mat4x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat4x3-mat4x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-bool,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-float,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-int,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-uint,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-ivec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-uvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-vec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-ivec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-uvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-vec3,ExpectedFail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-ivec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-uvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-vec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-float-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-int-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-ivec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-ivec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-ivec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat2-dmat2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat3-dmat3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat4-dmat4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uint-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-vec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-vec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-vec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bool-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat2-mat2,ExpectedFail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat2x3-mat2x3,ExpectedFail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat2x4-mat2x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat3-mat3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat3x2-mat3x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat3x4-mat3x4,ExpectedFail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat4-mat4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat4x2-mat4x2,ExpectedFail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat4x3-mat4x3,ExpectedFail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-bool,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-float,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-int,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-uint,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-ivec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-uvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-vec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-ivec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-uvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-vec3,ExpectedFail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-ivec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-uvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-vec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-float-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-int-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-ivec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-ivec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-ivec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat2-dmat2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat3-dmat3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat4-dmat4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uint-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-vec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-vec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-vec4-dvec4,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-dvec3@2@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-float-and-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1@2-s2@2-s3@2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat3x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat4x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat4x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat4x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@3-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@4-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double-float-double@2-float@3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec2@3-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec2@4-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@2@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@2@2-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-float-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-float-float-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-float-float-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-float-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec2-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec3-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-float-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-s3@2-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-s3@2-vec3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-vec2-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-vec3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@3-double-float-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@3-dvec2-float-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@3-dvec3-float-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-double-s3-float-s4-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2@2-float-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2@2-vec2-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2@2-vec3-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-vec2-and-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-vec3-and-double@3@2,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@khr_texture_compression_astc@array-gl@12x12 Block Dim,ExpectedFail
+spec@khr_texture_compression_astc@array-gl@5x5 Block Dim,ExpectedFail
+spec@khr_texture_compression_astc@array-gles@12x12 Block Dim,ExpectedFail
+spec@khr_texture_compression_astc@array-gles@5x5 Block Dim,ExpectedFail
+spec@khr_texture_compression_astc@array-gles,ExpectedFail
+spec@khr_texture_compression_astc@array-gl,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles ldr,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles ldr@LDR Profile,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles srgb,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp@sRGB decode full precision,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles srgb-sd,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles srgb-sd@sRGB skip decode,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gles srgb@sRGB decode,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl ldr,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl ldr@LDR Profile,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl srgb,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp@sRGB decode full precision,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl srgb-sd,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl srgb-sd@sRGB skip decode,ExpectedFail
+spec@khr_texture_compression_astc@miptree-gl srgb@sRGB decode,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles ldr,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles ldr@LDR Profile,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp@sRGB decode full precision,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb@sRGB decode,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl ldr,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl ldr@LDR Profile,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp@sRGB decode full precision,ExpectedFail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb@sRGB decode,ExpectedFail
+spec@khr_texture_compression_astc@void-extent-dl-bug,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color,Fail
+spec@nv_copy_image@nv_copy_image-formats,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_ALPHA16/Destination: GL_ALPHA16,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RED_RGTC1/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_BPTC_UNORM/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RG_RGTC2/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RED_RGTC1/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RG_RGTC2/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_R16_SNORM/Destination: GL_R16_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_R8_SNORM/Destination: GL_R8_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGB16_SNORM/Destination: GL_RGB16_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGB8_SNORM/Destination: GL_RGB8_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_RGBA16_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGBA8_SNORM/Destination: GL_RGBA8_SNORM,Fail
+spec@nv_primitive_restart@primitive-restart-disable_vbo,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_combined_vertex_and_index,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_index_only,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_separate_vertex_and_index,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_vertex_only,Fail
+spec@nv_read_depth@read_depth_gles3,Fail
+spec@oes_egl_image_external_essl3@oes_egl_image_external_essl3,ExpectedFail
+spec@oes_egl_image_external_essl3@oes_egl_image_external_essl3@oes_egl_image_external_essl3_imageLoad,ExpectedFail
+spec@oes_egl_image_external_essl3@oes_egl_image_external_essl3@oes_egl_image_external_essl3_imageStore,ExpectedFail
+spec@oes_point_sprite@arb_point_sprite-checkerboard_gles1,Fail
+spec@!opengl 1.0@gl-1.0-drawbuffer-modes,ExpectedFail
+spec@!opengl 1.0@gl-1.0-edgeflag-const,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag-quads,Fail
+spec@!opengl 1.0@gl-1.0-logicop,Fail
+spec@!opengl 1.0@gl-1.0-logicop@GL_NOOP,Fail
+spec@!opengl 1.0@gl-1.0-logicop@GL_NOOP_MSAA,Fail
+spec@!opengl 1.0@gl-1.0-spot-light,Fail
+spec@!opengl 1.0@gl-1.0-swapbuffers-behavior,ExpectedFail
+spec@!opengl 1.0@rasterpos,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_gs_linked,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_tes_linked,Fail
+spec@!opengl 1.1@clipflat,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@depthstencil-default_fb-blit samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-blit samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-float-and-ushort samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-float-and-ushort samples=4,Fail
+spec@!opengl 1.1@draw-pixels,Fail
+spec@!opengl 1.1@line-flat-clip-color,Fail
+spec@!opengl 1.1@linestipple@Factor 2x,Fail
+spec@!opengl 1.1@linestipple@Factor 3x,Fail
+spec@!opengl 1.1@linestipple,Fail
+spec@!opengl 1.1@linestipple@Line loop,Fail
+spec@!opengl 1.1@linestipple@Line strip,Fail
+spec@!opengl 1.1@linestipple@Restarting lines within a single Begin-End block,Fail
+spec@!opengl 1.1@point-line-no-cull,Fail
+spec@!opengl 1.1@polygon-mode-facing,Fail
+spec@!opengl 1.1@polygon-mode,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on bottom edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on left edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on bottom edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on left edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on bottom edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on left edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 5: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 5: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 5: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset,Fail
+spec@!opengl 1.1@read-front clear-front-first,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=2,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=4,Crash
+spec@!opengl 1.1@read-front,Crash
+spec@!opengl 1.1@read-front samples=2,Crash
+spec@!opengl 1.1@read-front samples=4,Crash
+spec@!opengl 1.1@teximage-colors gl_alpha16@Exact upload-download of GL_ALPHA16,Fail
+spec@!opengl 1.1@teximage-colors gl_rgba,Fail
+spec@!opengl 1.1@teximage-colors gl_rgba@GL_RGBA texture with GL_BGRA and GL_UNSIGNED_INT_2_10_10_10_REV,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE6_ALPHA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_R3_G3_B2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10_A2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5_A1- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE6_ALPHA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_R3_G3_B2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10_A2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5_A1- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA8- swizzled- border color only,Fail
+spec@!opengl 1.1@windowoverlap,Fail
+spec@!opengl 1.2@copyteximage 3d,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.4@gl-1.4-tex1d-2dborder,Fail
+spec@!opengl 1.4@tex-miplevel-selection-lod-bias,Fail
+spec@!opengl 1.5@depth-tex-compare,Fail
+spec@!opengl 1.5@draw-elements-user,Fail
+spec@!opengl 1.5@draw-vertices,Crash
+spec@!opengl 1.5@draw-vertices-user,Crash
+spec@!opengl 2.0@gl-2.0-edgeflag,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag-immediate,Fail
+spec@!opengl 2.0@gl-2.0-large-point-fs,Fail
+spec@!opengl 2.0@gl-2.0-vertexattribpointer,Crash
+spec@!opengl 2.0@occlusion-query-discard,Fail
+spec@!opengl 3.0@sampler-cube-shadow,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims cull-back pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims cull-front pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims line cull-back pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims line cull-front pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims pv-first,Fail
+spec@!opengl 3.2@layered-rendering@clear-color-mismatched-layer-count,Fail
+spec@!opengl 3.2@minmax,Fail
+spec@!opengl 3.2@pointsprite-coord,Fail
+spec@!opengl 3.2@pointsprite-origin,Fail
+spec@!opengl 3.3@minmax,Fail
+spec@!opengl es 3.0@gles-3.0-transform-feedback-uniform-buffer-object,Fail
diff --git a/.gitlab-ci/expectations/host/virgl-gles-flakes.txt b/.gitlab-ci/expectations/host/virgl-gles-flakes.txt
new file mode 100644
index 00000000..4aa4035b
--- /dev/null
+++ b/.gitlab-ci/expectations/host/virgl-gles-flakes.txt
@@ -0,0 +1,49 @@
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8
+
+shaders@glsl-uniform-interstage-limits@subdivide 5
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges
+spec@arb_fragment_layer_viewport@viewport-gs-writes-in-range
+spec@arb_fragment_layer_viewport@layer-gs-writes-in-range
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@Basic
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@MS4
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@Per-sample
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glScissor
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glViewport
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-roundup-samples
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed
+spec@arb_shader_atomic_counter_ops@execution@add
+spec@arb_shader_atomic_counters@function-argument
+spec@arb_shader_image_load_store@atomicity
+spec@arb_shader_image_load_store@atomicity@imageAtomicAdd
+spec@arb_shader_image_load_store@atomicity@imageAtomicAnd
+spec@arb_shader_image_load_store@atomicity@imageAtomicCompSwap
+spec@arb_shader_image_load_store@atomicity@imageAtomicExchange
+spec@arb_shader_image_load_store@atomicity@imageAtomicMax
+spec@arb_shader_image_load_store@atomicity@imageAtomicMin
+spec@arb_shader_image_load_store@atomicity@imageAtomicOr
+spec@arb_shader_image_load_store@atomicity@imageAtomicXor
+spec@arb_shader_storage_buffer_object@execution@memory-layouts-struct-deref
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicadd-int
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicexchange-int
+spec@arb_shader_storage_buffer_object@layout-std140-write-shader
+spec@arb_shader_storage_buffer_object@layout-std430-write-shader
+spec@arb_timer_query@query gl_timestamp
+spec@arb_timer_query@timestamp-get
+spec@ext_timer_query@time-elapsed
+spec@ext_framebuffer_blit@fbo-blit-check-limits
+spec@ext_framebuffer_blit@fbo-sys-blit
+spec@ext_framebuffer_blit@fbo-sys-sub-blit
+spec@oes_viewport_array@viewport-gs-writes-in-range
+spec@!opengl 1.0@gl-1.0-front-invalidate-back
+spec@!opengl 1.1@masked-clear
+spec@!opengl 1.1@ppgtt_memory_alignment
+spec@!opengl 1.1@read-front clear-front-first
+spec@!opengl 1.1@read-front clear-front-first samples=2
+spec@!opengl 1.1@read-front clear-front-first samples=4
+spec@!opengl 1.1@read-front samples=2
+spec@!opengl 2.0@vertex-program-two-side enabled front front2@tes-out and fs
+spec@!opengl 2.0@vertex-program-two-side enabled front front2@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front2 back2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front2 back2@vs- gs and fs
+spec@!opengl 3.0@gl30basic
diff --git a/.gitlab-ci/expectations/host/virgl-gles-skips.txt b/.gitlab-ci/expectations/host/virgl-gles-skips.txt
new file mode 100644
index 00000000..e7ff1c8b
--- /dev/null
+++ b/.gitlab-ci/expectations/host/virgl-gles-skips.txt
@@ -0,0 +1,170 @@
+glx@.*
+
+# Skip because we don't care for fp64 for now
+spec@arb_gpu_shader_fp64@.*
+
+# Skip TS tests for now
+spec@arb_tessellation_shader@.*
+
+# Skip, this is expected
+# Refer to src/mesa/main/drawpix.c:100
+spec@ext_texture_integer@fbo-integer
+
+# Fails on iris too
+spec@arb_direct_state_access@gettextureimage-formats
+
+spec@nv_primitive_restart@primitive-restart-draw-mode-polygon
+spec@nv_primitive_restart@primitive-restart-draw-mode-quad_strip
+spec@nv_primitive_restart@primitive-restart-draw-mode-quads
+spec@ext_framebuffer_multisample@clip-and-scissor-blit.*
+
+# Skip any fp64 tests, this is not supported on GLES hosts
+spec@glsl-4.*@*dmat*
+spec@glsl-4.*@*dvec*
+spec@glsl-4.*@*double*
+spec@arb_enhanced_layouts@execution@component-layout@vs-gs-fs-double
+
+# GLES doesn't support VS array inputs
+spec@arb_enhanced_layouts@execution@component-layout@vs-attribs-array
+
+# GLES doesn't support more than one stream
+spec@arb_enhanced_layouts@gs-stream-location-aliasing
+
+# All these tests use a RGBA32F RW image and this is not supported on GLES
+# so skip the tests
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@layer
+spec@arb_shader_image_load_store@layer@image1DArray/layered binding test
+spec@arb_shader_image_load_store@layer@image1DArray/non-layered binding test
+spec@arb_shader_image_load_store@layer@image1D/layered binding test
+spec@arb_shader_image_load_store@layer@image1D/non-layered binding test
+spec@arb_shader_image_load_store@layer@image2DArray/layered binding test
+spec@arb_shader_image_load_store@layer@image2DArray/non-layered binding test
+spec@arb_shader_image_load_store@layer@image2D/layered binding test
+spec@arb_shader_image_load_store@layer@image2D/non-layered binding test
+spec@arb_shader_image_load_store@layer@image2DRect/layered binding test
+spec@arb_shader_image_load_store@layer@image2DRect/non-layered binding test
+spec@arb_shader_image_load_store@layer@image3D/layered binding test
+spec@arb_shader_image_load_store@layer@image3D/non-layered binding test
+spec@arb_shader_image_load_store@layer@imageBuffer/layered binding test
+spec@arb_shader_image_load_store@layer@imageBuffer/non-layered binding test
+spec@arb_shader_image_load_store@layer@imageCubeArray/layered binding test
+spec@arb_shader_image_load_store@layer@imageCubeArray/non-layered binding test
+spec@arb_shader_image_load_store@layer@imageCube/layered binding test
+spec@arb_shader_image_load_store@layer@imageCube/non-layered binding test
+spec@arb_shader_image_load_store@level@1DArray level binding test
+spec@arb_shader_image_load_store@level@1D level binding test
+spec@arb_shader_image_load_store@level@2DArray level binding test
+spec@arb_shader_image_load_store@level@2D level binding test
+spec@arb_shader_image_load_store@level@3D level binding test
+spec@arb_shader_image_load_store@level@CubeArray level binding test
+spec@arb_shader_image_load_store@level@Cube level binding test
+spec@arb_shader_image_load_store@level
diff --git a/.gitlab-ci/expectations/virt/deqp-venus.toml b/.gitlab-ci/expectations/virt/deqp-venus.toml
new file mode 100644
index 00000000..752d7eca
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/deqp-venus.toml
@@ -0,0 +1,6 @@
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = [ "/deqp/mustpass/vk-master.txt" ]
+deqp_args = [ "/deqp/external/vulkancts/modules/vulkan/deqp-vk" ]
+timeout = 30.0 # Starting lots of Crosvm instances simultaneously can take some time
+renderer_check = "Virtio-GPU Venus.*llvmpipe"
diff --git a/.gitlab-ci/expectations/virt/deqp-virgl-gl.toml b/.gitlab-ci/expectations/virt/deqp-virgl-gl.toml
new file mode 100644
index 00000000..10daea0d
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/deqp-virgl-gl.toml
@@ -0,0 +1,57 @@
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = ["/deqp/mustpass/gles2-master.txt"]
+deqp_args = [
+ "/deqp/modules/gles2/deqp-gles2",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
+version_check = "GL ES 3.2.*git"
+renderer_check = "virgl.*LLVMPIPE"
+
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = ["/deqp/mustpass/gles3-master.txt"]
+deqp_args = [
+ "/deqp/modules/gles3/deqp-gles3",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
+
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = ["/deqp/mustpass/gles31-master.txt"]
+deqp_args = [
+ "/deqp/modules/gles31/deqp-gles31",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
+
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = [
+ "/deqp/mustpass/gl30-master.txt",
+ "/deqp/mustpass/gl31-master.txt",
+ "/deqp/mustpass/gl32-master.txt",
+]
+deqp_args = [
+ "/deqp/external/openglcts/modules/glcts",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
diff --git a/.gitlab-ci/expectations/virt/deqp-virgl-gles.toml b/.gitlab-ci/expectations/virt/deqp-virgl-gles.toml
new file mode 100644
index 00000000..10daea0d
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/deqp-virgl-gles.toml
@@ -0,0 +1,57 @@
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = ["/deqp/mustpass/gles2-master.txt"]
+deqp_args = [
+ "/deqp/modules/gles2/deqp-gles2",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
+version_check = "GL ES 3.2.*git"
+renderer_check = "virgl.*LLVMPIPE"
+
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = ["/deqp/mustpass/gles3-master.txt"]
+deqp_args = [
+ "/deqp/modules/gles3/deqp-gles3",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
+
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = ["/deqp/mustpass/gles31-master.txt"]
+deqp_args = [
+ "/deqp/modules/gles31/deqp-gles31",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
+
+[[deqp]]
+deqp = "/install/crosvm-runner.sh"
+caselists = [
+ "/deqp/mustpass/gl30-master.txt",
+ "/deqp/mustpass/gl31-master.txt",
+ "/deqp/mustpass/gl32-master.txt",
+]
+deqp_args = [
+ "/deqp/external/openglcts/modules/glcts",
+ "--deqp-surface-width=256",
+ "--deqp-surface-height=256",
+ "--deqp-surface-type=pbuffer",
+ "--deqp-gl-config-name=rgba8888d24s8ms0",
+ "--deqp-visibility=hidden"
+]
+timeout = 360.0 # Starting 8 Crosvm instances simultaneously can take some time
diff --git a/.gitlab-ci/expectations/virt/traces-virgl.yml b/.gitlab-ci/expectations/virt/traces-virgl.yml
new file mode 100644
index 00000000..bb4d8b30
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/traces-virgl.yml
@@ -0,0 +1,311 @@
+traces-db:
+ download-url: "https://s3.freedesktop.org/mesa-tracie-public/"
+
+traces:
+ - path: glmark2/desktop:windows=4:effect=blur:blur-radius=5:passes=1:separable=true-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 2fc8433c4a38b796173bda2bcfb924cc
+ - path: glmark2/jellyfish-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 2112a9a5519f39483735509f2ccc61af
+ - path: glxgears/glxgears-2-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: f8eba0fec6e3e0af9cb09844bc73bdc8
+ - path: gputest/furmark-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 2762c809316c58d4eefad6677ecfcb2e
+ - path: gputest/pixmark-piano-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 0d875bda7edc01698342b157c6f51500
+ - path: gputest/triangle-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 7812de00011a3a059892e36cea19c696
+ - path: humus/Portals-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: b697edce7776f1afe294a7e80dfc013e
+ - path: 0ad/0ad-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 350e0cf64d124ba98d90106f61775eb4
+ - path: glmark2/buffer:update-fraction=0.5:update-dispersion=0.9:columns=200:update-method=map:interleave=false-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: f80431e56327354b4c88cc45c7e6633a
+ - path: glmark2/buffer:update-fraction=0.5:update-dispersion=0.9:columns=200:update-method=subdata:interleave=false-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 81e12bfa4ae3b7e63b01edbed71a5941
+ - path: glmark2/buffer:update-fraction=0.5:update-dispersion=0.9:columns=200:update-method=map:interleave=true-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 08e6d00fe3f4414ebfadc9e5f3c3bf0e
+ - path: glmark2/bump:bump-render=height-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 4d5211dfb0fd82a1a1dbb498dc2e5b8b
+ - path: glmark2/bump:bump-render=high-poly-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 4b4d4a4b7bb1341bbd0299c7eb3a6ac9
+ - path: glmark2/bump:bump-render=normals-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 832e5baf289b27dd84a665f1c85f57c2
+ - path: glmark2/conditionals:vertex-steps=0:fragment-steps=0-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: b78f28d97b675fcc7649cced3930650a
+ - path: glmark2/conditionals:vertex-steps=0:fragment-steps=5-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: d0782a516f06a6dddac4f1e1249f41e7
+ - path: glmark2/conditionals:vertex-steps=5:fragment-steps=0-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 1ae280a9c6cae495f2d272516a52167e
+ - path: glmark2/desktop:windows=4:effect=shadow-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: d4b3e8338327859a029c7267c9916524
+ - path: glmark2/effect2d:kernel=0,1,0;1,-4,1;0,1,0;-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 35584880539813436d87bfcbe22cf59b
+ - path: glmark2/effect2d:kernel=1,1,1,1,1;1,1,1,1,1;1,1,1,1,1;-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: b80963dae6ecf40c83bfb16943ef1011
+ - path: glmark2/function:fragment-steps=5:fragment-complexity=low-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: da10cb29cab30c5c068e722b5da7c2e5
+ - path: glmark2/function:fragment-steps=5:fragment-complexity=medium-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 8e40504d9f2ead8c0d02604291bff1b6
+ - path: glmark2/build:use-vbo=false-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 024fc485e1f33461313c956ab1b73bdf
+ - path: glmark2/build:use-vbo=true-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 48c45d16cd410a71aea1a12a73e257d3
+ - path: glmark2/ideas:speed=10000-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: db78cfb035213e31e1435b637b1a8f19
+ - path: glmark2/loop:vertex-steps=5:fragment-steps=5:fragment-loop=false-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 7fee2e864e015353ace431d51d41bb22
+ - path: glmark2/loop:vertex-steps=5:fragment-steps=5:fragment-uniform=false-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: c87127a5c3256c1fe7c79f7931b8f9df
+ - path: glmark2/loop:vertex-steps=5:fragment-steps=5:fragment-uniform=true-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 5fec1f728bda86891db4243130546187
+ - path: glmark2/pulsar:quads=5:texture=false:light=false-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 3e0e6675fb65e00f9128138ff08c2634
+ - path: glmark2/refract-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: cdadfee0518b964433d80c01329ec191
+ - path: glmark2/shading:shading=blinn-phong-inf-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 36b07dad759ca65e52f1abf1667e7ca8
+ - path: glmark2/shading:shading=cel-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: cb41cf2531a06d65f6e4f442ab62ae8d
+ - path: glmark2/shading:shading=gouraud-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 3e5469d5038d7cc94ef3549ce9d8c385
+ - path: glmark2/shading:shading=phong-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: e40abcbb4cfbbbfb499d4b0e6d668f41
+ - path: glmark2/shadow-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 2bb7290f8559ff93305c0e29f3d671e1
+ - path: glmark2/texture:texture-filter=linear-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 914fd8dddb23751d9a187a979d881abb
+ - path: glmark2/texture:texture-filter=mipmap-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: ea1939f3c4e8dd9cdbc26d41f9dc891a
+ - path: glmark2/texture:texture-filter=nearest-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 1ae652bdebd1188ab912a800a4c37166
+# Crash
+# - path: gputest/gimark-v2.trace
+# expectations:
+# - device: gl-virgl
+# checksum: 2cf40180a1315795389d0dfc18aad988
+ - path: gputest/pixmark-julia-fp32-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 8b3584b1dd8f1d1bb63205564bd78e4e
+ - path: gputest/pixmark-julia-fp64-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 73ccaff82ea764057fb0f93f0024cf84
+ - path: gputest/pixmark-volplosion-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: aef0b32ce99a3b25d35304ca08032833
+ - path: gputest/plot3d-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 817a36e53edccdf946061315596e9cdd
+# Times out
+# - path: gputest/tessmark-v2.trace
+# expectations:
+# - device: gl-virgl
+# checksum: 5d04b8d71517238b9bc8a527574e884b
+ - path: humus/AmbientAperture-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: b33fb8ee73b0c50b14822e170f15ab8a
+ - path: humus/CelShading-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 3629cba72bde53e4275a8365175fde83
+ - path: humus/DynamicBranching3-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 0236b28aa8b26fa60172d71bb040f2e9
+ - path: humus/HDR-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: eab0801aadeae87ce31aa0d4ff55e8f8
+ - path: humus/RaytracedShadows-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: df074a376fd3e7abc4dffdd191db8f4b
+ - path: humus/VolumetricFogging2-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 2eb71553403ad8e0171abc9dc25e5bc1
+ - path: itoral-gl-terrain-demo/demo-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 716d4fe36a6212b161285fed8a423ee8
+ - path: neverball/neverball-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: cc11743f008ccd76adf72695a423436a
+ - path: pathfinder/canvas_moire-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 25ba8f18274126670311bd3ffe058f74
+ - path: pathfinder/canvas_text_v2-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: a1446d0c42a78771240fca6f3b1e10d8
+ - path: pathfinder/demo-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 0702a66c415cfc13d5bae8bec08402cf
+ # The host crashes with
+ # "src/mesa/main/arrayobj.c:800:_mesa_update_vao_derived_arrays: Assertion
+ # `attrib->_EffRelativeOffset < binding->Stride' failed." when running these.
+ # - path: paraview/pv-manyspheres-v2.trace
+ # expectations:
+ # - device: gl-virgl
+ # checksum: b740377ea4bbb3becd304d1696a55247
+ # - path: paraview/pv-waveletcontour-v2.trace
+ # expectations:
+ # - device: gl-virgl
+ # checksum: db43c733f3f3d5253e263838e58d9111
+ - path: paraview/pv-waveletvolume-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: f4af4067b37c00861fa5911e4c0a6629
+ - path: supertuxkart/supertuxkart-mansion-egl-gles-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 092e8ca38e58aaa83df2a9f0b7b8aee5
+ - path: xonotic/xonotic-keybench-high-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: f3b184bf8858a6ebccd09e7ca032197e
+ - path: valve/counterstrike-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 3bc0e0e39cb3c29f6d76ff07f1f02860
+ - path: valve/counterstrike-source-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: f8e5b19142007be14ce6d18d25ef329d
+ - path: valve/half-life-2-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 6099a13f48bf090ee1d768f98208da70
+ - path: valve/portal-2-v2.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 7489a8412ee2bca45431d208e0006a3e
+# Piglit crashes when trying to run this one
+# - path: supertuxkart/supertuxkart-antediluvian-abyss.rdc
+# expectations:
+# - device: gl-virgl
+# checksum: 0
+# Piglit crashes when trying to run this one
+# - path: supertuxkart/supertuxkart-menu.rdc
+# expectations:
+# - device: gl-virgl
+# checksum: 0
+# Piglit crashes when trying to run this one
+# - path: supertuxkart/supertuxkart-ravenbridge-mansion.rdc
+# expectations:
+# - device: gl-virgl
+# checksum: 0
+ - path: godot/Material Testers.x86_64_2020.04.08_13.38_frame799.rdc
+ expectations:
+ - device: gl-virgl
+ checksum: 232eb48d6689c0117e3cc1660af7f32d
+ # ../src/mesa/main/arrayobj.c:800:_mesa_update_vao_derived_arrays: Assertion `attrib->_EffRelativeOffset < binding->Stride' failed.
+ #- path: ror/ror-default.trace
+ # expectations:
+ #- device: gl-virgl
+ #- path: nheko/nheko-colors.trace
+ # expectations:
+ #- device: gl-virgl
+ # checksum: 3a12c08087e16cfae4729f4e9d6c9387
+ #- path: blender/blender-demo-cube_diorama.trace
+ # expectations:
+ #- device: gl-virgl
+ #- path: blender/blender-demo-ellie_pose.trace
+ # expectations:
+ #- device: gl-virgl
+ #- path: freedoom/freedoom-phase2-gl-high.trace
+ # expectations:
+ #- device: gl-virgl
+ #- path: unvanquished/unvanquished-lowest.trace
+ # expectations:
+ #- device: gl-virgl
+ #- path: unvanquished/unvanquished-ultra.trace
+ # expectations:
+ #- device: gl-virgl
+ - path: warzone2100/warzone2100-default.trace
+ expectations:
+ - device: gl-virgl
+ checksum: 1fd3f9b5e5a711bdfac49dc03912e1de
diff --git a/.gitlab-ci/expectations/virt/venus-fails.txt b/.gitlab-ci/expectations/virt/venus-fails.txt
new file mode 100644
index 00000000..8916f9e2
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/venus-fails.txt
@@ -0,0 +1,15 @@
+# Failures likely due to lavapipe (i.e. the intersection of observed failures and lvp-fails.txt)
+dEQP-VK.glsl.crash_test.divbyzero_comp,Crash
+
+# Full Venus list
+dEQP-VK.multiview.queries.15_15_15_15,Fail
+dEQP-VK.multiview.queries.15,Fail
+dEQP-VK.multiview.queries.5_10_5_10,Fail
+dEQP-VK.multiview.renderpass2.queries.15_15_15_15,Fail
+dEQP-VK.multiview.renderpass2.queries.15,Fail
+dEQP-VK.multiview.renderpass2.queries.5_10_5_10,Fail
+dEQP-VK.pipeline.extended_dynamic_state.after_pipelines.enable_raster,Fail
+dEQP-VK.pipeline.extended_dynamic_state.before_draw.enable_raster,Fail
+dEQP-VK.pipeline.extended_dynamic_state.between_pipelines.enable_raster,Fail
+dEQP-VK.pipeline.extended_dynamic_state.cmd_buffer_start.enable_raster,Fail
+dEQP-VK.pipeline.extended_dynamic_state.two_draws_dynamic.enable_raster,Fail
diff --git a/.gitlab-ci/expectations/virt/venus-flakes.txt b/.gitlab-ci/expectations/virt/venus-flakes.txt
new file mode 100644
index 00000000..19891bfa
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/venus-flakes.txt
@@ -0,0 +1,8 @@
+dEQP-VK.synchronization.*16384
+dEQP-VK.synchronization.*262144
+dEQP-VK.spirv_assembly.instruction.graphics.64bit_compare.double.frag_opfordnotequal_nonan_vector
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.subgroups.ballot_broadcast.compute.subgroupbroadcast_uvec4
diff --git a/.gitlab-ci/expectations/virt/venus-skips.txt b/.gitlab-ci/expectations/virt/venus-skips.txt
new file mode 100644
index 00000000..e67e91bb
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/venus-skips.txt
@@ -0,0 +1,2 @@
+# These take so long that they cause caselist batches to time out
+dEQP-VK.pipeline.monolithic.* \ No newline at end of file
diff --git a/.gitlab-ci/expectations/virt/virgl-gl-fails.txt b/.gitlab-ci/expectations/virt/virgl-gl-fails.txt
new file mode 100644
index 00000000..fa9155f6
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/virgl-gl-fails.txt
@@ -0,0 +1,543 @@
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES2.functional.clipping.point.wide_point_clip,Fail
+dEQP-GLES2.functional.clipping.point.wide_point_clip_viewport_center,Fail
+dEQP-GLES2.functional.clipping.point.wide_point_clip_viewport_corner,Fail
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.default_framebuffer_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.default_framebuffer_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.fbo_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.global_state.vertex_tessellation_fragment.fbo_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.default_framebuffer_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.default_framebuffer_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.fbo_bbox_equal,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_draw.vertex_tessellation_fragment.fbo_bbox_larger,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_primitive.vertex_tessellation_fragment.default_framebuffer,Fail
+dEQP-GLES31.functional.primitive_bounding_box.wide_points.tessellation_set_per_primitive.vertex_tessellation_fragment.fbo,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_2,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES3.functional.clipping.point.wide_point_clip,Fail
+dEQP-GLES3.functional.clipping.point.wide_point_clip_viewport_center,Fail
+dEQP-GLES3.functional.clipping.point.wide_point_clip_viewport_corner,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_x,Fail
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_mat4x2_vs,Crash
+KHR-GL30.transform_feedback.api_errors_test,Fail
+KHR-GL31.transform_feedback.capture_special_interleaved_test,Crash
+KHR-GL32.transform_feedback_overflow_query_ARB.advanced-single-stream-interleaved-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.advanced-single-stream-separate-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.basic-single-stream-interleaved-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.basic-single-stream-separate-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.multiple-streams-multiple-buffers-per-stream,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.multiple-streams-one-buffer-per-stream,Fail
+
+
+fast_color_clear@fcc-front-buffer-distraction,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges,Fail
+shaders@point-vertex-id divisor,Fail
+shaders@point-vertex-id gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_instanceid,Fail
+shaders@point-vertex-id gl_vertexid divisor,Fail
+shaders@point-vertex-id gl_vertexid,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid,Fail
+spec@arb_blend_func_extended@arb_blend_func_extended-fbo-extended-blend-pattern_gles2,Fail
+spec@arb_clear_texture@arb_clear_texture-depth,Fail
+spec@arb_copy_image@arb_copy_image-formats,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-copypixels,Fail
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-drawpixels-24_8,Fail
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-drawpixels-32f_24_8_rev,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH32F_STENCIL8- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH_COMPONENT32F- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH32F_STENCIL8- swizzled- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32F- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT16- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT24- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT32- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT16- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT24- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32- swizzled- border color only,Fail
+spec@arb_direct_state_access@gettextureimage-targets,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_1,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_2,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_3,Crash
+spec@arb_es2_compatibility@texwrap formats bordercolor,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor@GL_RGB565- border color only,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled@GL_RGB565- swizzled- border color only,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glScissor,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glViewport,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-roundup-samples,Fail
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed,Crash
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-get,Fail
+spec@arb_occlusion_query@occlusion_query_conform,Fail
+spec@arb_occlusion_query@occlusion_query_conform@GetObjivAval_multi1,Fail
+spec@arb_occlusion_query@occlusion_query_meta_no_fragments,Fail
+spec@arb_occlusion_query@occlusion_query_meta_save,Fail
+spec@arb_point_sprite@arb_point_sprite-mipmap,Fail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex,Fail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex@'vs_input2[1][0]' on GL_PROGRAM_INPUT,Fail
+spec@arb_sample_shading@builtin-gl-sample-position 2,Fail
+spec@arb_shader_atomic_counter_ops@execution@add,Fail
+spec@arb_shader_atomic_counters@fragment-discard,Fail
+spec@arb_shader_image_load_store@early-z,Fail
+spec@arb_shader_image_load_store@early-z@occlusion query test/early-z pass,Fail
+spec@arb_shader_image_load_store@layer,Fail
+spec@arb_shader_image_load_store@layer@image2DMSArray/layered binding test,Fail
+spec@arb_shader_image_load_store@layer@image2DMSArray/non-layered binding test,Fail
+spec@arb_shader_image_load_store@layer@image2DMS/layered binding test,Fail
+spec@arb_shader_image_load_store@layer@image2DMS/non-layered binding test,Fail
+spec@arb_shader_image_load_store@max-images@Combined max image uniforms test,Fail
+spec@arb_shader_image_load_store@max-images,Fail
+spec@arb_shader_image_load_store@max-size,Fail
+spec@arb_shader_image_load_store@max-size@image2DMSArray max size test/4x8x8x2048,Fail
+spec@arb_shader_image_load_store@max-size@image2DMS max size test/4x16384x8x1,Fail
+spec@arb_shader_image_load_store@max-size@image2DMS max size test/4x8x16384x1,Fail
+spec@arb_shader_image_load_store@semantics,Fail
+spec@arb_shader_image_load_store@semantics@imageLoad/Vertex shader/rgba32f/image2DMSArray test,Fail
+spec@arb_shader_image_load_store@semantics@imageLoad/Vertex shader/rgba32f/image2DMS test,Fail
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomiccompswap-int,Fail
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicexchange-int,Fail
+spec@arb_shader_storage_buffer_object@maxblocks,Fail
+spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgrad,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_INTENSITY- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGBA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGB- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_INTENSITY- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB- swizzled- border color only,Fail
+spec@arb_texture_cube_map_array@fbo-generatemipmap-cubemap array s3tc_dxt1,Fail
+spec@arb_texture_float@fbo-blending-formats,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_RGB32F,Fail
+spec@arb_texture_float@fbo-clear-formats,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-fast-clear,Fail
+spec@arb_texture_float@multisample-fast-clear gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 2 gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 4 gl_arb_texture_float,Fail
+spec@arb_texture_float@texwrap formats bordercolor,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB32F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA32F- swizzled- border color only,Fail
+spec@arb_texture_rectangle@copyteximage rect,Fail
+spec@arb_texture_rectangle@copyteximage rect samples=2,Fail
+spec@arb_texture_rectangle@copyteximage rect samples=4,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor@GL_RGBA8- border color only,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R32F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG32F- swizzled- border color only,Fail
+spec@arb_texture_view@rendering-layers-image,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image1DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image2DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of imageCubeArray,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_0,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_2,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_any,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic@arb_transform_feedback_overflow_query-buffer_object_single,Fail
+spec@arb_transform_feedback_overflow_query@arb_transform_feedback_overflow_query-basic,Fail
+spec@egl 1.4@eglterminate then unbind context,Fail
+spec@egl_ext_device_base@conformance@configless_tests,Fail
+spec@egl_ext_device_base@conformance,Fail
+spec@egl_ext_device_base@conformance@pbuffer_tests,Fail
+spec@egl_ext_device_base@conformance@surfaceless_tests,Fail
+spec@egl_ext_protected_content@conformance,Fail
+spec@egl_khr_gl_image@egl_khr_gl_renderbuffer_image-clear-shared-image gl_depth_component24,Fail
+spec@egl_khr_surfaceless_context@viewport,Fail
+spec@ext_framebuffer_multisample@alpha-blending-after-rendering 2,Fail
+spec@ext_framebuffer_multisample@blit-mismatched-formats,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-edges,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-edges,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth single,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 inverted,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 non-inverted,Fail
+spec@ext_framebuffer_object@fbo-blending-format-quirks,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT24/GL_FLOAT,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT/GL_FLOAT,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-clear-and-render,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-rendering,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_ayuv,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_nv12,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_p010,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_p012,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_p016,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_uyvy,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_xyuv,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y210,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y212,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y216,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y412,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y416,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_yuv420,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_yuyv,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_yvu420,Fail
+spec@ext_packed_depth_stencil@fbo-depthstencil-gl_depth24_stencil8-copypixels,Fail
+spec@ext_packed_depth_stencil@fbo-depthstencil-gl_depth24_stencil8-drawpixels-24_8,Fail
+spec@ext_packed_depth_stencil@fbo-depthstencil-gl_depth24_stencil8-drawpixels-32f_24_8_rev,Fail
+spec@ext_packed_depth_stencil@readdrawpixels,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor@GL_DEPTH24_STENCIL8- border color only,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled@GL_DEPTH24_STENCIL8- swizzled- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor,Fail
+spec@ext_packed_float@texwrap formats bordercolor@GL_R11F_G11F_B10F- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled@GL_R11F_G11F_B10F- swizzled- border color only,Fail
+spec@ext_texture_array@fbo-generatemipmap-array s3tc_dxt1,Fail
+spec@ext_texture_array@gen-mipmap,Fail
+spec@ext_texture_array@getteximage-targets 1d_array,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets 2d_array s3tc,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets cube_array s3tc,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@fbo-blending,Fail
+spec@ext_texture_integer@multisample-fast-clear gl_ext_texture_integer,Fail
+spec@ext_texture_integer@texwrap formats bordercolor,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor@GL_RGB9_E5- border color only,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled@GL_RGB9_E5- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB- swizzled- border color only,Fail
+spec@ext_transform_feedback@builtin-varyings gl_culldistance,Fail
+spec@glsl-1.10@execution@samplers@glsl-fs-shadow2d-clamp-z,Fail
+spec@glsl-1.30@execution@texelfetch@fs-texelfetch-isampler1darray,Fail
+spec@glsl-1.30@execution@texelfetch@fs-texelfetch-sampler1darray,Fail
+spec@glsl-1.30@execution@texelfetch@fs-texelfetch-usampler1darray,Fail
+spec@glsl-1.30@execution@texelfetchoffset@fs-texelfetch-isampler1darray,Fail
+spec@glsl-1.30@execution@texelfetchoffset@fs-texelfetch-sampler1darray,Fail
+spec@glsl-1.30@execution@texelfetchoffset@fs-texelfetch-usampler1darray,Fail
+spec@glsl-1.30@execution@texelfetchoffset@vs-texelfetch-isampler1darray,Fail
+spec@glsl-1.30@execution@texelfetchoffset@vs-texelfetch-usampler1darray,Fail
+spec@glsl-1.30@execution@texelfetch@vs-texelfetch-isampler1darray,Fail
+spec@glsl-1.30@execution@texelfetch@vs-texelfetch-usampler1darray,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quads,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quad-strip,Fail
+spec@glsl-1.50@execution@variable-indexing@gs-input-array-float-index-rd,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp@sRGB decode full precision,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color,Fail
+spec@nv_copy_image@nv_copy_image-formats,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@nv_read_depth@read_depth_gles3,Fail
+spec@!opengl 1.0@depth-clear-precision-check@depth32,Fail
+spec@!opengl 1.0@depth-clear-precision-check,Fail
+spec@!opengl 1.0@gl-1.0-drawbuffer-modes,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag-quads,Fail
+spec@!opengl 1.0@gl-1.0-swapbuffers-behavior,Fail
+spec@!opengl 1.0@rasterpos,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_gs_linked,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_tes_linked,Fail
+spec@!opengl 1.1@copypixels-draw-sync,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=4,Fail
+spec@!opengl 1.1@linestipple@Factor 2x,Fail
+spec@!opengl 1.1@linestipple@Factor 3x,Fail
+spec@!opengl 1.1@linestipple,Fail
+spec@!opengl 1.1@linestipple@Line loop,Fail
+spec@!opengl 1.1@linestipple@Line strip,Fail
+spec@!opengl 1.1@linestipple@Restarting lines within a single Begin-End block,Fail
+spec@!opengl 1.1@point-line-no-cull,Fail
+spec@!opengl 1.1@polygon-mode-facing,Fail
+spec@!opengl 1.1@polygon-mode,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset,Fail
+spec@!opengl 1.1@read-front clear-front-first,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=2,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=4,Crash
+spec@!opengl 1.1@read-front,Fail
+spec@!opengl 1.1@read-front samples=2,Crash
+spec@!opengl 1.1@read-front samples=4,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE6_ALPHA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_R3_G3_B2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10_A2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5_A1- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE6_ALPHA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_R3_G3_B2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10_A2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5_A1- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA8- swizzled- border color only,Fail
+spec@!opengl 1.1@windowoverlap,Fail
+spec@!opengl 1.2@copyteximage 3d,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.5@depth-tex-compare,Fail
+spec@!opengl 1.5@draw-elements-user,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag-immediate,Fail
+spec@!opengl 2.0@vertex-program-two-side front back back2@gs-out and fs,Fail
+spec@!opengl 2.0@vertex-program-two-side front back front2@gs-out and fs,Fail
+spec@!opengl 2.0@vertex-program-two-side front back@gs-out and fs,Fail
+spec@!opengl 2.0@vertex-program-two-side front front2 back2@gs-out and fs,Fail
+spec@!opengl 3.2@layered-rendering@clear-color-mismatched-layer-count,Fail
+spec@!opengl es 3.0@gles-3.0-transform-feedback-uniform-buffer-object,Fail
diff --git a/.gitlab-ci/expectations/virt/virgl-gl-flakes.txt b/.gitlab-ci/expectations/virt/virgl-gl-flakes.txt
new file mode 100644
index 00000000..1101b2cf
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/virgl-gl-flakes.txt
@@ -0,0 +1,81 @@
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8
+dEQP-GLES31.functional.ssbo.layout.3_level_unsized_array.shared.mat4
+dEQP-GLES31.functional.ssbo.layout.random.all_shared_buffer.36
+dEQP-GLES31.functional.ssbo.layout.random.arrays_of_arrays.1
+dEQP-GLES31.functional.ssbo.layout.random.nested_structs_arrays_instance_arrays.22
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_bool_mat3_vs
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_bool_mat4x3_vs
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_mat4x2_vs
+KHR-GL30.shaders30.glsl_constructors.bvec4_from_mat4x3_vs
+KHR-GL31.transform_feedback.capture_special_interleaved_test
+
+shaders@glsl-max-varyings
+shaders@glsl-max-varyings >max_varying_components
+shaders@glsl-uniform-interstage-limits@subdivide 5
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges
+spec@arb_compute_shader@local-id-explosion
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8UI/Destination: GL_R8I
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8UI/Destination: GL_R8UI
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RED/Destination: GL_RED
+spec@arb_fragment_layer_viewport@layer-gs-writes-in-range
+spec@arb_fragment_layer_viewport@viewport-gs-writes-in-range
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glScissor
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glViewport
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-roundup-samples
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed
+spec@arb_gpu_shader5@texturegatheroffset@fs-rgba-3-int-2drect-const
+spec@arb_gpu_shader_int64@execution@built-in-functions@gs-min-i64vec2-int64_t
+spec@arb_shader_atomic_counter_ops@execution@add
+spec@arb_shader_atomic_counters@fragment-discard
+spec@arb_shader_image_load_store@indexing
+spec@arb_shader_storage_buffer_object@execution@memory-layouts-struct-deref
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicadd-int
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicexchange-int
+spec@arb_texture_rg@fbo-rg-gl_rg
+spec@arb_timer_query@query gl_timestamp
+spec@arb_timer_query@timestamp-get
+spec@ext_timer_query@time-elapsed
+spec@ext_framebuffer_blit@fbo-blit-check-limits
+spec@ext_framebuffer_blit@fbo-sys-blit
+spec@ext_framebuffer_blit@fbo-sys-sub-blit
+spec@ext_framebuffer_object@fbo-stencil-gl_stencil_index16-drawpixels
+spec@ext_texture_snorm@multisample-formats 2 gl_ext_texture_snorm
+spec@ext_timer_query@time-elapsed
+spec@glsl-1.10@execution@glsl-fs-max-array-access-function
+spec@glsl-1.10@execution@variable-indexing@fs-uniform-array-mat2-row-rd
+spec@glsl-1.50@execution@built-in-functions@gs-op-bitor-neg-ivec4-int
+spec@glsl-1.50@execution@built-in-functions@gs-op-bitxor-neg-uint-uvec2
+spec@glsl-1.50@execution@built-in-functions@gs-op-sub-mat4x2-mat4x2
+spec@glsl-1.50@execution@texelfetchoffset@gs-texelfetch-usampler3d
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-dvec2
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dvec3
+spec@glsl-4.30@execution@built-in-functions@cs-op-bitand-not-uvec2-uvec2
+spec@glsl-4.30@execution@built-in-functions@cs-op-bitor-abs-not-int-ivec4
+spec@glsl-4.50@execution@ssbo-atomiccompswap-int
+spec@oes_texture_view@sampling-2d-array-as-2d-layer
+spec@oes_viewport_array@viewport-gs-writes-in-range
+spec@!opengl 1.0@gl-1.0-drawbuffer-modes
+spec@!opengl 1.0@gl-1.0-front-invalidate-back
+spec@!opengl 1.0@gl-1.0-polygon-line-aa
+spec@!opengl 1.0@gl-1.0-swapbuffers-behavior
+spec@!opengl 1.1@copypixels-draw-sync
+spec@!opengl 1.1@masked-clear
+spec@!opengl 1.1@polygon-mode-facing
+spec@!opengl 1.1@ppgtt_memory_alignment
+spec@!opengl 1.1@read-front
+spec@!opengl 1.1@read-front clear-front-first
+spec@!opengl 1.1@read-front clear-front-first samples=2
+spec@!opengl 1.1@read-front clear-front-first samples=4
+spec@!opengl 1.1@read-front samples=2
+spec@!opengl 1.1@read-front samples=4
+spec@!opengl 1.1@texsubimage-unpack
+spec@!opengl 2.0@tex3d-npot
+spec@!opengl 2.0@vertex-program-two-side front back back2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front back back2@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front back front2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front back front2@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front back@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front back@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front front2 back2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front front2 back2@vs- gs and fs
+spec@!opengl 3.0@gl30basic
diff --git a/.gitlab-ci/expectations/virt/virgl-gl-skips.txt b/.gitlab-ci/expectations/virt/virgl-gl-skips.txt
new file mode 100644
index 00000000..e9fef33e
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/virgl-gl-skips.txt
@@ -0,0 +1,75 @@
+# Sometimes crashes, e.g. https://gitlab.freedesktop.org/kusma/mesa/-/jobs/4109419
+dEQP-GLES31.functional.compute.basic.empty
+
+glx@.*
+
+# Skip because we don't care for fp64 for now
+spec@arb_gpu_shader_fp64@.*
+
+# Skip tessellation shader (TS) tests for now
+spec@arb_tessellation_shader@.*
+
+# Skip, this is expected
+# Refer to src/mesa/main/drawpix.c:100
+spec@ext_texture_integer@fbo-integer
+
+# Fails on iris too
+spec@arb_direct_state_access@gettextureimage-formats
+
+# Skip these as they get skipped with the Intel driver + vtest
+spec@arb_shader_texture_image_samples@builtin-image*
+
+# Skip for now
+spec@arb_vertex_attrib_64bit.*
+
+# Reported as crash, but no obvious crash
+spec@intel_shader_integer_functions2@execution@built-in-functions*
+spec@arb_vertex_program.*
+
+# Crashes when lowering GLSL to TGSI, but this is going away with the GLSL-NIR-TGSI lowering coming soon
+spec@glsl-4.00@execution@inout.*
+
+# Skip because they pass with the Intel driver
+spec@arb_shader_texture_image_samples@texturesamples@.*
+spec@nv_primitive_restart@primitive-restart-draw-mode-polygon
+spec@nv_primitive_restart@primitive-restart-draw-mode-quad_strip
+spec@nv_primitive_restart@primitive-restart-draw-mode-quads
+spec@glsl-4.00@execution@conversion.*
+spec@ext_framebuffer_multisample@clip-and-scissor-blit.*
+
+# Skip because they crash crosvm/virglrenderer
+spec@arb_pixel_buffer_object@texsubimage array pbo
+spec@arb_shader_image_load_store@invalid
+spec@arb_shader_image_load_store@layer
+spec@arb_shader_image_load_store@level
+spec@arb_shader_image_load_store@max-size
+spec@arb_shader_image_load_store@semantics
+spec@arb_shader_image_size@builtin
+spec@ext_gpu_shader4@execution@texelfetch@fs-texelfetch-isampler1darray
+spec@ext_gpu_shader4@execution@texelfetch@fs-texelfetch-sampler1darray
+spec@ext_gpu_shader4@execution@texelfetch@fs-texelfetch-usampler1darray
+spec@ext_gpu_shader4@execution@texelfetchoffset@fs-texelfetch-isampler1darray
+spec@ext_gpu_shader4@execution@texelfetchoffset@fs-texelfetch-sampler1darray
+spec@ext_gpu_shader4@execution@texelfetchoffset@fs-texelfetch-usampler1darray
+spec@ext_gpu_shader4@execution@texelfetchoffset@vs-texelfetch-isampler1darray
+spec@ext_gpu_shader4@execution@texelfetchoffset@vs-texelfetch-sampler1darray
+spec@ext_gpu_shader4@execution@texelfetchoffset@vs-texelfetch-usampler1darray
+spec@ext_gpu_shader4@execution@texelfetch@vs-texelfetch-isampler1darray
+spec@ext_gpu_shader4@execution@texelfetch@vs-texelfetch-sampler1darray
+spec@ext_gpu_shader4@execution@texelfetch@vs-texelfetch-usampler1darray
+spec@ext_texture_array@texsubimage array
+spec@glsl-1.30@execution@texelfetchoffset@vs-texelfetch-sampler1darray
+spec@glsl-1.30@execution@texelfetch@vs-texelfetch-sampler1darray
+spec@glsl-1.50@execution@interface-blocks-api-access-members
+spec@glsl-1.50@execution@texelfetch@gs-texelfetch-isampler1darray
+spec@glsl-1.50@execution@texelfetch@gs-texelfetch-sampler1darray
+spec@glsl-1.50@execution@texelfetch@gs-texelfetch-usampler1darray
+spec@glsl-1.50@execution@texelfetchoffset@gs-texelfetch-isampler1darray
+spec@glsl-1.50@execution@texelfetchoffset@gs-texelfetch-sampler1darray
+spec@glsl-1.50@execution@texelfetchoffset@gs-texelfetch-usampler1darray
+
+# Skip any fp64 tests; they are not working properly, and
+# fixing this is not a priority
+spec@glsl-4.*@*dmat*
+spec@glsl-4.*@*dvec*
+spec@glsl-4.*@*double*
diff --git a/.gitlab-ci/expectations/virt/virgl-gles-fails.txt b/.gitlab-ci/expectations/virt/virgl-gles-fails.txt
new file mode 100644
index 00000000..03c690e7
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/virgl-gles-fails.txt
@@ -0,0 +1,3225 @@
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES2.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_pixel.multisample_texture_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_rbo_2,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_1,Fail
+dEQP-GLES31.functional.shaders.sample_variables.sample_mask_in.bit_count_per_two_samples.multisample_texture_2,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_center,Fail
+dEQP-GLES3.functional.clipping.line.wide_line_clip_viewport_corner,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_mag_reverse_src_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_x,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_dst_y,Fail
+dEQP-GLES3.functional.fbo.blit.rect.nearest_consistency_min_reverse_src_x,Fail
+KHR-GL30.transform_feedback.api_errors_test,Fail
+KHR-GL30.transform_feedback.draw_xfb_stream_instanced_test,Fail
+KHR-GL31.transform_feedback.draw_xfb_stream_instanced_test,Fail
+KHR-GL32.transform_feedback.draw_xfb_stream_instanced_test,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.advanced-single-stream-interleaved-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.advanced-single-stream-separate-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.basic-single-stream-interleaved-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.basic-single-stream-separate-attribs,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.multiple-streams-multiple-buffers-per-stream,Fail
+KHR-GL32.transform_feedback_overflow_query_ARB.multiple-streams-one-buffer-per-stream,Fail
+
+
+fast_color_clear@fcc-front-buffer-distraction,ExpectedFail
+shaders@glsl-fs-pointcoord,Fail
+shaders@glsl-novertexdata,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5,Fail
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges,Fail
+shaders@point-vertex-id divisor,Fail
+shaders@point-vertex-id gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_instanceid,Fail
+shaders@point-vertex-id gl_vertexid divisor,Fail
+shaders@point-vertex-id gl_vertexid,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid divisor,Fail
+shaders@point-vertex-id gl_vertexid gl_instanceid,Fail
+spec@arb_clear_texture@arb_clear_texture-depth,Fail
+spec@arb_clear_texture@arb_clear_texture-sized-formats,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-clear,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-drawpixels,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-getteximage,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-probepixel,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-readpixels,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-render,Fail
+spec@arb_color_buffer_float@gl_rgba8_snorm-render-fog,Fail
+spec@arb_compute_shader@execution@min-dvec4-double-large-group-size,Fail
+spec@arb_copy_image@arb_copy_image-formats,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_ALPHA16/Destination: GL_ALPHA16,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RED_RGTC1/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RED_RGTC1/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_BPTC_UNORM/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RGB_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_RG_RGTC2/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RED_RGTC1/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RED_RGTC1/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RG_RGTC2/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16I/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16_SNORM/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R16UI/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R32F/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8I/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8_SNORM/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_R8UI/Destination: GL_R8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16I/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16_SNORM/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG16UI/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32F/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32I/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG32UI/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8I/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8_SNORM/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RG8UI/Destination: GL_R16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16I/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16_SNORM/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB16UI/Destination: GL_RGB16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8I/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8_SNORM/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGB8UI/Destination: GL_RGB8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16I/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA16UI/Destination: GL_RGBA16_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32F/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32I/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA32UI/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8I/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8_SNORM/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_RGBA8UI/Destination: GL_RGBA8_SNORM,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH32F_STENCIL8- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor@GL_DEPTH_COMPONENT32F- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH32F_STENCIL8- swizzled- border color only,Fail
+spec@arb_depth_buffer_float@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32F- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT16- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT24- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor@GL_DEPTH_COMPONENT32- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT16- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT24- swizzled- border color only,Fail
+spec@arb_depth_texture@texwrap formats bordercolor-swizzled@GL_DEPTH_COMPONENT32- swizzled- border color only,Fail
+spec@arb_draw_indirect@arb_draw_indirect-draw-elements-prim-restart-ugly,Fail
+spec@arb_enhanced_layouts@linker@component-layout@intrastage-vs,Fail
+spec@arb_enhanced_layouts@linker@component-layout@vs-to-fs,Fail
+spec@arb_enhanced_layouts@matching_basic_types_3_loc_1,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_1,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_1_loc_1,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_2,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_2_loc_1,Fail
+spec@arb_enhanced_layouts@matching_fp64_types_3,Crash
+spec@arb_enhanced_layouts@matching_fp64_types_3_loc_1,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor@GL_RGB565- border color only,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled,Fail
+spec@arb_es2_compatibility@texwrap formats bordercolor-swizzled@GL_RGB565- swizzled- border color only,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@Basic,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@discard,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@fb resize,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@glScissor,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@glViewport,Fail
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-query@MS4,Fail
+spec@arb_framebuffer_object@fbo-blit-scaled-linear,ExpectedFail
+spec@arb_framebuffer_object@fbo-gl_pointcoord,Fail
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed,Crash
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-get,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-emitstreamvertex_nodraw,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-minmax,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-tf-wrong-stream-value,Fail
+spec@arb_gpu_shader5@arb_gpu_shader5-xfb-streams,Fail
+spec@arb_gpu_shader5@linker@stream-different-zero-gs-fs,Fail
+spec@arb_gpu_shader5@linker@stream-invalid-prim-output,Fail
+spec@arb_occlusion_query@occlusion_query_conform,Fail
+spec@arb_occlusion_query@occlusion_query_conform@GetObjivAval_multi1,Fail
+spec@arb_occlusion_query@occlusion_query_conform@GetObjivAval_multi2,Fail
+spec@arb_occlusion_query@occlusion_query,Fail
+spec@arb_occlusion_query@occlusion_query_meta_fragments,Fail
+spec@arb_occlusion_query@occlusion_query_meta_no_fragments,Fail
+spec@arb_occlusion_query@occlusion_query_meta_save,Fail
+spec@arb_point_parameters@arb_point_parameters-point-attenuation@Aliased combinations,Fail
+spec@arb_point_parameters@arb_point_parameters-point-attenuation@Antialiased combinations,Fail
+spec@arb_point_parameters@arb_point_parameters-point-attenuation,Fail
+spec@arb_point_sprite@arb_point_sprite-checkerboard,Fail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex,ExpectedFail
+spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex@'vs_input2[1][0]' on GL_PROGRAM_INPUT,ExpectedFail
+spec@arb_provoking_vertex@arb-provoking-vertex-render,Fail
+spec@arb_provoking_vertex@clipped-strip-first,Fail
+spec@arb_sample_shading@samplemask 2@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all,Fail
+spec@arb_sample_shading@samplemask 2 all@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2 all@noms partition,Fail
+spec@arb_sample_shading@samplemask 2 all@sample mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2,Fail
+spec@arb_sample_shading@samplemask 2@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 2@noms partition,Fail
+spec@arb_sample_shading@samplemask 2@sample mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@0.250000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@0.250000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@0.500000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@1.000000 mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all,Fail
+spec@arb_sample_shading@samplemask 4 all@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4 all@noms partition,Fail
+spec@arb_sample_shading@samplemask 4 all@sample mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4,Fail
+spec@arb_sample_shading@samplemask 4@noms mask_in_one,Fail
+spec@arb_sample_shading@samplemask 4@noms partition,Fail
+spec@arb_sample_shading@samplemask 4@sample mask_in_one,Fail
+spec@arb_seamless_cube_map@arb_seamless_cubemap,Fail
+spec@arb_shader_image_load_store@bitcast,Fail
+spec@arb_shader_image_load_store@bitcast@r11f_g11f_b10f to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@r32f to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@r32i to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@r32ui to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg16f to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg16i to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg16_snorm to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg16 to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg16ui to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg32f to rgba16_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg32i to rgba16_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rg32ui to rgba16_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rgb10_a2 to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rgb10_a2ui to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rgba16 to rgba16_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@bitcast@rgba8 to rgba8_snorm bitcast test,Fail
+spec@arb_shader_image_load_store@early-z,ExpectedFail
+spec@arb_shader_image_load_store@early-z@occlusion query test/early-z pass,ExpectedFail
+spec@arb_shader_image_load_store@early-z@occlusion query test/late-z pass,Fail
+spec@arb_shader_image_load_store@execution@disable_early_z,Fail
+spec@arb_shader_image_load_store@max-images@Combined max image uniforms test,Fail
+spec@arb_shader_image_load_store@max-images,Fail
+spec@arb_shader_image_load_store@max-size,Crash
+spec@arb_shader_image_load_store@max-size@image1D max size test/16384x1x1x1,Fail
+spec@arb_shader_image_load_store@restrict,Fail
+spec@arb_shader_image_load_store@restrict@no qualifier image aliasing test,Fail
+spec@arb_shader_image_load_store@semantics,Fail
+spec@arb_shader_image_load_store@semantics@imageStore/Vertex shader/rgba32f/image1D test,Fail
+spec@arb_shader_storage_buffer_object@execution@indirect,Fail
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomiccompswap-int,ExpectedFail
+spec@arb_shader_storage_buffer_object@maxblocks,Fail
+spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgradcube,ExpectedFail
+spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgrad,ExpectedFail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb 3d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *gradarb cube,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod 3d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *lod cube,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 1d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 2d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projgradarb 3d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 1d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 1d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 1dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 2d,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 2d_projvec4,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 2dshadow,Fail
+spec@arb_shader_texture_lod@execution@tex-miplevel-selection *projlod 3d,Fail
+spec@arb_texture_compression_bptc@compressedteximage gl_compressed_rgb_bptc_signed_float,Fail
+spec@arb_texture_compression_bptc@compressedteximage gl_compressed_rgb_bptc_unsigned_float,Fail
+spec@arb_texture_compression_bptc@compressedteximage gl_compressed_srgb_alpha_bptc_unorm,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT- swizzled- border color only,Fail
+spec@arb_texture_compression_bptc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_INTENSITY- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE_ALPHA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_LUMINANCE- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGBA- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor@GL_COMPRESSED_RGB- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_INTENSITY- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_LUMINANCE- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA- swizzled- border color only,Fail
+spec@arb_texture_compression@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB- swizzled- border color only,Fail
+spec@arb_texture_cube_map_array@arb_texture_cube_map_array-sampler-cube-array-shadow,Fail
+spec@arb_texture_cube_map_array@fbo-generatemipmap-cubemap array s3tc_dxt1,Fail
+spec@arb_texture_float@fbo-blending-formats,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-blending-formats@GL_RGB32F,Fail
+spec@arb_texture_float@fbo-clear-formats,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-clear-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA16F_ARB,Fail
+spec@arb_texture_float@fbo-colormask-formats@GL_ALPHA32F_ARB,Fail
+spec@arb_texture_float@fbo-fast-clear,Fail
+spec@arb_texture_float@multisample-fast-clear gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 2 gl_arb_texture_float,Fail
+spec@arb_texture_float@multisample-formats 4 gl_arb_texture_float,Fail
+spec@arb_texture_float@texwrap formats bordercolor,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_INTENSITY32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA16F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_LUMINANCE_ALPHA32F_ARB- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGB32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA16F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor@GL_RGBA32F- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_INTENSITY32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA16F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_LUMINANCE_ALPHA32F_ARB- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGB32F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA16F- swizzled- border color only,Fail
+spec@arb_texture_float@texwrap formats bordercolor-swizzled@GL_RGBA32F- swizzled- border color only,Fail
+spec@arb_texture_multisample@arb_texture_multisample-dsa-texelfetch,Fail
+spec@arb_texture_multisample@arb_texture_multisample-dsa-texelfetch@Texture type: GL_RGB9_E5,Fail
+spec@arb_texture_query_lod@execution@fs-texturequerylod-nearest-biased,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect bordercolor@GL_RGBA8- border color only,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor,Fail
+spec@arb_texture_rectangle@texwrap rect proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_R8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG16- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor@GL_RG8- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_R8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG16- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats bordercolor-swizzled@GL_RG8- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_R32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG16F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor@GL_RG32F- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_R32F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG16F- swizzled- border color only,Fail
+spec@arb_texture_rg@texwrap formats-float bordercolor-swizzled@GL_RG32F- swizzled- border color only,Fail
+spec@arb_texture_view@rendering-layers-image,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image1DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of image2DArray,Fail
+spec@arb_texture_view@rendering-layers-image@layers rendering of imageCubeArray,Fail
+spec@arb_vertex_attrib_64bit@arb_vertex_attrib_64bit-overlapping-locations api,Fail
+spec@arb_vertex_attrib_64bit@arb_vertex_attrib_64bit-overlapping-locations shader,Fail
+spec@arb_vertex_attrib_64bit@execution@vs-fp64-input-trunc,Fail
+spec@arb_vertex_attrib_64bit@execution@vs-fs-pass-vertex-attrib,Fail
+spec@arb_vertex_attrib_64bit@execution@vs-test-attrib-location,Fail
+spec@arb_vertex_buffer_object@pos-array,Fail
+spec@egl 1.4@eglterminate then unbind context,ExpectedFail
+spec@egl_ext_device_base@conformance@configless_tests,Fail
+spec@egl_ext_device_base@conformance,Fail
+spec@egl_ext_device_base@conformance@pbuffer_tests,Fail
+spec@egl_ext_device_base@conformance@surfaceless_tests,Fail
+spec@egl_ext_protected_content@conformance,ExpectedFail
+spec@egl_khr_gl_image@egl_khr_gl_renderbuffer_image-clear-shared-image gl_depth_component24,ExpectedFail
+spec@egl_khr_surfaceless_context@viewport,ExpectedFail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT,Fail
+spec@ext_direct_state_access@compressedmultiteximage gl_compressed_rgb_bptc_signed_float,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage2DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureImage3DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage2DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE_AND_EXECUTE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT + display list GL_COMPILE,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float@CompressedTextureSubImage3DEXT,Fail
+spec@ext_direct_state_access@compressedtextureimage gl_compressed_rgb_bptc_signed_float,Fail
+spec@ext_direct_state_access@renderbuffer,Fail
+spec@ext_direct_state_access@renderbuffer@GetNamedRenderbufferParameterivEXT,Fail
+spec@ext_framebuffer_multisample@alpha-blending-after-rendering 2,Fail
+spec@ext_framebuffer_multisample@alpha-to-coverage-no-draw-buffer-zero 2,Fail
+spec@ext_framebuffer_multisample@alpha-to-one-dual-src-blend 2,Fail
+spec@ext_framebuffer_multisample@alpha-to-one-dual-src-blend 4,Fail
+spec@ext_framebuffer_multisample@blit-flipped 2 x,Fail
+spec@ext_framebuffer_multisample@blit-flipped 2 y,Fail
+spec@ext_framebuffer_multisample@blit-mismatched-formats,Fail
+spec@ext_framebuffer_multisample@draw-buffers-alpha-to-coverage 2,Fail
+spec@ext_framebuffer_multisample@draw-buffers-alpha-to-one 2,Fail
+spec@ext_framebuffer_multisample@draw-buffers-alpha-to-one 4,Fail
+spec@ext_framebuffer_multisample@enable-flag,Fail
+spec@ext_framebuffer_multisample@fast-clear,Fail
+spec@ext_framebuffer_multisample@formats 2,Fail
+spec@ext_framebuffer_multisample@formats 4,Fail
+spec@ext_framebuffer_multisample@formats all_samples,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 2 centroid-edges,ExpectedFail
+spec@ext_framebuffer_multisample@interpolation 2 non-centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 2 non-centroid-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 centroid-edges,ExpectedFail
+spec@ext_framebuffer_multisample@interpolation 4 non-centroid-deriv-disabled,Fail
+spec@ext_framebuffer_multisample@interpolation 4 non-centroid-disabled,Fail
+spec@ext_framebuffer_multisample@line-smooth 2,Fail
+spec@ext_framebuffer_multisample@multisample-blit 2 color,Fail
+spec@ext_framebuffer_multisample@multisample-blit 2 depth,Fail
+spec@ext_framebuffer_multisample@multisample-blit 2 stencil,Fail
+spec@ext_framebuffer_multisample@multisample-blit 4 depth,Fail
+spec@ext_framebuffer_multisample@multisample-blit 4 stencil,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth combined,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth-computed combined,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 2 depth single,Fail
+spec@ext_framebuffer_multisample@no-color 2 stencil combined,Fail
+spec@ext_framebuffer_multisample@no-color 2 stencil single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth combined,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth-computed combined,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth-computed single,Fail
+spec@ext_framebuffer_multisample@no-color 4 depth single,Fail
+spec@ext_framebuffer_multisample@no-color 4 stencil combined,Fail
+spec@ext_framebuffer_multisample@no-color 4 stencil single,Fail
+spec@ext_framebuffer_multisample@point-smooth 2,Fail
+spec@ext_framebuffer_multisample@polygon-smooth 2,Fail
+spec@ext_framebuffer_multisample@sample-alpha-to-one 2,Fail
+spec@ext_framebuffer_multisample@sample-alpha-to-one 4,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 inverted,Fail
+spec@ext_framebuffer_multisample@sample-coverage 2 non-inverted,Fail
+spec@ext_framebuffer_multisample@unaligned-blit 2 color downsample,Fail
+spec@ext_framebuffer_multisample@unaligned-blit 2 color msaa,Fail
+spec@ext_framebuffer_multisample@upsample 2 stencil,Fail
+spec@ext_framebuffer_multisample@upsample 4 stencil,Fail
+spec@ext_framebuffer_object@fbo-blending-formats,Fail
+spec@ext_framebuffer_object@fbo-blending-formats@GL_ALPHA12,Fail
+spec@ext_framebuffer_object@fbo-blending-formats@GL_ALPHA16,Fail
+spec@ext_framebuffer_object@fbo-blending-snorm,Fail
+spec@ext_framebuffer_object@fbo-clear-formats,Fail
+spec@ext_framebuffer_object@fbo-clear-formats@GL_ALPHA12,Fail
+spec@ext_framebuffer_object@fbo-clear-formats@GL_ALPHA16,Fail
+spec@ext_framebuffer_object@fbo-colormask-formats,Fail
+spec@ext_framebuffer_object@fbo-colormask-formats@GL_ALPHA12,Fail
+spec@ext_framebuffer_object@fbo-colormask-formats@GL_ALPHA16,Fail
+spec@ext_framebuffer_object@fbo-fast-clear,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT24/GL_FLOAT,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT32/GL_UNSIGNED_INT,Fail
+spec@ext_framebuffer_object@fbo-readpixels-depth-formats@GL_DEPTH_COMPONENT/GL_FLOAT,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-clear-and-render,Fail
+spec@ext_framebuffer_object@getteximage-formats init-by-rendering,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_ayuv,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_nv12,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_p010,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_p012,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_p016,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_uyvy,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_xyuv,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y210,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y212,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y216,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y412,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_y416,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_yuv420,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_yuyv,Fail
+spec@ext_image_dma_buf_import@ext_image_dma_buf_import-sample_yvu420,Fail
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 585 d=z24_s8,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor@GL_DEPTH24_STENCIL8- border color only,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_depth_stencil@texwrap formats bordercolor-swizzled@GL_DEPTH24_STENCIL8- swizzled- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor,Fail
+spec@ext_packed_float@texwrap formats bordercolor@GL_R11F_G11F_B10F- border color only,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled,Fail
+spec@ext_packed_float@texwrap formats bordercolor-swizzled@GL_R11F_G11F_B10F- swizzled- border color only,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-dlist@call,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-dlist@compile and execute,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-dlist,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw_gles2,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw_gles2@negative clamp,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw_gles2@positive clamp,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw@negative clamp,Fail
+spec@ext_polygon_offset_clamp@ext_polygon_offset_clamp-draw@positive clamp,Fail
+spec@ext_provoking_vertex@provoking-vertex,Fail
+spec@ext_texture_array@fbo-generatemipmap-array rgb9_e5,Fail
+spec@ext_texture_array@fbo-generatemipmap-array s3tc_dxt1,Fail
+spec@ext_texture_array@gen-mipmap,Fail
+spec@ext_texture_compression_rgtc@fbo-generatemipmap-formats-signed,Fail
+spec@ext_texture_compression_rgtc@fbo-generatemipmap-formats-signed@GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@ext_texture_compression_rgtc@fbo-generatemipmap-formats-signed@GL_COMPRESSED_SIGNED_RED_RGTC1 NPOT,Fail
+spec@ext_texture_compression_rgtc@rgtc-teximage-01,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RED_RGTC1- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor@GL_COMPRESSED_SIGNED_RG_RGTC2- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RED_RGTC1- swizzled- border color only,Fail
+spec@ext_texture_compression_rgtc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_SIGNED_RG_RGTC2- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets 2d_array s3tc,Fail
+spec@ext_texture_compression_s3tc@getteximage-targets cube_array s3tc,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGBA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_compression_s3tc@texwrap formats bordercolor-swizzled@GL_COMPRESSED_RGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@fbo-blending,Fail
+spec@ext_texture_integer@multisample-fast-clear gl_ext_texture_integer,Fail
+spec@ext_texture_integer@texwrap formats bordercolor,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA16UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA32UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8I_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor@GL_ALPHA8UI_EXT- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA16UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA32UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8I_EXT- swizzled- border color only,Fail
+spec@ext_texture_integer@texwrap formats bordercolor-swizzled@GL_ALPHA8UI_EXT- swizzled- border color only,Fail
+spec@ext_texture_lod_bias@lodbias,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor@GL_RGB9_E5- border color only,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_shared_exponent@texwrap formats bordercolor-swizzled@GL_RGB9_E5- swizzled- border color only,Fail
+spec@ext_texture_snorm@multisample-formats 2 gl_ext_texture_snorm,Fail
+spec@ext_texture_snorm@multisample-formats 4 gl_ext_texture_snorm,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_INTENSITY8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_LUMINANCE8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_R8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RG8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGB8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA16_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor@GL_RGBA8_SNORM- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_INTENSITY8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_R8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RG8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGB8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA16_SNORM- swizzled- border color only,Fail
+spec@ext_texture_snorm@texwrap formats bordercolor-swizzled@GL_RGBA8_SNORM- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SLUMINANCE8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8_ALPHA8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor@GL_SRGB8- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SLUMINANCE8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8_ALPHA8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats bordercolor-swizzled@GL_SRGB8- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SLUMINANCE- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SLUMINANCE- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_ALPHA- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB_S3TC_DXT1_EXT- swizzled- border color only,Fail
+spec@ext_texture_srgb@texwrap formats-s3tc bordercolor-swizzled@GL_COMPRESSED_SRGB- swizzled- border color only,Fail
+spec@ext_transform_feedback@builtin-varyings gl_culldistance,Fail
+spec@ext_transform_feedback@immediate-reuse-index-buffer,Fail
+spec@ext_transform_feedback@immediate-reuse-uniform-buffer,Fail
+spec@glsl-1.10@execution@samplers@glsl-fs-shadow2d-clamp-z,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 1d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 1dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 2d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 2dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() 3d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:texture() cube,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 1d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 1d_projvec4,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 1dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 2d,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 2d_projvec4,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 2dshadow,Fail
+spec@glsl-1.20@execution@tex-miplevel-selection gl2:textureproj 3d,Fail
+spec@glsl-1.30@execution@fs-texturelod-miplevels-biased,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cubearray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cubearrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cube,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texture() cubeshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad cubearray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad cube,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegrad cubeshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturegradoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod cubearray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelod cube,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection texturelodoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2darray,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2darrayshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureproj 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgrad 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojgradoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlod 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojlodoffset 3d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 1d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 1d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 1dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 2d,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 2d_projvec4,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 2dshadow,Fail
+spec@glsl-1.30@execution@tex-miplevel-selection textureprojoffset 3d,Fail
+spec@glsl-1.30@execution@vs-texturelod-miplevels-biased,Fail
+spec@glsl-1.50@built-in constants,Fail
+spec@glsl-1.50@built-in constants@gl_MaxGeometryOutputComponents,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_line_loop other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_line_strip_adjacency other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_line_strip other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_points other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_triangle_fan other,Fail
+spec@glsl-1.50@execution@geometry@primitive-id-restart gl_triangle_strip other,Fail
+spec@glsl-1.50@execution@geometry@tri-strip-ordering-with-prim-restart gl_triangle_strip_adjacency other,Fail
+spec@glsl-1.50@execution@geometry@tri-strip-ordering-with-prim-restart gl_triangle_strip other,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-first-vertex,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quads,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-quad-strip,Fail
+spec@glsl-1.50@execution@primitive-id-no-gs-strip-first-vertex,Fail
+spec@glsl-3.30@built-in constants,Fail
+spec@glsl-3.30@built-in constants@gl_MaxGeometryOutputComponents,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-abs-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-ceil-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec2-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec3-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec4-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-clamp-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-cross-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-determinant-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-determinant-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-determinant-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-distance-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-dot-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-equal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-equal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-equal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-faceforward-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-floor-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-fract-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-greaterthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inverse-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inverse-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inverse-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-inversesqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-length-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-lessthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-matrixcompmult-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-max-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-min-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-double-double-bool,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec2-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec3-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec4-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mix-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-mod-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-normalize-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-notequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-notequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-notequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-add-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-div-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat2x4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat3x4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dmat4x3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-mult-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-op-sub-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-outerproduct-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-reflect-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-refract-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-round-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-roundeven-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sign-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-double-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-smoothstep-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-sqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-step-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-transpose-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-double,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@fs-trunc-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-abs-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-ceil-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec2-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec3-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec4-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-clamp-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-cross-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-determinant-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-determinant-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-determinant-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-distance-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-dot-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-equal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-equal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-equal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-faceforward-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-floor-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-fract-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-greaterthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inverse-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inverse-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inverse-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-inversesqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-length-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-lessthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-matrixcompmult-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-max-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-min-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-double-double-bool,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec3-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec4-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mix-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-mod-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-normalize-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-notequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-notequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-notequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-add-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-div-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat2x4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat3x4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dmat4x3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-mult-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-op-sub-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-outerproduct-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-reflect-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-refract-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-round-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-roundeven-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sign-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-double-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-smoothstep-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-sqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-step-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-transpose-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-double,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@gs-trunc-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-abs-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-ceil-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec2-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec3-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec4-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-clamp-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-cross-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-determinant-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-determinant-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-determinant-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-distance-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-dot-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-equal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-equal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-equal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-faceforward-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-floor-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-fract-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-greaterthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inverse-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inverse-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inverse-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-inversesqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-length-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthan-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthan-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthan-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthanequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthanequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-lessthanequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-matrixcompmult-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-max-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-min-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-double-double-bool,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec2-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec3-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec4-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mix-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-mod-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-normalize-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-notequal-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-notequal-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-notequal-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-add-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-div-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat2x4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat3x4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dmat4x3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-mult-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat2x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat3x4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dmat4x3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-op-sub-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec2-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec2-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec3-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec3-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec4-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec4-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-outerproduct-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-reflect-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-dvec2-dvec2-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-dvec3-dvec3-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-refract-dvec4-dvec4-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-round-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-roundeven-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sign-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-double-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-dvec2-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-dvec3-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-smoothstep-dvec4-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-sqrt-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-double-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-dvec2-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-dvec3-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-step-dvec4-dvec4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat2x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat2x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat3x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat3x4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat4,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat4x2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-transpose-dmat4x3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-double,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-dvec2,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-dvec3,Fail
+spec@glsl-4.00@execution@built-in-functions@vs-trunc-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bool-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-bvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat2-mat2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat2x3-mat2x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat2x4-mat2x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat3-mat3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat3x2-mat3x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat3x4-mat3x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat4-mat4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat4x2-mat4x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dmat4x3-mat4x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-bool,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-float,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-int,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-double-uint,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-ivec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-uvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec2-vec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-ivec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-uvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec3-vec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-ivec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-uvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-explicit-dvec4-vec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-float-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-int-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-ivec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-ivec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-ivec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat2-dmat2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat3-dmat3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat4-dmat4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-mat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uint-double,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-uvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-vec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-vec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@frag-conversion-implicit-vec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bool-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-bvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat2-mat2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat2x3-mat2x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat2x4-mat2x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat3-mat3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat3x2-mat3x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat3x4-mat3x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat4-mat4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat4x2-mat4x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dmat4x3-mat4x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-bool,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-float,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-int,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-double-uint,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-ivec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-uvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec2-vec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-ivec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-uvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec3-vec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-ivec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-uvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-explicit-dvec4-vec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-float-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-int-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-ivec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-ivec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-ivec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat2-dmat2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat3-dmat3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat4-dmat4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-mat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uint-double,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-uvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-vec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-vec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@geom-conversion-implicit-vec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bool-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-bvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat2-mat2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat2x3-mat2x3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat2x4-mat2x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat3-mat3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat3x2-mat3x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat3x4-mat3x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat4-mat4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat4x2-mat4x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dmat4x3-mat4x3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-bool,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-float,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-int,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-double-uint,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-bvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-ivec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-uvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec2-vec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-bvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-ivec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-uvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec3-vec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-bvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-ivec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-uvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-explicit-dvec4-vec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-float-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-int-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-ivec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-ivec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-ivec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat2-dmat2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat2x3-dmat2x3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat2x4-dmat2x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat3-dmat3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat3x2-dmat3x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat3x4-dmat3x4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat4-dmat4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat4x2-dmat4x2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-mat4x3-dmat4x3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uint-double,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uvec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uvec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-uvec4-dvec4,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-vec2-dvec2,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-vec3-dvec3,Fail
+spec@glsl-4.00@execution@conversion@vert-conversion-implicit-vec4-dvec4,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-dvec3@2@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-float-and-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1@2-s2@2-s3@2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat3x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat4x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat4x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dmat4x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@3-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double@4-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double-float-double@2-float@3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec2@3-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec2@4-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@2@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@2@2-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3@4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@1-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@2-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@3-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@4-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@5-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float@6-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-float-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-float-float-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-float-float-float-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-float-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec2-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec3-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2@2-vec3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-float-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-s3@2-float-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-s3@2-vec3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-vec2-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@2-vec3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@3-double-float-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@3-dvec2-float-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2@3-dvec3-float-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-double-s3-float-s4-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-s2-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec2-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat2x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat2x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat2x4-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat3x2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dmat3x3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-double@3@2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-double-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dvec2-location-0,Crash
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s1-vec3-dvec3-location-0,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2@2-float-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2@2-vec2-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2@2-vec3-double,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-vec2-and-double@3@2,Fail
+spec@glsl-4.00@execution@inout@vs-out-fs-in-vec3-and-double@3@2,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@frag-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@geom-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.10@execution@conversion@vert-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@frag-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@geom-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat2-mat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat2x3-mat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat2x4-mat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat3-mat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat3x2-mat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat3x4-mat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat4-mat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat4x2-mat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dmat4x3-mat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-double-float-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dvec2-vec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dvec3-vec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-explicit-dvec4-vec4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-float-double-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat2-dmat2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat2x3-dmat2x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat2x4-dmat2x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat3-dmat3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat3x2-dmat3x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat3x4-dmat3x4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat4-dmat4-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat4x2-dmat4x2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-mat4x3-dmat4x3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-vec2-dvec2-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-vec3-dvec3-zero-sign,Fail
+spec@glsl-4.20@execution@conversion@vert-conversion-implicit-vec4-dvec4-zero-sign,Fail
+spec@khr_texture_compression_astc@array-gl@12x12 Block Dim,Fail
+spec@khr_texture_compression_astc@array-gl@5x5 Block Dim,Fail
+spec@khr_texture_compression_astc@array-gles@12x12 Block Dim,Fail
+spec@khr_texture_compression_astc@array-gles@5x5 Block Dim,Fail
+spec@khr_texture_compression_astc@array-gles,Fail
+spec@khr_texture_compression_astc@array-gl,Fail
+spec@khr_texture_compression_astc@miptree-gles ldr,Fail
+spec@khr_texture_compression_astc@miptree-gles ldr@LDR Profile,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-sd,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb-sd@sRGB skip decode,Fail
+spec@khr_texture_compression_astc@miptree-gles srgb@sRGB decode,Fail
+spec@khr_texture_compression_astc@miptree-gl ldr,Fail
+spec@khr_texture_compression_astc@miptree-gl ldr@LDR Profile,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-sd,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb-sd@sRGB skip decode,Fail
+spec@khr_texture_compression_astc@miptree-gl srgb@sRGB decode,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles ldr,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles ldr@LDR Profile,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gles srgb@sRGB decode,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl ldr,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl ldr@LDR Profile,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb-fp@sRGB decode full precision,Fail
+spec@khr_texture_compression_astc@sliced-3d-miptree-gl srgb@sRGB decode,Fail
+spec@khr_texture_compression_astc@void-extent-dl-bug,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 0 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x223344ff,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color 1 0x76356278,Fail
+spec@nv_copy_depth_to_color@nv_copy_depth_to_color,Fail
+spec@nv_copy_image@nv_copy_image-formats,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_ALPHA16/Destination: GL_ALPHA16,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RED_RGTC1/Destination: GL_COMPRESSED_RED_RGTC1,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_BPTC_UNORM/Destination: GL_COMPRESSED_RGBA_BPTC_UNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT3_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_RGBA_S3TC_DXT5_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT/Destination: GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_RGB_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_RG_RGTC2/Destination: GL_COMPRESSED_RG_RGTC2,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RED_RGTC1/Destination: GL_COMPRESSED_SIGNED_RED_RGTC1,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SIGNED_RG_RGTC2/Destination: GL_COMPRESSED_SIGNED_RG_RGTC2,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM/Destination: GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT/Destination: GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT/Destination: GL_COMPRESSED_SRGB_S3TC_DXT1_EXT,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_DEPTH_COMPONENT24/Destination: GL_DEPTH_COMPONENT24,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_R16_SNORM/Destination: GL_R16_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_R8_SNORM/Destination: GL_R8_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGB16_SNORM/Destination: GL_RGB16_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGB8_SNORM/Destination: GL_RGB8_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGBA16_SNORM/Destination: GL_RGBA16_SNORM,Fail
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_RGBA8_SNORM/Destination: GL_RGBA8_SNORM,Fail
+spec@nv_primitive_restart@primitive-restart-disable_vbo,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_combined_vertex_and_index,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_index_only,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_separate_vertex_and_index,Fail
+spec@nv_primitive_restart@primitive-restart-vbo_vertex_only,Fail
+spec@nv_read_depth@read_depth_gles3,Fail
+spec@oes_egl_image_external_essl3@oes_egl_image_external_essl3,ExpectedFail
+spec@oes_egl_image_external_essl3@oes_egl_image_external_essl3@oes_egl_image_external_essl3_imageLoad,ExpectedFail
+spec@oes_egl_image_external_essl3@oes_egl_image_external_essl3@oes_egl_image_external_essl3_imageStore,ExpectedFail
+spec@oes_point_sprite@arb_point_sprite-checkerboard_gles1,Fail
+spec@!opengl 1.0@gl-1.0-drawbuffer-modes,ExpectedFail
+spec@!opengl 1.0@gl-1.0-edgeflag-const,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag,Fail
+spec@!opengl 1.0@gl-1.0-edgeflag-quads,Fail
+spec@!opengl 1.0@gl-1.0-logicop,Fail
+spec@!opengl 1.0@gl-1.0-logicop@GL_NOOP,Fail
+spec@!opengl 1.0@gl-1.0-logicop@GL_NOOP_MSAA,Fail
+spec@!opengl 1.0@gl-1.0-spot-light,Fail
+spec@!opengl 1.0@gl-1.0-swapbuffers-behavior,ExpectedFail
+spec@!opengl 1.1@clipflat,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glBegin/End(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawArrays(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_POLYGON)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUADS)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_QUAD_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_FAN)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLES)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CCW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_FILL)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: center top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: left top PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right bottom PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right middle PV: FIRST,Fail
+spec@!opengl 1.1@clipflat@glDrawElements(GL_TRIANGLE_STRIP)- glFrontFace(GL_CW)- glPolygonMode(GL_LINE)- quadrant: right top PV: FIRST,Fail
+spec@!opengl 1.1@depthstencil-default_fb-blit samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-blit samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-copypixels samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-24_8 samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-32f_24_8_rev samples=4,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-float-and-ushort samples=2,Fail
+spec@!opengl 1.1@depthstencil-default_fb-drawpixels-float-and-ushort samples=4,Fail
+spec@!opengl 1.1@draw-pixels,Fail
+spec@!opengl 1.1@line-flat-clip-color,Fail
+spec@!opengl 1.1@linestipple@Factor 2x,Fail
+spec@!opengl 1.1@linestipple@Factor 3x,Fail
+spec@!opengl 1.1@linestipple,Fail
+spec@!opengl 1.1@linestipple@Line loop,Fail
+spec@!opengl 1.1@linestipple@Line strip,Fail
+spec@!opengl 1.1@linestipple@Restarting lines within a single Begin-End block,Fail
+spec@!opengl 1.1@point-line-no-cull,Fail
+spec@!opengl 1.1@polygon-mode-facing,Fail
+spec@!opengl 1.1@polygon-mode,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on bottom edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on left edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 0: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 1: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 2: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on bottom edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on left edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 3: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on bottom edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on left edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 4: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 5: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 5: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 5: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected blue pixel in center,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected white pixel on right edge,Fail
+spec@!opengl 1.1@polygon-mode-offset@config 6: Expected white pixel on top edge,Fail
+spec@!opengl 1.1@polygon-mode-offset,Fail
+spec@!opengl 1.0@rasterpos,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_gs_linked,Fail
+spec@!opengl 1.0@rasterpos@glsl_vs_tes_linked,Fail
+spec@!opengl 1.1@read-front clear-front-first,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=2,Crash
+spec@!opengl 1.1@read-front clear-front-first samples=4,Crash
+spec@!opengl 1.1@read-front,Crash
+spec@!opengl 1.1@read-front samples=2,Crash
+spec@!opengl 1.1@read-front samples=4,Crash
+spec@!opengl 1.1@teximage-colors gl_alpha16@Exact upload-download of GL_ALPHA16,Fail
+spec@!opengl 1.1@teximage-colors gl_r16_snorm@Exact upload-download of GL_R16_SNORM,Fail
+spec@!opengl 1.1@teximage-colors gl_r8_snorm@Exact upload-download of GL_R8_SNORM,Fail
+spec@!opengl 1.1@teximage-colors gl_rgb16_snorm@Exact upload-download of GL_RGB16_SNORM,Fail
+spec@!opengl 1.1@teximage-colors gl_rgb8_snorm@Exact upload-download of GL_RGB8_SNORM,Fail
+spec@!opengl 1.1@teximage-colors gl_rgba16_snorm@Exact upload-download of GL_RGBA16_SNORM,Fail
+spec@!opengl 1.1@teximage-colors gl_rgba8_snorm@Exact upload-download of GL_RGBA8_SNORM,Fail
+spec@!opengl 1.1@teximage-colors gl_rgba,Fail
+spec@!opengl 1.1@teximage-colors gl_rgba@GL_RGBA texture with GL_BGRA and GL_UNSIGNED_INT_2_10_10_10_REV,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 1d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor,Fail
+spec@!opengl 1.1@texwrap 2d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_INTENSITY8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16_ALPHA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4_ALPHA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE6_ALPHA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8_ALPHA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_LUMINANCE8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_R3_G3_B2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10_A2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB10- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5_A1- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB5- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGB8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA12- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA16- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA2- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA4- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_INTENSITY8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16_ALPHA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4_ALPHA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE6_ALPHA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8_ALPHA8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_LUMINANCE8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_R3_G3_B2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10_A2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB10- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5_A1- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB5- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGB8- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA12- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA16- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA2- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA4- swizzled- border color only,Fail
+spec@!opengl 1.1@texwrap formats bordercolor-swizzled@GL_RGBA8- swizzled- border color only,Fail
+spec@!opengl 1.1@windowoverlap,Fail
+spec@!opengl 1.2@copyteximage 3d,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d bordercolor@GL_RGBA8- border color only,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor,Fail
+spec@!opengl 1.2@texwrap 3d proj bordercolor@GL_RGBA8- projected- border color only,Fail
+spec@!opengl 1.4@gl-1.4-tex1d-2dborder,Fail
+spec@!opengl 1.4@tex-miplevel-selection-lod-bias,Fail
+spec@!opengl 1.5@depth-tex-compare,Fail
+spec@!opengl 1.5@draw-elements-user,Fail
+spec@!opengl 1.5@draw-vertices,Fail
+spec@!opengl 1.5@draw-vertices-user,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag,Fail
+spec@!opengl 2.0@gl-2.0-edgeflag-immediate,Fail
+spec@!opengl 2.0@gl-2.0-large-point-fs,Fail
+spec@!opengl 2.0@gl-2.0-vertexattribpointer,Fail
+spec@!opengl 2.0@occlusion-query-discard,Fail
+spec@!opengl 3.0@sampler-cube-shadow,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims cull-back pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims cull-front pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims line cull-back pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims line cull-front pv-first,Fail
+spec@!opengl 3.2@gl-3.2-adj-prims pv-first,Fail
+spec@!opengl 3.2@layered-rendering@clear-color-mismatched-layer-count,Fail
+spec@!opengl 3.2@minmax,Fail
+spec@!opengl 3.2@pointsprite-coord,Fail
+spec@!opengl 3.2@pointsprite-origin,Fail
+spec@!opengl 3.3@minmax,Fail
+spec@!opengl es 3.0@gles-3.0-transform-feedback-uniform-buffer-object,Fail
diff --git a/.gitlab-ci/expectations/virt/virgl-gles-flakes.txt b/.gitlab-ci/expectations/virt/virgl-gles-flakes.txt
new file mode 100644
index 00000000..50087606
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/virgl-gles-flakes.txt
@@ -0,0 +1,158 @@
+dEQP-GLES31.functional.draw_buffers_indexed.random.max_implementation_draw_buffers.8
+dEQP-GLES31.functional.ssbo.layout.random.all_shared_buffer.36
+dEQP-GLES31.functional.ssbo.layout.random.arrays_of_arrays.1
+dEQP-GLES31.functional.ssbo.layout.random.nested_structs_arrays_instance_arrays.22
+dEQP-GLES31.functional.ssbo.layout.unsized_struct_array.per_block_buffer.shared_instance_array
+
+hiz@hiz-depth-read-fbo-d24-s0
+hiz@hiz-depth-read-window-stencil0
+hiz@hiz-stencil-read-window-depth0
+hiz@hiz-stencil-read-window-depth1
+shaders@glsl-fs-sampler-numbering
+shaders@glsl-max-varyings
+shaders@glsl-max-varyings >max_varying_components
+shaders@glsl-uniform-interstage-limits@subdivide 5
+shaders@glsl-uniform-interstage-limits@subdivide 5- statechanges
+spec@amd_shader_trinary_minmax@execution@built-in-functions@gs-max3-vec3-vec3-vec3
+spec@amd_shader_trinary_minmax@execution@built-in-functions@tcs-mid3-uvec2-uvec2-uvec2
+spec@amd_shader_trinary_minmax@execution@built-in-functions@vs-max3-vec3-vec3-vec3
+spec@arb_clip_control@arb_clip_control-clip-control
+spec@arb_compute_shader@local-id-explosion
+spec@arb_copy_image@arb_copy_image-formats@Source: GL_STENCIL_INDEX8/Destination: GL_STENCIL_INDEX8
+spec@arb_depth_buffer_float@depthstencil-render-miplevels 146 d=z32f_s8
+spec@arb_depth_buffer_float@depthstencil-render-miplevels 273 s=d=z32f_s8
+spec@arb_depth_buffer_float@depthstencil-render-miplevels 585 d=z32f_s8
+spec@arb_depth_buffer_float@fbo-depthstencil-gl_depth32f_stencil8-copypixels
+spec@arb_depth_texture@depthstencil-render-miplevels 146 d=z16
+spec@arb_depth_texture@depthstencil-render-miplevels 585 d=z16
+spec@arb_fragment_layer_viewport@layer-gs-writes-in-range
+spec@arb_fragment_layer_viewport@viewport-gs-writes-in-range
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@Basic
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glScissor
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@glViewport
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@MS4
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-atomic@Per-sample
+spec@arb_framebuffer_no_attachments@arb_framebuffer_no_attachments-roundup-samples
+spec@arb_framebuffer_object@arb_framebuffer_object-depth-stencil-blit stencil gl_stencil_index1
+spec@arb_framebuffer_srgb@blit renderbuffer linear_to_srgb upsample disabled clear
+spec@arb_get_texture_sub_image@arb_get_texture_sub_image-getcompressed
+spec@arb_gpu_shader5@texturegather@fs-r-0-uint-2drect
+spec@arb_gpu_shader5@texturegather@fs-rgb-1-unorm-2drect
+spec@arb_gpu_shader5@texturegatheroffset@fs-rgb-2-float-2drect-const
+spec@arb_gpu_shader5@texturegatheroffsets@vs-rg-0-float-2drect
+spec@arb_gpu_shader5@texturegatheroffsets@vs-rgb-0-int-2d
+spec@arb_gpu_shader5@texturegatheroffsets@vs-rgb-1-int-2d
+spec@arb_gpu_shader5@texturegatheroffset@vs-r-0-unorm-2darray
+spec@arb_gpu_shader5@texturegatheroffset@vs-rgb-2-uint-2drect-const
+spec@arb_shader_atomic_counter_ops@execution@add
+spec@arb_shader_atomic_counters@fragment-discard
+spec@arb_shader_atomic_counters@function-argument
+spec@arb_shader_image_load_store@atomicity
+spec@arb_shader_image_load_store@atomicity@imageAtomicAdd
+spec@arb_shader_image_load_store@atomicity@imageAtomicAnd
+spec@arb_shader_image_load_store@atomicity@imageAtomicCompSwap
+spec@arb_shader_image_load_store@atomicity@imageAtomicExchange
+spec@arb_shader_image_load_store@atomicity@imageAtomicMax
+spec@arb_shader_image_load_store@atomicity@imageAtomicMin
+spec@arb_shader_image_load_store@atomicity@imageAtomicOr
+spec@arb_shader_image_load_store@atomicity@imageAtomicXor
+spec@arb_shader_precision@fs-op-assign-div-vec3-float
+spec@arb_shader_storage_buffer_object@execution@memory-layouts-struct-deref
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicadd-int
+spec@arb_shader_storage_buffer_object@execution@ssbo-atomicexchange-int
+spec@arb_texture_cube_map@copyteximage cube samples=4
+spec@arb_texture_multisample@texelfetch fs sampler2dms 4 1x71-501x71
+spec@arb_texture_rg@texwrap formats-float bordercolor
+spec@arb_texture_view@rendering-target
+spec@arb_texture_view@rendering-target@1D view rendering
+spec@arb_timer_query@query gl_timestamp
+spec@arb_timer_query@timestamp-get
+spec@ext_timer_query@time-elapsed
+spec@arb_uniform_buffer_object@rendering-array
+spec@ext_framebuffer_blit@fbo-blit-check-limits
+spec@ext_framebuffer_blit@fbo-sys-blit
+spec@ext_framebuffer_blit@fbo-sys-sub-blit
+spec@ext_framebuffer_multisample@sample-alpha-to-coverage 2 depth
+spec@ext_framebuffer_object@fbo-fragcoord
+spec@ext_framebuffer_object@fbo-stencil-gl_stencil_index8-drawpixels
+spec@ext_gpu_shader4@execution@texelfetch@fs-texelfetch-isampler3d
+spec@ext_gpu_shader4@execution@texelfetchoffset@fs-texelfetch-sampler1d
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 146 ds=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 146 s=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 273 d=s=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 273 s=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 292 d=s=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 292 s=d=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 585 d=z24_s8
+spec@ext_packed_depth_stencil@depthstencil-render-miplevels 585 s=z24_s8
+spec@ext_packed_depth_stencil@fbo-clear-formats stencil
+spec@ext_packed_depth_stencil@fbo-clear-formats stencil@GL_DEPTH_STENCIL
+spec@ext_packed_depth_stencil@fbo-depth-gl_depth24_stencil8-tex1d
+spec@ext_texture_array@fbo-depth-array depth-draw
+spec@ext_texture_compression_s3tc@texwrap formats
+spec@ext_texture_compression_s3tc@texwrap formats@GL_COMPRESSED_RGB_S3TC_DXT1_EXT
+spec@ext_transform_feedback@builtin-varyings gl_color
+spec@glsl-1.10@execution@built-in-functions@fs-op-lt-float-float
+spec@glsl-1.10@execution@built-in-functions@vs-acos-float
+spec@glsl-1.10@execution@built-in-functions@vs-all-bvec2
+spec@glsl-1.10@execution@built-in-functions@vs-mix-vec4-vec4-vec4
+spec@glsl-1.10@execution@built-in-functions@vs-op-assign-add-ivec3-int
+spec@glsl-1.10@execution@built-in-functions@vs-op-div-vec2-float
+spec@glsl-1.10@execution@built-in-functions@vs-op-sub-vec2-float
+spec@glsl-1.10@execution@interpolation@interpolation-none-gl_backsecondarycolor-smooth-none
+spec@glsl-1.10@execution@variable-indexing@fs-temp-array-mat4-index-row-wr
+spec@glsl-1.20@execution@built-in-functions@fs-op-assign-mult-mat4x2-mat4
+spec@glsl-1.20@execution@built-in-functions@vs-op-eq-mat2x4-mat2x4
+spec@glsl-1.20@execution@built-in-functions@vs-op-mult-mat2x4-mat3x2
+spec@glsl-1.20@execution@const-builtin@glsl-const-builtin-fract
+spec@glsl-1.20@execution@uniform-initializer@fs-mat3
+spec@glsl-1.20@execution@uniform-initializer@vs-mat4-set-by-other-stage
+spec@glsl-1.20@execution@variable-indexing@fs-uniform-array-mat4-col-row-rd
+spec@glsl-1.20@execution@vs-outerproduct-mat4-ivec
+spec@glsl-1.30@execution@built-in-functions@fs-clamp-ivec3-int-int
+spec@glsl-1.30@execution@built-in-functions@fs-op-assign-lshift-ivec4-int
+spec@glsl-1.30@execution@built-in-functions@fs-op-assign-rshift-ivec2-uint
+spec@glsl-1.30@execution@built-in-functions@fs-op-bitand-not-abs-ivec3-int
+spec@glsl-1.30@execution@built-in-functions@fs-op-bitxor-abs-not-ivec2-int
+spec@glsl-1.30@execution@interpolation@interpolation-flat-gl_backsecondarycolor-smooth-none
+spec@glsl-1.30@execution@switch@fs-uint
+spec@glsl-1.30@execution@texelfetch fs sampler2darray 98x1x9-98x129x9
+spec@glsl-1.30@execution@texelfetch fs sampler3d 1x129x9-98x129x9
+spec@glsl-1.30@execution@texelfetch@vs-texelfetch-isampler2darray-swizzle
+spec@glsl-1.50@execution@built-in-functions@gs-fract-vec2
+spec@glsl-1.50@execution@built-in-functions@gs-fract-vec3
+spec@glsl-1.50@execution@built-in-functions@gs-op-assign-bitand-ivec3-int
+spec@glsl-1.50@execution@built-in-functions@gs-op-bitor-abs-not-ivec4-ivec4
+spec@glsl-1.50@execution@built-in-functions@gs-op-bitor-neg-int-ivec3
+spec@glsl-1.50@execution@built-in-functions@gs-op-uplus-ivec2
+spec@glsl-1.50@execution@texelfetchoffset@gs-texelfetch-isampler1darray
+spec@glsl-4.00@execution@inout@vs-out-fs-in-s2@2-vec3-dmat2x2
+spec@glsl-4.30@execution@built-in-functions@cs-op-add-mat2x4-float
+spec@glsl-4.30@execution@built-in-functions@cs-op-mult-uvec2-uint
+spec@glsl-4.30@execution@built-in-functions@cs-op-rshift-ivec2-int
+spec@glsl-4.30@execution@built-in-functions@cs-op-selection-bool-vec4-vec4
+spec@nv_copy_image@nv_copy_image-formats@Source: GL_DEPTH32F_STENCIL8/Destination: GL_DEPTH32F_STENCIL8
+spec@oes_viewport_array@viewport-gs-writes-in-range
+spec@!opengl 1.0@gl-1.0-blend-func
+spec@!opengl 1.0@gl-1.0-front-invalidate-back
+spec@!opengl 1.1@copypixels-draw-sync
+spec@!opengl 1.1@draw-sync
+spec@!opengl 1.1@getteximage-depth
+spec@!opengl 1.1@getteximage-depth@GL_TEXTURE_CUBE_MAP-GL_DEPTH24_STENCIL8
+spec@!opengl 1.1@masked-clear
+spec@!opengl 1.1@ppgtt_memory_alignment
+spec@!opengl 1.1@read-front clear-front-first
+spec@!opengl 1.1@read-front clear-front-first samples=2
+spec@!opengl 1.1@read-front clear-front-first samples=4
+spec@!opengl 1.1@read-front samples=2
+spec@!opengl 1.1@stencil-drawpixels
+spec@!opengl 2.0@vertex-program-two-side front back back2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front back back2@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front back front2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front back front2@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front back@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front back@vs- gs and fs
+spec@!opengl 2.0@vertex-program-two-side front front2 back2@gs-out and fs
+spec@!opengl 2.0@vertex-program-two-side front front2 back2@vs- gs and fs
+spec@!opengl 3.0@gl30basic
diff --git a/.gitlab-ci/expectations/virt/virgl-gles-skips.txt b/.gitlab-ci/expectations/virt/virgl-gles-skips.txt
new file mode 100644
index 00000000..c3ed979f
--- /dev/null
+++ b/.gitlab-ci/expectations/virt/virgl-gles-skips.txt
@@ -0,0 +1,181 @@
+glx@.*
+
+# Skip because we don't care for fp64 for now
+spec@arb_gpu_shader_fp64@.*
+
+# Skip TS tests for now
+spec@arb_tessellation_shader@.*
+
+# Skip, this is expected
+# Refer to src/mesa/main/drawpix.c:100
+spec@ext_texture_integer@fbo-integer
+
+# Fails on iris too
+spec@arb_direct_state_access@gettextureimage-formats
+
+spec@nv_primitive_restart@primitive-restart-draw-mode-polygon
+spec@nv_primitive_restart@primitive-restart-draw-mode-quad_strip
+spec@nv_primitive_restart@primitive-restart-draw-mode-quads
+spec@ext_framebuffer_multisample@clip-and-scissor-blit.*
+
+# Skip because they crash crosvm/virglrenderer
+spec@arb_shader_image_load_store@max-size
+spec@glsl-1.50@execution@interface-blocks-api-access-members
+
+# Skip slow tests on crosvm/virglrenderer (90-250 s)
+spec@glsl-1.30@execution@interpolation@interpolation-noperspective-gl_backsecondarycolor-flat-distance
+spec@glsl-1.30@execution@interpolation@interpolation-smooth-gl_backsecondarycolor-flat-fixed
+spec@glsl-1.50@execution@built-in-functions@gs-op-bitor-neg-int-ivec3
+spec@glsl-4.30@execution@built-in-functions@cs-op-selection-bool-vec4-vec4
+
+# Skip any fp64 tests, this is not supported on GLES hosts
+spec@glsl-4.*@*dmat*
+spec@glsl-4.*@*dvec*
+spec@glsl-4.*@*double*
+spec@arb_enhanced_layouts@execution@component-layout@vs-gs-fs-double
+
+# GLES doesn't support VS array inputs
+spec@arb_enhanced_layouts@execution@component-layout@vs-attribs-array
+
+# GLES doesn't support more than one stream
+spec@arb_enhanced_layouts@gs-stream-location-aliasing
+
+
+# All these tests use a RGBA32F RW image and this is not supported on GLES
+# so skip the tests
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/WaR/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Buffer update/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Framebuffer/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/WaR/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Pixel/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture update/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Transform feedback/WaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/one bit barrier test/16x16
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/one bit barrier test/64x64
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Atomic counter/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Element array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Image/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Indirect/RaW/one bit barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Texture fetch/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Uniform buffer/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@host-mem-barrier@Vertex array/RaW/full barrier test/4x4
+spec@arb_shader_image_load_store@layer
+spec@arb_shader_image_load_store@layer@image1DArray/layered binding test
+spec@arb_shader_image_load_store@layer@image1DArray/non-layered binding test
+spec@arb_shader_image_load_store@layer@image1D/layered binding test
+spec@arb_shader_image_load_store@layer@image1D/non-layered binding test
+spec@arb_shader_image_load_store@layer@image2DArray/layered binding test
+spec@arb_shader_image_load_store@layer@image2DArray/non-layered binding test
+spec@arb_shader_image_load_store@layer@image2D/layered binding test
+spec@arb_shader_image_load_store@layer@image2D/non-layered binding test
+spec@arb_shader_image_load_store@layer@image2DRect/layered binding test
+spec@arb_shader_image_load_store@layer@image2DRect/non-layered binding test
+spec@arb_shader_image_load_store@layer@image3D/layered binding test
+spec@arb_shader_image_load_store@layer@image3D/non-layered binding test
+spec@arb_shader_image_load_store@layer@imageBuffer/layered binding test
+spec@arb_shader_image_load_store@layer@imageBuffer/non-layered binding test
+spec@arb_shader_image_load_store@layer@imageCubeArray/layered binding test
+spec@arb_shader_image_load_store@layer@imageCubeArray/non-layered binding test
+spec@arb_shader_image_load_store@layer@imageCube/layered binding test
+spec@arb_shader_image_load_store@layer@imageCube/non-layered binding test
+spec@arb_shader_image_load_store@level@1DArray level binding test
+spec@arb_shader_image_load_store@level@1D level binding test
+spec@arb_shader_image_load_store@level@2DArray level binding test
+spec@arb_shader_image_load_store@level@2D level binding test
+spec@arb_shader_image_load_store@level@3D level binding test
+spec@arb_shader_image_load_store@level@CubeArray level binding test
+spec@arb_shader_image_load_store@level@Cube level binding test
+spec@arb_shader_image_load_store@level
diff --git a/.gitlab-ci/meson/build.sh b/.gitlab-ci/meson/build.sh
new file mode 100755
index 00000000..6d333cd4
--- /dev/null
+++ b/.gitlab-ci/meson/build.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+set -e
+set -o xtrace
+
+CROSS_FILE=/cross_file-"$CROSS".txt
+
+# We need to control the version of llvm-config we're using, so we'll
+# tweak the cross file or generate a native file to do so.
+if test -n "$LLVM_VERSION"; then
+ LLVM_CONFIG="llvm-config-${LLVM_VERSION}"
+ echo -e "[binaries]\nllvm-config = '`which $LLVM_CONFIG`'" > native.file
+ if [ -n "$CROSS" ]; then
+ sed -i -e '/\[binaries\]/a\' -e "llvm-config = '`which $LLVM_CONFIG`'" $CROSS_FILE
+ fi
+ $LLVM_CONFIG --version
+else
+ rm -f native.file
+ touch native.file
+fi
+
+# cross-xfail-$CROSS, if it exists, contains a list of tests that are expected
+# to fail for the $CROSS configuration, one per line. you can then mark those
+# tests in their meson.build with:
+#
+# test(...,
+# should_fail: meson.get_cross_property('xfail', '').contains(t),
+# )
+#
+# where t is the name of the test, and the '' is the string to search when
+# not cross-compiling (which is empty, because for amd64 everything is
+# expected to pass).
+if [ -n "$CROSS" ]; then
+ CROSS_XFAIL=.gitlab-ci/cross-xfail-"$CROSS"
+ if [ -s "$CROSS_XFAIL" ]; then
+ sed -i \
+ -e '/\[properties\]/a\' \
+ -e "xfail = '$(tr '\n' , < $CROSS_XFAIL)'" \
+ "$CROSS_FILE"
+ fi
+fi
+
+# Only use GNU time if available, not any shell built-in command
+case $CI_JOB_NAME in
+ # strace and wine don't seem to mix well
+ # ASAN leak detection is incompatible with strace
+ debian-mingw32-x86_64|*-asan*)
+ if test -f /usr/bin/time; then
+ MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
+ fi
+ ;;
+ *)
+ if test -f /usr/bin/time -a -f /usr/bin/strace; then
+ MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time-strace.sh
+ fi
+ ;;
+esac
+
+RET=0
+RESULTS_DIR=$(pwd)/results/${TEST_SUITE:-build}
+rm -rf _build
+
+meson _build --native-file=native.file \
+ --wrap-mode=nofallback \
+ ${CROSS+--cross "$CROSS_FILE"} \
+ -D prefix=$(pwd)/install \
+ -D libdir=lib \
+ -D buildtype=${BUILDTYPE:-debug} \
+ -D c_args="$(echo -n $C_ARGS)" \
+ -D cpp_args="$(echo -n $CPP_ARGS)" \
+ ${DRI_LOADERS} \
+ ${GALLIUM_ST} \
+ -D tests=true \
+ -D render-server=true \
+ -D render-server-worker=process \
+ -D venus-experimental=true \
+ --fatal-meson-warnings \
+ ${EXTRA_OPTION} && \
+pushd _build && \
+meson configure && \
+ninja -j ${FDO_CI_CONCURRENT:-4} install || {
+ RET=$?
+ mkdir -p ${RESULTS_DIR}
+ mv -f meson-logs/* ${RESULTS_DIR}/
+ popd
+ exit ${RET}
+}
+
+if [ -n "${TEST_SUITE}" ]; then
+ VRENDTEST_USE_EGL_SURFACELESS=1 ninja -j ${FDO_CI_CONCURRENT:-4} test || RET=$?
+ mkdir -p ${RESULTS_DIR}
+ mv -f meson-logs/testlog.txt ${RESULTS_DIR}/
+fi
+
+popd
+exit ${RET}
diff --git a/.gitlab-ci/meson/time-strace.sh b/.gitlab-ci/meson/time-strace.sh
new file mode 100755
index 00000000..d579529f
--- /dev/null
+++ b/.gitlab-ci/meson/time-strace.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+STRACEDIR=meson-logs/strace/$(for i in $@; do basename -z -- $i; echo -n _; done)
+
+mkdir -p $STRACEDIR
+
+# If the test times out, meson sends SIGTERM to this process.
+# Simply exec'ing "time" would result in no output from that in this case.
+# Instead, we need to run "time" in the background, catch the signals and
+# propagate them to the actual test process.
+
+/usr/bin/time -v strace -ff -tt -T -o $STRACEDIR/log "$@" &
+TIMEPID=$!
+STRACEPID=$(ps --ppid $TIMEPID -o pid=)
+TESTPID=$(ps --ppid $STRACEPID -o pid=)
+
+if test "x$TESTPID" != x; then
+ trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
+fi
+
+wait $TIMEPID
+EXITCODE=$?
+
+# Only keep strace logs if the test timed out
+rm -rf $STRACEDIR &
+
+exit $EXITCODE
diff --git a/.gitlab-ci/meson/time.sh b/.gitlab-ci/meson/time.sh
new file mode 100755
index 00000000..cde6bb71
--- /dev/null
+++ b/.gitlab-ci/meson/time.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# If the test times out, meson sends SIGTERM to this process.
+# Simply exec'ing "time" would result in no output from that in this case.
+# Instead, we need to run "time" in the background, catch the signals and
+# propagate them to the actual test process.
+
+/usr/bin/time -v "$@" &
+TIMEPID=$!
+TESTPID=$(ps --ppid $TIMEPID -o pid=)
+
+if test "x$TESTPID" != x; then
+ trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
+fi
+
+wait $TIMEPID
+exit $?
diff --git a/Android.bp b/Android.bp
index 1b3a2e81..d1c2647c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -63,47 +63,50 @@ cc_library {
host_supported: true,
cflags: [
"-DHAVE_CONFIG_H",
- "-Wno-null-pointer-arithmetic",
- "-Wno-macro-redefined",
- "-Wno-unused-function",
- "-Wno-incompatible-pointer-types-discards-qualifiers",
+ "-include prebuilt-intermediates/config.h",
"-Wno-unused-parameter",
],
c_std: "experimental",
local_include_dirs: [
"prebuilt-intermediates",
"src",
+ "src/drm",
"src/gallium/auxiliary",
"src/gallium/auxiliary/util",
"src/gallium/include",
+ "src/mesa",
+ "src/mesa/compat",
+ "src/mesa/pipe",
+ "src/mesa/util",
"src/venus",
],
srcs: [
"prebuilt-intermediates/src/u_format_table.c",
"src/gallium/auxiliary/cso_cache/cso_cache.c",
"src/gallium/auxiliary/cso_cache/cso_hash.c",
- "src/gallium/auxiliary/os/os_misc.c",
"src/gallium/auxiliary/tgsi/tgsi_build.c",
"src/gallium/auxiliary/tgsi/tgsi_dump.c",
+ "src/gallium/auxiliary/tgsi/tgsi_info.c",
"src/gallium/auxiliary/tgsi/tgsi_iterate.c",
"src/gallium/auxiliary/tgsi/tgsi_parse.c",
"src/gallium/auxiliary/tgsi/tgsi_sanity.c",
"src/gallium/auxiliary/tgsi/tgsi_scan.c",
- "src/gallium/auxiliary/tgsi/tgsi_text.c",
- "src/gallium/auxiliary/tgsi/tgsi_transform.c",
- "src/gallium/auxiliary/tgsi/tgsi_info.c",
"src/gallium/auxiliary/tgsi/tgsi_strings.c",
- "src/gallium/auxiliary/tgsi/tgsi_ureg.c",
+ "src/gallium/auxiliary/tgsi/tgsi_text.c",
"src/gallium/auxiliary/tgsi/tgsi_util.c",
- "src/gallium/auxiliary/util/u_bitmask.c",
- "src/gallium/auxiliary/util/u_cpu_detect.c",
- "src/gallium/auxiliary/util/u_debug.c",
"src/gallium/auxiliary/util/u_debug_describe.c",
"src/gallium/auxiliary/util/u_format.c",
"src/gallium/auxiliary/util/u_hash_table.c",
- "src/gallium/auxiliary/util/u_math.c",
"src/gallium/auxiliary/util/u_texture.c",
- "src/gallium/auxiliary/util/u_surface.c",
+ "src/mesa/util/anon_file.c",
+ "src/mesa/util/bitscan.c",
+ "src/mesa/util/hash_table.c",
+ "src/mesa/util/os_file.c",
+ "src/mesa/util/os_misc.c",
+ "src/mesa/util/ralloc.c",
+ "src/mesa/util/u_cpu_detect.c",
+ "src/mesa/util/u_debug.c",
+ "src/mesa/util/u_math.c",
"src/iov.c",
"src/virgl_context.c",
"src/virglrenderer.c",
@@ -130,6 +133,12 @@ cc_library {
],
},
linux_glibc: {
+ cflags: [
+ "-Wno-#warnings",
+ // FIXME: Figure out how to get C protos for asprintf,
+ // pthread_setname_np and sched_getcpu from glibc sysroot
+ "-Wno-error=implicit-function-declaration",
+ ],
// Avoid linking to another host copy of libdrm; this library will cause
// binary GPU drivers to be loaded from the host, which might be linked
// to a system copy of libdrm, which conflicts with the AOSP one
@@ -139,8 +148,21 @@ cc_library {
"libdrm",
],
},
+ linux_bionic: {
+ cflags: [
+ // Provide a C proto for memfd_create
+ "-D__USE_GNU",
+ "-DHAVE_MEMFD_CREATE=1",
+ ],
+ },
android: {
+ cflags: [
+ // Provide a C proto for memfd_create
+ "-D__USE_GNU",
+ "-DHAVE_MEMFD_CREATE=1",
+ ],
shared_libs: [
+ "libcutils",
"libdrm",
"liblog",
],
diff --git a/METADATA b/METADATA
index a517e629..0c2157a4 100644
--- a/METADATA
+++ b/METADATA
@@ -1,10 +1,9 @@
-name: "virglrenderer"
-description:
- "Virgil is a research project to investigate the possibility of creating a "
- "virtual 3D GPU for use inside qemu virtual machines, that allows the guest "
- "operating system to use the capabilities of the host GPU to accelerate 3D "
- "rendering."
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update virglrenderer
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+name: "virglrenderer"
+description: "Virgil is a research project to investigate the possibility of creating a virtual 3D GPU for use inside qemu virtual machines, that allows the guest operating system to use the capabilities of the host GPU to accelerate 3D rendering."
third_party {
url {
type: HOMEPAGE
@@ -14,7 +13,11 @@ third_party {
type: GIT
value: "https://anongit.freedesktop.org/git/virglrenderer.git"
}
- version: "master"
+ version: "0.10.4"
license_type: NOTICE
- last_upgrade_date { year: 2018 month: 4 day: 10 }
+ last_upgrade_date {
+ year: 2023
+ month: 1
+ day: 18
+ }
}
diff --git a/ci/.gitlab-ci.yml b/ci/.gitlab-ci.yml
deleted file mode 100644
index 1159f4fb..00000000
--- a/ci/.gitlab-ci.yml
+++ /dev/null
@@ -1,125 +0,0 @@
-variables:
- FDO_DISTRIBUTION_TAG: "2021-06-08"
- FDO_DISTRIBUTION_VERSION: bullseye
- FDO_UPSTREAM_REPO: "virgl/virglrenderer"
- TEST_IMAGE: "$CI_REGISTRY_IMAGE/debian/$FDO_DISTRIBUTION_VERSION:$FDO_DISTRIBUTION_TAG"
-
-include:
- - project: 'freedesktop/ci-templates'
- ref: 4a73f030d0602042cfa44ed94dc5e744b52f57aa
- file: '/templates/debian.yml'
-
-stages:
- - build
- - sanity test
- - test
-
-build docker image:
- stage: build
- extends: .fdo.container-ifnot-exists@debian
- stage: build
- variables:
- GIT_STRATEGY: none # no need to pull the whole tree for rebuilding the image
- FDO_DISTRIBUTION_EXEC: 'bash ci/build-container.sh'
- only:
- - branches
- - tags
- - merge_requests
- - schedules
-
-.tests_base:
- image: $TEST_IMAGE
- variables:
- NUM_THREADS: 4
- script:
- - ci/run_tests.sh $TEST_SUITE
- - echo -e "\nThat's all folks\n"
- only:
- - branches
- - tags
- - merge_requests
- - schedules
-
- artifacts:
- when: always
- paths:
- - results/
-
-.make_check_base:
- stage: sanity test
- extends: .tests_base
-
-mesa check meson:
- variables:
- TEST_SUITE: '--make-check-meson'
- extends: .make_check_base
-
-make check clang-fuzzer:
- variables:
- TEST_SUITE: '--make-check-clang-fuzzer'
- extends: .make_check_base
-
-make check trace-stderr:
- variables:
- TEST_SUITE: '--make-check-trace-stderr'
- extends: .make_check_base
-
-make check venus:
- variables:
- TEST_SUITE: '--make-check-venus'
- extends: .make_check_base
-
-.tests:
- stage: test
- extends: .tests_base
-
-piglit - gl host:
- variables:
- TEST_SUITE: '--piglit-gl'
- extends: .tests
-
-piglit - gles host:
- variables:
- TEST_SUITE: '--piglit-gles'
- extends: .tests
-
-cts gl - gl host:
- variables:
- TEST_SUITE: '--deqp-gl-gl-tests'
- extends: .tests
-
-cts gles2 - gl host:
- variables:
- TEST_SUITE: '--deqp-gl-gles2-tests'
- extends: .tests
-
-cts gles3 - gl host:
- variables:
- TEST_SUITE: '--deqp-gl-gles3-tests'
- extends: .tests
-
-cts gles31 - gl host:
- variables:
- TEST_SUITE: '--deqp-gl-gles31-tests'
- extends: .tests
-
-
-cts gl - gles host:
- variables:
- TEST_SUITE: '--deqp-gles-gl-tests'
- extends: .tests
-
-cts gles2 - gles host:
- variables:
- TEST_SUITE: '--deqp-gles-gles2-tests'
- extends: .tests
-
-cts gles3 - gles host:
- variables:
- TEST_SUITE: '--deqp-gles-gles3-tests'
- extends: .tests
-
-cts gles31 - gles host:
- variables:
- TEST_SUITE: '--deqp-gles-gles31-tests'
- extends: .tests
diff --git a/config.h.meson b/config.h.meson
index a16796f5..b25a86b2 100644
--- a/config.h.meson
+++ b/config.h.meson
@@ -1,17 +1,53 @@
#mesondefine VERSION
+#mesondefine _GNU_SOURCE
+#mesondefine VIRGL_RENDERER_UNSTABLE_APIS
+#mesondefine HAVE___BUILTIN_BSWAP32
+#mesondefine HAVE___BUILTIN_BSWAP64
+#mesondefine HAVE___BUILTIN_CLZ
+#mesondefine HAVE___BUILTIN_CLZLL
+#mesondefine HAVE___BUILTIN_EXPECT
+#mesondefine HAVE___BUILTIN_FFS
+#mesondefine HAVE___BUILTIN_FFSLL
+#mesondefine HAVE___BUILTIN_POPCOUNT
+#mesondefine HAVE___BUILTIN_POPCOUNTLL
+#mesondefine HAVE___BUILTIN_TYPES_COMPATIBLE_P
+#mesondefine HAVE___BUILTIN_UNREACHABLE
+#mesondefine HAVE_FUNC_ATTRIBUTE_CONST
+#mesondefine HAVE_FUNC_ATTRIBUTE_FLATTEN
+#mesondefine HAVE_FUNC_ATTRIBUTE_FORMAT
+#mesondefine HAVE_FUNC_ATTRIBUTE_MALLOC
+#mesondefine HAVE_FUNC_ATTRIBUTE_NORETURN
+#mesondefine HAVE_FUNC_ATTRIBUTE_PACKED
+#mesondefine HAVE_FUNC_ATTRIBUTE_PURE
+#mesondefine HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+#mesondefine HAVE_FUNC_ATTRIBUTE_UNUSED
+#mesondefine HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+#mesondefine HAVE_FUNC_ATTRIBUTE_WEAK
+#mesondefine HAVE_MEMFD_CREATE
+#mesondefine HAVE_STRTOK_R
+#mesondefine HAVE_TIMESPEC_GET
#mesondefine HAVE_SYS_UIO_H
#mesondefine HAVE_PTHREAD
+#mesondefine HAVE_PTHREAD_SETAFFINITY
#mesondefine HAVE_EPOXY_EGL_H
#mesondefine HAVE_EPOXY_GLX_H
+#mesondefine CHECK_GL_ERRORS
#mesondefine ENABLE_MINIGBM_ALLOCATION
#mesondefine ENABLE_VENUS
#mesondefine ENABLE_VENUS_VALIDATE
-#mesondefine HAVE_FUNC_ATTRIBUTE_VISIBILITY
+#mesondefine ENABLE_DRM
+#mesondefine ENABLE_DRM_MSM
+#mesondefine ENABLE_RENDER_SERVER
+#mesondefine ENABLE_RENDER_SERVER_WORKER_PROCESS
+#mesondefine ENABLE_RENDER_SERVER_WORKER_THREAD
+#mesondefine ENABLE_RENDER_SERVER_WORKER_MINIJAIL
+#mesondefine RENDER_SERVER_EXEC_PATH
#mesondefine HAVE_EVENTFD_H
#mesondefine HAVE_DLFCN_H
+#mesondefine ENABLE_VIDEO
#mesondefine ENABLE_TRACING
-#mesondefine PIPE_ARCH_LITTLE_ENDIAN
-#mesondefine PIPE_ARCH_BIG_ENDIAN
+#mesondefine UTIL_ARCH_LITTLE_ENDIAN
+#mesondefine UTIL_ARCH_BIG_ENDIAN
#mesondefine PIPE_ARCH_X86
#mesondefine PIPE_ARCH_X86_64
#mesondefine PIPE_ARCH_PPC
diff --git a/meson.build b/meson.build
index 9c878bd5..ddb74daa 100644
--- a/meson.build
+++ b/meson.build
@@ -23,9 +23,9 @@
project(
'virglrenderer', 'c',
- version: '0.9.0',
+ version: '0.10.4',
license : 'MIT',
- meson_version : '>= 0.46',
+ meson_version : '>= 0.53',
default_options : ['buildtype=release', 'b_ndebug=if-release',
'warning_level=3', 'c_std=gnu11']
)
@@ -37,58 +37,51 @@ project(
# interface age
# 3. If the ABI has changed in an incompatible way increment the binary_age
# and set revision and interface_age to zero
-
binary_age = 1
-interface_age = 5
-revision = 3
+interface_age = 7
+revision = 7
cc = meson.get_compiler('c')
-add_project_arguments('-DHAVE_CONFIG_H=1', language : 'c')
-add_project_arguments('-D_GNU_SOURCE=1', language : 'c')
-add_project_arguments('-DVIRGL_RENDERER_UNSTABLE_APIS', language : 'c')
+if cc.get_id() == 'gcc' and cc.version().version_compare('< 4.1')
+ error('When using GCC, version 4.1 or later is required.')
+endif
warnings = [
'-Werror=implicit-function-declaration',
'-Werror=missing-prototypes',
'-Wmissing-prototypes',
+ '-Werror=incompatible-pointer-types',
'-Werror=int-to-pointer-cast',
'-Wno-overlength-strings',
]
-foreach w : warnings
- if cc.has_argument(w)
- add_project_arguments(w, language : 'c')
- endif
-endforeach
+add_project_arguments(cc.get_supported_arguments(warnings), language : 'c')
flags = [
'-fvisibility=hidden',
]
-foreach f : flags
- if cc.has_argument(f)
- add_project_arguments(f, language : 'c')
- endif
-endforeach
+add_project_arguments(cc.get_supported_arguments(flags), language : 'c')
prog_python = import('python').find_installation('python3')
libdrm_dep = dependency('libdrm', version : '>=2.4.50')
thread_dep = dependency('threads')
epoxy_dep = dependency('epoxy', version: '>= 1.5.4')
-m_dep = cc.find_library('m')
+m_dep = cc.find_library('m', required : false)
conf_data = configuration_data()
-conf_data.set('VERSION', '0.8.1')
+conf_data.set('VERSION', meson.project_version())
+conf_data.set('_GNU_SOURCE', 1)
+conf_data.set('VIRGL_RENDERER_UNSTABLE_APIS', 1)
with_tracing = get_option('tracing')
if with_tracing != 'none'
- if not cc.compiles('void f(void* v){} int main () { void *dummy __attribute__((cleanup (f))) = 0;}')
- error('Tracing requires compiler support for __attribute__((cleanup))')
-endif
-
+ if not cc.compiles('void f(void* v){} int main () { void *dummy __attribute__((cleanup (f))) = 0;}')
+ error('Tracing requires compiler support for __attribute__((cleanup))')
+ endif
endif
if with_tracing == 'percetto'
@@ -116,8 +109,15 @@ if cc.has_header('dlfcn.h')
conf_data.set('HAVE_DLFCN_H', 1)
endif
-if cc.has_header('pthread.h')
- conf_data.set('HAVE_PTHREAD', 1)
+if thread_dep.found() and host_machine.system() != 'windows'
+ conf_data.set('HAVE_PTHREAD', 1)
+ if host_machine.system() != 'netbsd' and cc.has_function(
+ 'pthread_setaffinity_np',
+ dependencies : thread_dep,
+ prefix : '#include <pthread.h>',
+ args : '-D_GNU_SOURCE')
+ conf_data.set('HAVE_PTHREAD_SETAFFINITY', 1)
+ endif
endif
if cc.has_header('sys/eventfd.h')
@@ -128,10 +128,33 @@ if cc.has_header('sys/select.h')
conf_data.set('HAVE_SYS_SELECT_H', 1)
endif
+foreach b : ['bswap32', 'bswap64', 'clz', 'clzll', 'expect', 'ffs', 'ffsll',
+ 'popcount', 'popcountll', 'types_compatible_p', 'unreachable']
+ if cc.has_function(b)
+ conf_data.set('HAVE___BUILTIN_@0@'.format(b.to_upper()), 1)
+ endif
+endforeach
+
+supported_function_attributes = cc.get_supported_function_attributes([
+ 'const', 'flatten', 'format', 'malloc', 'noreturn', 'packed', 'pure',
+ 'returns_nonnull', 'unused', 'warn_unused_result', 'weak',
+])
+foreach a : supported_function_attributes
+ conf_data.set('HAVE_FUNC_ATTRIBUTE_@0@'.format(a.to_upper()), 1)
+endforeach
+
+foreach f : ['memfd_create', 'strtok_r', 'timespec_get']
+ if cc.has_function(f)
+ conf_data.set('HAVE_@0@'.format(f.to_upper()), 1)
+ endif
+endforeach
+
if host_machine.endian() == 'little'
- conf_data.set('PIPE_ARCH_LITTLE_ENDIAN', true)
+ conf_data.set('UTIL_ARCH_LITTLE_ENDIAN', 1)
+ conf_data.set('UTIL_ARCH_BIG_ENDIAN', 0)
elif host_machine.endian() == 'big'
- conf_data.set('PIPE_ARCH_BIG_ENDIAN', true)
+ conf_data.set('UTIL_ARCH_LITTLE_ENDIAN', 0)
+ conf_data.set('UTIL_ARCH_BIG_ENDIAN', 1)
else
error('It wasn\'t possible to figure out the endianess of the machine')
endif
@@ -184,7 +207,12 @@ if with_egl
if cc.has_header('epoxy/egl.h', dependencies: epoxy_dep) and epoxy_dep.get_pkgconfig_variable('epoxy_has_egl') == '1'
gbm_dep = dependency('gbm', version: '>= ' + _gbm_ver, required: require_egl)
have_egl = gbm_dep.found()
- conf_data.set('HAVE_EPOXY_EGL_H', 1)
+ if (have_egl)
+ conf_data.set('HAVE_EPOXY_EGL_H', 1)
+ else
+ assert(not require_egl,
+ 'egl was explicitely requested which requires gbm, and this is not available')
+ endif
else
assert(not require_egl,
'egl was explicitely requested but it is not supported by epoxy')
@@ -213,27 +241,58 @@ if with_venus
endif
endif
-if cc.compiles('void __attribute__((hidden)) func() {}')
- conf_data.set('HAVE_FUNC_ATTRIBUTE_VISIBILITY', 1)
+have_vla = not cc.has_header_symbol('stdlib.h', '__STDC_NO_VLA__')
+
+# drm/msm support requires the compiler to support VLA:
+with_drm_msm = have_vla and get_option('drm-msm-experimental')
+if with_drm_msm
+ conf_data.set('ENABLE_DRM', 1)
+ conf_data.set('ENABLE_DRM_MSM', 1)
+endif
+with_drm = with_drm_msm
+
+with_check_gl_errors = get_option('check-gl-errors')
+if with_check_gl_errors
+ conf_data.set('CHECK_GL_ERRORS', 1)
+endif
+
+with_render_server = get_option('render-server')
+with_render_server_worker = get_option('render-server-worker')
+render_server_install_dir = get_option('prefix') / get_option('libexecdir')
+if with_render_server
+ if not with_venus
+ error('render server makes no sense without venus currently')
+ endif
+
+ conf_data.set('ENABLE_RENDER_SERVER', 1)
+ conf_data.set('RENDER_SERVER_EXEC_PATH',
+ '"' + render_server_install_dir / 'virgl_render_server' + '"')
+
+ if with_render_server_worker == 'process'
+ conf_data.set('ENABLE_RENDER_SERVER_WORKER_PROCESS', 1)
+ elif with_render_server_worker == 'thread'
+ conf_data.set('ENABLE_RENDER_SERVER_WORKER_THREAD', 1)
+ elif with_render_server_worker == 'minijail'
+ conf_data.set('ENABLE_RENDER_SERVER_WORKER_MINIJAIL', 1)
+ minijail_dep = dependency('libminijail')
+ else
+ error('unknown render server worker ' + with_render_server_worker)
+ endif
+endif
+
+with_video = get_option('video')
+if with_video
+ conf_data.set('ENABLE_VIDEO', 1)
+ libva_dep = dependency('libva')
+ libvadrm_dep = dependency('libva-drm')
endif
configure_file(input : 'config.h.meson',
output : 'config.h',
configuration : conf_data)
-pkgconf_data = configuration_data()
-pkgconf_data.set('PACKAGE_VERSION', meson.project_version())
-pkgconf_data.set('prefix', get_option('prefix'))
-pkgconf_data.set('exec_prefix', '${prefix}')
-pkgconf_data.set('libdir', '${prefix}/' + get_option('libdir'))
-pkgconf_data.set('includedir', '${prefix}/' + get_option('includedir'))
-
-pkg_config = configure_file(input : 'virglrenderer.pc.in',
- output : 'virglrenderer.pc',
- configuration : pkgconf_data)
-
-install_data(pkg_config,
- install_dir: get_option('libdir') + '/pkgconfig')
+add_project_arguments('-imacros', meson.build_root() / 'config.h', language : 'c')
+add_project_arguments('-DHAVE_CONFIG_H=1', language : 'c')
inc_configuration = include_directories(['.', 'src'])
@@ -244,30 +303,28 @@ with_valgrind = get_option('valgrind')
subdir('src')
subdir('vtest')
+if with_render_server
+subdir('server')
+endif
+
if with_tests
+ assert(have_egl, 'Tests require EGL, but it is not available')
subdir('tests')
endif
-lines = [
- '',
- 'prefix: ' + get_option('prefix'),
- 'libdir: ' + get_option('libdir'),
- '',
- 'c_args: ' + (' ').join(get_option('c_args')),
- '',
-]
-
-lines += 'egl: ' + (have_egl ? 'yes' : 'no')
-lines += 'glx: ' + (have_glx ? 'yes' : 'no')
-lines += ''
-lines += 'minigbm_alloc: ' + (with_minigbm_allocation ? 'yes' : 'no' )
-lines += ''
-lines += 'venus: ' + (with_venus ? 'yes' : 'no' )
-lines += ''
-lines += 'tests: ' + (with_tests ? 'yes' : 'no' )
-lines += 'fuzzer: ' + (with_fuzzer ? 'yes' : 'no' )
-lines += 'tracing: ' + with_tracing
-
-indent = ' '
-summary = indent + ('\n' + indent).join(lines)
-message('\n\nConfiguration summary:\n@0@\n'.format(summary))
+summary({'prefix': get_option('prefix'),
+ 'libdir': get_option('libdir'),
+ }, section: 'Directories')
+summary({'c_args': (' ').join(get_option('c_args')),
+ 'egl': have_egl,
+ 'glx': have_glx,
+ 'minigbm_alloc': with_minigbm_allocation,
+ 'venus': with_venus,
+ 'drm-msm': with_drm_msm,
+ 'render server': with_render_server,
+ 'render server worker': with_render_server ? with_render_server_worker : 'none',
+ 'video': with_video,
+ 'tests': with_tests,
+ 'fuzzer': with_fuzzer,
+ 'tracing': with_tracing,
+ }, section: 'Configuration')
diff --git a/meson_options.txt b/meson_options.txt
index 52b8df42..cb774064 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -53,6 +53,45 @@ option(
)
option(
+ 'check-gl-errors',
+ type : 'boolean',
+ value : 'true',
+ description : 'treat host-side gl errors as fatal'
+)
+
+# NOTE: expecting some slight fencing changes between host and guest
+# once ring_idx is plumbed through crosvm (ie. synchronizing between
+# host CPU and guest CPU will be a separate fence timeline)
+option(
+ 'drm-msm-experimental',
+ type : 'boolean',
+ value : 'false',
+ description : 'enable support for msm drm native context'
+)
+
+option(
+ 'render-server',
+ type : 'boolean',
+ value : 'false',
+ description : 'enable support for render server'
+)
+
+option(
+ 'render-server-worker',
+ type : 'combo',
+ value : 'process',
+ choices : ['process', 'thread', 'minijail'],
+ description : 'how a context in render server is serviced'
+)
+
+option(
+ 'video',
+ type : 'boolean',
+ value : 'false',
+ description : 'enable support for hardware video acceleration'
+)
+
+option(
'tests',
type : 'boolean',
value : 'false',
diff --git a/prebuilt-intermediates/config.h b/prebuilt-intermediates/config.h
index a36becb7..763b7263 100644
--- a/prebuilt-intermediates/config.h
+++ b/prebuilt-intermediates/config.h
@@ -1,11 +1,78 @@
+// Updated using config.h.meson
+#define _GNU_SOURCE
+#define VIRGL_RENDERER_UNSTABLE_APIS 1
#define HAVE___BUILTIN_BSWAP32 1
#define HAVE___BUILTIN_BSWAP64 1
-#define HAVE_DLFCN_H 1
-#define HAVE_EPOXY_EGL_H 1
-#define HAVE_EVENTFD 1
-#define HAVE_FUNC_ATTRIBUTE_VISIBILITY 1
-#define HAVE_POSIX_MEMALIGN 1
-#define HAVE_PTHREAD 1
-#define HAVE_SYS_SELECT_H 1
+#define HAVE___BUILTIN_CLZ 1
+#define HAVE___BUILTIN_CLZLL 1
+#define HAVE___BUILTIN_EXPECT 1
+#define HAVE___BUILTIN_FFS 1
+#define HAVE___BUILTIN_FFSLL 1
+#define HAVE___BUILTIN_POPCOUNT 1
+#define HAVE___BUILTIN_POPCOUNTLL 1
+#define HAVE___BUILTIN_TYPES_COMPATIBLE_P 1
+#define HAVE___BUILTIN_UNREACHABLE 1
+#define HAVE_FUNC_ATTRIBUTE_CONST 1
+#define HAVE_FUNC_ATTRIBUTE_FLATTEN 1
+#define HAVE_FUNC_ATTRIBUTE_FORMAT 1
+#define HAVE_FUNC_ATTRIBUTE_MALLOC 1
+#define HAVE_FUNC_ATTRIBUTE_NORETURN 1
+#define HAVE_FUNC_ATTRIBUTE_PACKED 1
+#define HAVE_FUNC_ATTRIBUTE_PURE 1
+#define HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL 1
+#define HAVE_FUNC_ATTRIBUTE_UNUSED 1
+#define HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT 1
+#define HAVE_FUNC_ATTRIBUTE_WEAK 1
+// The glibc host toolchain lacks support for memfd, but bionic supports it,
+// so this define is enabled only for 'android:' in Android.bp
+//#define HAVE_MEMFD_CREATE 1
+#define HAVE_STRTOK_R 1
+#define HAVE_TIMESPEC_GET 1
#define HAVE_SYS_UIO_H 1
-#define VIRGL_RENDERER_UNSTABLE_APIS 1
+#define HAVE_PTHREAD 1
+
+// Currently must be disabled because ANDROID code in virglrenderer
+// is broken. This should be fixed upstream.
+//#define HAVE_PTHREAD_SETAFFINITY 1
+
+#define HAVE_EPOXY_EGL_H 1
+
+// No X11/GLX support
+//#define HAVE_EPOXY_GLX_H 1
+
+// Performance impacting
+//#define CHECK_GL_ERRORS 1
+
+// Avoid dependency on minigbm
+//#define ENABLE_MINIGBM_ALLOCATION 1
+
+// Disable experimental venus support (for now)
+//#define ENABLE_VENUS 1
+//#define ENABLE_VENUS_VALIDATE 1
+
+// Disable direct DRM support - only used by freedreno
+//#define ENABLE_DRM 1
+//#define ENABLE_DRM_MSM 1
+
+// Disable render server (for now)
+//#define ENABLE_RENDER_SERVER 1
+//#define ENABLE_RENDER_SERVER_WORKER_PROCESS 1
+//#define ENABLE_RENDER_SERVER_WORKER_THREAD 1
+//#define ENABLE_RENDER_SERVER_WORKER_MINIJAIL 1
+//#define RENDER_SERVER_EXEC_PATH 1
+
+#define HAVE_EVENTFD_H 1
+#define HAVE_DLFCN_H 1
+
+// Disable tracing - performance impacting
+//#define ENABLE_TRACING 1
+
+// Android only supports little endian on target and host
+#define UTIL_ARCH_LITTLE_ENDIAN 1
+#define UTIL_ARCH_BIG_ENDIAN 0
+
+// Architecture-specific CPU detection code
+//#define PIPE_ARCH_X86 1
+
+// Keep simple_mtx.h happy
+#define HAVE_LINUX_FUTEX_H 1
diff --git a/prebuilt-intermediates/src/u_format_table.c b/prebuilt-intermediates/src/u_format_table.c
index 58bd9431..58253bb0 100644
--- a/prebuilt-intermediates/src/u_format_table.c
+++ b/prebuilt-intermediates/src/u_format_table.c
@@ -69,7 +69,7 @@ util_format_b8g8r8a8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = b */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -84,7 +84,7 @@ util_format_b8g8r8a8_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Z, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -113,7 +113,7 @@ util_format_b8g8r8x8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = b */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -128,7 +128,7 @@ util_format_b8g8r8x8_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Z, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -157,7 +157,7 @@ util_format_a8r8g8b8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = r */
@@ -172,7 +172,7 @@ util_format_a8r8g8b8_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = b */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -201,7 +201,7 @@ util_format_x8r8g8b8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = r */
@@ -216,7 +216,7 @@ util_format_x8r8g8b8_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = b */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -245,7 +245,7 @@ util_format_a8b8g8r8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = b */
@@ -260,7 +260,7 @@ util_format_a8b8g8r8_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -289,7 +289,7 @@ util_format_x8b8g8r8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = b */
@@ -304,7 +304,7 @@ util_format_x8b8g8r8_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -333,7 +333,7 @@ util_format_r8g8b8x8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -348,7 +348,7 @@ util_format_r8g8b8x8_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -377,7 +377,7 @@ util_format_b5g5r5x1_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 1, 15}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 5, 10}, /* y = r */
@@ -392,7 +392,7 @@ util_format_b5g5r5x1_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 1, 15} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -421,7 +421,7 @@ util_format_b5g5r5a1_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 1, 15}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 5, 10}, /* y = r */
@@ -436,7 +436,7 @@ util_format_b5g5r5a1_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 1, 15} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -465,7 +465,7 @@ util_format_b4g4r4a4_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 12}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 8}, /* y = r */
@@ -480,7 +480,7 @@ util_format_b4g4r4a4_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 12} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -509,7 +509,7 @@ util_format_b4g4r4x4_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 4, 12}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 8}, /* y = r */
@@ -524,7 +524,7 @@ util_format_b4g4r4x4_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 4, 12} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -553,7 +553,7 @@ util_format_a4b4g4r4_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 12}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 8}, /* y = g */
@@ -568,7 +568,7 @@ util_format_a4b4g4r4_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 12} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -597,7 +597,7 @@ util_format_b5g6r5_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 5, 11}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 6, 5}, /* y = g */
@@ -612,7 +612,7 @@ util_format_b5g6r5_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -641,7 +641,7 @@ util_format_r10g10b10a2_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 10, 20}, /* y = b */
@@ -656,7 +656,7 @@ util_format_r10g10b10a2_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -685,7 +685,7 @@ util_format_r10g10b10x2_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 10, 20}, /* y = b */
@@ -700,7 +700,7 @@ util_format_r10g10b10x2_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -729,7 +729,7 @@ util_format_b10g10r10a2_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 10, 20}, /* y = r */
@@ -744,7 +744,7 @@ util_format_b10g10r10a2_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -773,7 +773,7 @@ util_format_b2g3r3_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 3, 5}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 3, 2}, /* y = g */
@@ -788,7 +788,7 @@ util_format_b2g3r3_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -895,7 +895,7 @@ util_format_l4a4_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 4}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 0}, /* y = rgb */
@@ -910,7 +910,7 @@ util_format_l4a4_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -939,7 +939,7 @@ util_format_l8a8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = rgb */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = a */
@@ -954,7 +954,7 @@ util_format_l8a8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -1061,7 +1061,7 @@ util_format_l16a16_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 16}, /* x = rgb */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 0}, /* y = a */
@@ -1076,7 +1076,7 @@ util_format_l16a16_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -1157,7 +1157,7 @@ util_format_l8a8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* x = rgb */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 0}, /* y = a */
@@ -1172,7 +1172,7 @@ util_format_l8a8_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -1279,7 +1279,7 @@ util_format_l16a16_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 16}, /* x = rgb */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 0}, /* y = a */
@@ -1294,7 +1294,7 @@ util_format_l16a16_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -1401,7 +1401,7 @@ util_format_l16a16_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 16}, /* x = rgb */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 0}, /* y = a */
@@ -1416,7 +1416,7 @@ util_format_l16a16_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -1523,7 +1523,7 @@ util_format_l32a32_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 32}, /* x = rgb */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 0}, /* y = a */
@@ -1538,7 +1538,7 @@ util_format_l32a32_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -1645,7 +1645,7 @@ util_format_l8a8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = rgb */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = a */
@@ -1660,7 +1660,7 @@ util_format_l8a8_srgb_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* sr */
UTIL_FORMAT_SWIZZLE_X, /* sg */
@@ -1689,7 +1689,7 @@ util_format_r8g8b8_srgb_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* y = g */
@@ -1704,7 +1704,7 @@ util_format_r8g8b8_srgb_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -1733,7 +1733,7 @@ util_format_r8g8b8a8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -1748,7 +1748,7 @@ util_format_r8g8b8a8_srgb_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -1777,7 +1777,7 @@ util_format_a8b8g8r8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = b */
@@ -1792,7 +1792,7 @@ util_format_a8b8g8r8_srgb_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* sr */
UTIL_FORMAT_SWIZZLE_Z, /* sg */
@@ -1821,7 +1821,7 @@ util_format_x8b8g8r8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = b */
@@ -1836,7 +1836,7 @@ util_format_x8b8g8r8_srgb_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* sr */
UTIL_FORMAT_SWIZZLE_Z, /* sg */
@@ -1865,7 +1865,7 @@ util_format_b8g8r8a8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = b */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -1880,7 +1880,7 @@ util_format_b8g8r8a8_srgb_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Z, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -1909,7 +1909,7 @@ util_format_b8g8r8x8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = b */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -1924,7 +1924,7 @@ util_format_b8g8r8x8_srgb_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Z, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -1953,7 +1953,7 @@ util_format_a8r8g8b8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = r */
@@ -1968,7 +1968,7 @@ util_format_a8r8g8b8_srgb_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = b */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* sr */
UTIL_FORMAT_SWIZZLE_Z, /* sg */
@@ -1997,7 +1997,7 @@ util_format_x8r8g8b8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = r */
@@ -2012,7 +2012,7 @@ util_format_x8r8g8b8_srgb_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = b */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* sr */
UTIL_FORMAT_SWIZZLE_Z, /* sg */
@@ -2041,7 +2041,7 @@ util_format_r8sg8sb8ux8u_norm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
TRUE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -2056,7 +2056,7 @@ util_format_r8sg8sb8ux8u_norm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -2085,7 +2085,7 @@ util_format_r10sg10sb10sa2u_norm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
TRUE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 10, 20}, /* y = b */
@@ -2100,7 +2100,7 @@ util_format_r10sg10sb10sa2u_norm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -2129,7 +2129,7 @@ util_format_r5sg5sb6u_norm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
TRUE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 6, 10}, /* x = b */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 5, 5}, /* y = g */
@@ -2144,7 +2144,7 @@ util_format_r5sg5sb6u_norm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Z, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -2277,7 +2277,7 @@ util_format_z24_unorm_s8_uint_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
TRUE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 24}, /* x = s */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 24, 0}, /* y = z */
@@ -2292,7 +2292,7 @@ util_format_z24_unorm_s8_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* z */
UTIL_FORMAT_SWIZZLE_X, /* s */
@@ -2321,7 +2321,7 @@ util_format_s8_uint_z24_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
TRUE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 24, 8}, /* x = z */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 0}, /* y = s */
@@ -2336,7 +2336,7 @@ util_format_s8_uint_z24_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* z */
UTIL_FORMAT_SWIZZLE_Y, /* s */
@@ -2365,7 +2365,7 @@ util_format_x24s8_uint_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 24}, /* x = s */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 24, 0}, /* y = x */
@@ -2380,7 +2380,7 @@ util_format_x24s8_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_NONE, /* z */
UTIL_FORMAT_SWIZZLE_X, /* s */
@@ -2409,7 +2409,7 @@ util_format_s8x24_uint_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 24, 8}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 0}, /* y = s */
@@ -2424,7 +2424,7 @@ util_format_s8x24_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_NONE, /* z */
UTIL_FORMAT_SWIZZLE_Y, /* s */
@@ -2453,7 +2453,7 @@ util_format_z24x8_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 24, 0}, /* y = z */
@@ -2468,7 +2468,7 @@ util_format_z24x8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* z */
UTIL_FORMAT_SWIZZLE_NONE, /* s */
@@ -2497,7 +2497,7 @@ util_format_x8z24_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 24, 8}, /* x = z */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 0}, /* y = x */
@@ -2512,7 +2512,7 @@ util_format_x8z24_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* z */
UTIL_FORMAT_SWIZZLE_NONE, /* s */
@@ -2541,7 +2541,7 @@ util_format_z32_float_s8x24_uint_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
TRUE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 32}, /* x = z */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 24, 8}, /* y = x */
@@ -2556,7 +2556,7 @@ util_format_z32_float_s8x24_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* z */
UTIL_FORMAT_SWIZZLE_Z, /* s */
@@ -2585,7 +2585,7 @@ util_format_x32_s8x24_uint_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 32, 32}, /* x = x */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 24, 8}, /* y = x */
@@ -2600,7 +2600,7 @@ util_format_x32_s8x24_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_NONE, /* z */
UTIL_FORMAT_SWIZZLE_Z, /* s */
@@ -2863,7 +2863,7 @@ util_format_r8g8bx_snorm_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* x = x */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 0}, /* y = y */
@@ -2878,7 +2878,7 @@ util_format_r8g8bx_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3479,7 +3479,7 @@ util_format_r64g64_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 64}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 0}, /* y = g */
@@ -3494,7 +3494,7 @@ util_format_r64g64_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3523,7 +3523,7 @@ util_format_r64g64b64_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 128}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 64}, /* y = g */
@@ -3538,7 +3538,7 @@ util_format_r64g64b64_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3567,7 +3567,7 @@ util_format_r64g64b64a64_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 192}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 128}, /* y = g */
@@ -3582,7 +3582,7 @@ util_format_r64g64b64a64_float_description = {
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 64, 192} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3637,7 +3637,7 @@ util_format_r32g32_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 0}, /* y = g */
@@ -3652,7 +3652,7 @@ util_format_r32g32_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3681,7 +3681,7 @@ util_format_r32g32b32_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 32}, /* y = g */
@@ -3696,7 +3696,7 @@ util_format_r32g32b32_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3725,7 +3725,7 @@ util_format_r32g32b32a32_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 64}, /* y = g */
@@ -3740,7 +3740,7 @@ util_format_r32g32b32a32_float_description = {
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3795,7 +3795,7 @@ util_format_r32g32_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 0}, /* y = g */
@@ -3810,7 +3810,7 @@ util_format_r32g32_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3839,7 +3839,7 @@ util_format_r32g32b32_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 32}, /* y = g */
@@ -3854,7 +3854,7 @@ util_format_r32g32b32_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3883,7 +3883,7 @@ util_format_r32g32b32a32_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 64}, /* y = g */
@@ -3898,7 +3898,7 @@ util_format_r32g32b32a32_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3953,7 +3953,7 @@ util_format_r32g32_uscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 0}, /* y = g */
@@ -3968,7 +3968,7 @@ util_format_r32g32_uscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -3997,7 +3997,7 @@ util_format_r32g32b32_uscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 32}, /* y = g */
@@ -4012,7 +4012,7 @@ util_format_r32g32b32_uscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4041,7 +4041,7 @@ util_format_r32g32b32a32_uscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 64}, /* y = g */
@@ -4056,7 +4056,7 @@ util_format_r32g32b32a32_uscaled_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4111,7 +4111,7 @@ util_format_r32g32_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 0}, /* y = g */
@@ -4126,7 +4126,7 @@ util_format_r32g32_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4155,7 +4155,7 @@ util_format_r32g32b32_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 32}, /* y = g */
@@ -4170,7 +4170,7 @@ util_format_r32g32b32_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4199,7 +4199,7 @@ util_format_r32g32b32a32_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 64}, /* y = g */
@@ -4214,7 +4214,7 @@ util_format_r32g32b32a32_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4269,7 +4269,7 @@ util_format_r32g32_sscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 0}, /* y = g */
@@ -4284,7 +4284,7 @@ util_format_r32g32_sscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4313,7 +4313,7 @@ util_format_r32g32b32_sscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 32}, /* y = g */
@@ -4328,7 +4328,7 @@ util_format_r32g32b32_sscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4357,7 +4357,7 @@ util_format_r32g32b32a32_sscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 64}, /* y = g */
@@ -4372,7 +4372,7 @@ util_format_r32g32b32a32_sscaled_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4427,7 +4427,7 @@ util_format_r16g16_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 0}, /* y = g */
@@ -4442,7 +4442,7 @@ util_format_r16g16_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4471,7 +4471,7 @@ util_format_r16g16b16_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 16}, /* y = g */
@@ -4486,7 +4486,7 @@ util_format_r16g16b16_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4515,7 +4515,7 @@ util_format_r16g16b16a16_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 32}, /* y = g */
@@ -4530,7 +4530,7 @@ util_format_r16g16b16a16_float_description = {
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4585,7 +4585,7 @@ util_format_r16g16_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 0}, /* y = g */
@@ -4600,7 +4600,7 @@ util_format_r16g16_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4629,7 +4629,7 @@ util_format_r16g16b16_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 16}, /* y = g */
@@ -4644,7 +4644,7 @@ util_format_r16g16b16_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4673,7 +4673,7 @@ util_format_r16g16b16a16_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 32}, /* y = g */
@@ -4688,7 +4688,7 @@ util_format_r16g16b16a16_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4743,7 +4743,7 @@ util_format_r16g16_uscaled_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 0}, /* y = g */
@@ -4758,7 +4758,7 @@ util_format_r16g16_uscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4787,7 +4787,7 @@ util_format_r16g16b16_uscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 16}, /* y = g */
@@ -4802,7 +4802,7 @@ util_format_r16g16b16_uscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4831,7 +4831,7 @@ util_format_r16g16b16a16_uscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 32}, /* y = g */
@@ -4846,7 +4846,7 @@ util_format_r16g16b16a16_uscaled_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4901,7 +4901,7 @@ util_format_r16g16_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 0}, /* y = g */
@@ -4916,7 +4916,7 @@ util_format_r16g16_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4945,7 +4945,7 @@ util_format_r16g16b16_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 16}, /* y = g */
@@ -4960,7 +4960,7 @@ util_format_r16g16b16_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -4989,7 +4989,7 @@ util_format_r16g16b16a16_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 32}, /* y = g */
@@ -5004,7 +5004,7 @@ util_format_r16g16b16a16_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5059,7 +5059,7 @@ util_format_r16g16_sscaled_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 0}, /* y = g */
@@ -5074,7 +5074,7 @@ util_format_r16g16_sscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5103,7 +5103,7 @@ util_format_r16g16b16_sscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 16}, /* y = g */
@@ -5118,7 +5118,7 @@ util_format_r16g16b16_sscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5147,7 +5147,7 @@ util_format_r16g16b16a16_sscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 32}, /* y = g */
@@ -5162,7 +5162,7 @@ util_format_r16g16b16a16_sscaled_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5217,7 +5217,7 @@ util_format_r8g8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = g */
@@ -5232,7 +5232,7 @@ util_format_r8g8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5261,7 +5261,7 @@ util_format_r8g8b8_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* y = g */
@@ -5276,7 +5276,7 @@ util_format_r8g8b8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5305,7 +5305,7 @@ util_format_r8g8b8a8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -5320,7 +5320,7 @@ util_format_r8g8b8a8_unorm_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5375,7 +5375,7 @@ util_format_r8g8_uscaled_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 0}, /* y = g */
@@ -5390,7 +5390,7 @@ util_format_r8g8_uscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5419,7 +5419,7 @@ util_format_r8g8b8_uscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 8}, /* y = g */
@@ -5434,7 +5434,7 @@ util_format_r8g8b8_uscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5463,7 +5463,7 @@ util_format_r8g8b8a8_uscaled_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 16}, /* y = g */
@@ -5478,7 +5478,7 @@ util_format_r8g8b8a8_uscaled_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5533,7 +5533,7 @@ util_format_r8g8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 0}, /* y = g */
@@ -5548,7 +5548,7 @@ util_format_r8g8_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5577,7 +5577,7 @@ util_format_r8g8b8_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* y = g */
@@ -5592,7 +5592,7 @@ util_format_r8g8b8_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5621,7 +5621,7 @@ util_format_r8g8b8a8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -5636,7 +5636,7 @@ util_format_r8g8b8a8_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5691,7 +5691,7 @@ util_format_r8g8_sscaled_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 0}, /* y = g */
@@ -5706,7 +5706,7 @@ util_format_r8g8_sscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5735,7 +5735,7 @@ util_format_r8g8b8_sscaled_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 8}, /* y = g */
@@ -5750,7 +5750,7 @@ util_format_r8g8b8_sscaled_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5779,7 +5779,7 @@ util_format_r8g8b8a8_sscaled_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 16}, /* y = g */
@@ -5794,7 +5794,7 @@ util_format_r8g8b8a8_sscaled_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5849,7 +5849,7 @@ util_format_r32g32_fixed_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 0}, /* y = g */
@@ -5864,7 +5864,7 @@ util_format_r32g32_fixed_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5893,7 +5893,7 @@ util_format_r32g32b32_fixed_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 32}, /* y = g */
@@ -5908,7 +5908,7 @@ util_format_r32g32b32_fixed_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5937,7 +5937,7 @@ util_format_r32g32b32a32_fixed_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 64}, /* y = g */
@@ -5952,7 +5952,7 @@ util_format_r32g32b32a32_fixed_description = {
{UTIL_FORMAT_TYPE_FIXED, FALSE, FALSE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -5981,7 +5981,7 @@ util_format_r10g10b10x2_uscaled_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 10, 20}, /* y = b */
@@ -5996,7 +5996,7 @@ util_format_r10g10b10x2_uscaled_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6025,7 +6025,7 @@ util_format_r10g10b10x2_snorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30}, /* x = x */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 10, 20}, /* y = b */
@@ -6040,7 +6040,7 @@ util_format_r10g10b10x2_snorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6069,7 +6069,7 @@ util_format_yv12_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 16}, /* y = y */
@@ -6084,7 +6084,7 @@ util_format_yv12_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = w */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* y */
UTIL_FORMAT_SWIZZLE_Y, /* u */
@@ -6113,7 +6113,7 @@ util_format_yv16_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 16}, /* y = y */
@@ -6128,7 +6128,7 @@ util_format_yv16_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = w */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* y */
UTIL_FORMAT_SWIZZLE_Y, /* u */
@@ -6157,7 +6157,7 @@ util_format_iyuv_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 16}, /* y = y */
@@ -6172,7 +6172,7 @@ util_format_iyuv_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = w */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* y */
UTIL_FORMAT_SWIZZLE_Y, /* u */
@@ -6201,7 +6201,7 @@ util_format_nv12_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 16}, /* y = y */
@@ -6216,7 +6216,7 @@ util_format_nv12_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = w */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* y */
UTIL_FORMAT_SWIZZLE_Y, /* u */
@@ -6245,7 +6245,7 @@ util_format_nv21_description = {
FALSE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 16}, /* y = y */
@@ -6260,7 +6260,7 @@ util_format_nv21_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = w */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* y */
UTIL_FORMAT_SWIZZLE_Y, /* u */
@@ -6289,7 +6289,7 @@ util_format_a4r4_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 4}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 0}, /* y = a */
@@ -6304,7 +6304,7 @@ util_format_a4r4_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -6333,7 +6333,7 @@ util_format_r4a4_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 4}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 4, 0}, /* y = r */
@@ -6348,7 +6348,7 @@ util_format_r4a4_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -6377,7 +6377,7 @@ util_format_r8a8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = a */
@@ -6392,7 +6392,7 @@ util_format_r8a8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -6421,7 +6421,7 @@ util_format_a8r8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = r */
@@ -6436,7 +6436,7 @@ util_format_a8r8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -6465,7 +6465,7 @@ util_format_r10g10b10a2_uscaled_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 10, 20}, /* y = b */
@@ -6480,7 +6480,7 @@ util_format_r10g10b10a2_uscaled_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6509,7 +6509,7 @@ util_format_r10g10b10a2_sscaled_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 10, 20}, /* y = b */
@@ -6524,7 +6524,7 @@ util_format_r10g10b10a2_sscaled_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6553,7 +6553,7 @@ util_format_r10g10b10a2_snorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 10, 20}, /* y = b */
@@ -6568,7 +6568,7 @@ util_format_r10g10b10a2_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6597,7 +6597,7 @@ util_format_b10g10r10a2_uscaled_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 10, 20}, /* y = r */
@@ -6612,7 +6612,7 @@ util_format_b10g10r10a2_uscaled_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6641,7 +6641,7 @@ util_format_b10g10r10a2_sscaled_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 10, 20}, /* y = r */
@@ -6656,7 +6656,7 @@ util_format_b10g10r10a2_sscaled_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6685,7 +6685,7 @@ util_format_b10g10r10a2_snorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 10, 20}, /* y = r */
@@ -6700,7 +6700,7 @@ util_format_b10g10r10a2_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -6755,7 +6755,7 @@ util_format_r8g8_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 0}, /* y = g */
@@ -6770,7 +6770,7 @@ util_format_r8g8_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -6799,7 +6799,7 @@ util_format_r8g8b8_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 8}, /* y = g */
@@ -6814,7 +6814,7 @@ util_format_r8g8b8_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -6843,7 +6843,7 @@ util_format_r8g8b8a8_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 16}, /* y = g */
@@ -6858,7 +6858,7 @@ util_format_r8g8b8a8_uint_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -6913,7 +6913,7 @@ util_format_r8g8_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 0}, /* y = g */
@@ -6928,7 +6928,7 @@ util_format_r8g8_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -6957,7 +6957,7 @@ util_format_r8g8b8_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 8}, /* y = g */
@@ -6972,7 +6972,7 @@ util_format_r8g8b8_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7001,7 +7001,7 @@ util_format_r8g8b8a8_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 16}, /* y = g */
@@ -7016,7 +7016,7 @@ util_format_r8g8b8a8_sint_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 24} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7071,7 +7071,7 @@ util_format_r16g16_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 0}, /* y = g */
@@ -7086,7 +7086,7 @@ util_format_r16g16_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7115,7 +7115,7 @@ util_format_r16g16b16_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 16}, /* y = g */
@@ -7130,7 +7130,7 @@ util_format_r16g16b16_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7159,7 +7159,7 @@ util_format_r16g16b16a16_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 32}, /* y = g */
@@ -7174,7 +7174,7 @@ util_format_r16g16b16a16_uint_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7229,7 +7229,7 @@ util_format_r16g16_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 0}, /* y = g */
@@ -7244,7 +7244,7 @@ util_format_r16g16_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7273,7 +7273,7 @@ util_format_r16g16b16_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 16}, /* y = g */
@@ -7288,7 +7288,7 @@ util_format_r16g16b16_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7317,7 +7317,7 @@ util_format_r16g16b16a16_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 32}, /* y = g */
@@ -7332,7 +7332,7 @@ util_format_r16g16b16a16_sint_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 48} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7387,7 +7387,7 @@ util_format_r32g32_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 0}, /* y = g */
@@ -7402,7 +7402,7 @@ util_format_r32g32_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7431,7 +7431,7 @@ util_format_r32g32b32_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 32}, /* y = g */
@@ -7446,7 +7446,7 @@ util_format_r32g32b32_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7475,7 +7475,7 @@ util_format_r32g32b32a32_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 64}, /* y = g */
@@ -7490,7 +7490,7 @@ util_format_r32g32b32a32_uint_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7545,7 +7545,7 @@ util_format_r32g32_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 0}, /* y = g */
@@ -7560,7 +7560,7 @@ util_format_r32g32_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7589,7 +7589,7 @@ util_format_r32g32b32_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 64}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 32}, /* y = g */
@@ -7604,7 +7604,7 @@ util_format_r32g32b32_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7633,7 +7633,7 @@ util_format_r32g32b32a32_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 64}, /* y = g */
@@ -7648,7 +7648,7 @@ util_format_r32g32b32a32_sint_description = {
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 96} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -7755,7 +7755,7 @@ util_format_l8a8_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 8}, /* x = rgb */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 0}, /* y = a */
@@ -7770,7 +7770,7 @@ util_format_l8a8_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -7877,7 +7877,7 @@ util_format_l8a8_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 8}, /* x = rgb */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 0}, /* y = a */
@@ -7892,7 +7892,7 @@ util_format_l8a8_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -7999,7 +7999,7 @@ util_format_l16a16_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 16}, /* x = rgb */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 0}, /* y = a */
@@ -8014,7 +8014,7 @@ util_format_l16a16_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -8121,7 +8121,7 @@ util_format_l16a16_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 16}, /* x = rgb */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 0}, /* y = a */
@@ -8136,7 +8136,7 @@ util_format_l16a16_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -8243,7 +8243,7 @@ util_format_l32a32_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 32}, /* x = rgb */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 0}, /* y = a */
@@ -8258,7 +8258,7 @@ util_format_l32a32_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -8365,7 +8365,7 @@ util_format_l32a32_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 32}, /* x = rgb */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 0}, /* y = a */
@@ -8380,7 +8380,7 @@ util_format_l32a32_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -8409,7 +8409,7 @@ util_format_b10g10r10a2_uint_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 10, 20}, /* y = r */
@@ -8424,7 +8424,7 @@ util_format_b10g10r10a2_uint_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -8453,7 +8453,7 @@ util_format_r8g8b8x8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -8468,7 +8468,7 @@ util_format_r8g8b8x8_snorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8497,7 +8497,7 @@ util_format_r8g8b8x8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 16}, /* y = g */
@@ -8512,7 +8512,7 @@ util_format_r8g8b8x8_srgb_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -8541,7 +8541,7 @@ util_format_r8g8b8x8_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 16}, /* y = g */
@@ -8556,7 +8556,7 @@ util_format_r8g8b8x8_uint_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8585,7 +8585,7 @@ util_format_r8g8b8x8_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 24}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 16}, /* y = g */
@@ -8600,7 +8600,7 @@ util_format_r8g8b8x8_sint_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8629,7 +8629,7 @@ util_format_b10g10r10x2_unorm_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30}, /* x = x */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 10, 20}, /* y = r */
@@ -8644,7 +8644,7 @@ util_format_b10g10r10x2_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 2, 30} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -8673,7 +8673,7 @@ util_format_r16g16b16x16_unorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 32}, /* y = g */
@@ -8688,7 +8688,7 @@ util_format_r16g16b16x16_unorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 16, 48} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8717,7 +8717,7 @@ util_format_r16g16b16x16_snorm_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 32}, /* y = g */
@@ -8732,7 +8732,7 @@ util_format_r16g16b16x16_snorm_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 16, 48} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8761,7 +8761,7 @@ util_format_r16g16b16x16_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 32}, /* y = g */
@@ -8776,7 +8776,7 @@ util_format_r16g16b16x16_float_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 16, 48} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8805,7 +8805,7 @@ util_format_r16g16b16x16_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 32}, /* y = g */
@@ -8820,7 +8820,7 @@ util_format_r16g16b16x16_uint_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 16, 48} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8849,7 +8849,7 @@ util_format_r16g16b16x16_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 48}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 32}, /* y = g */
@@ -8864,7 +8864,7 @@ util_format_r16g16b16x16_sint_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 16, 48} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8893,7 +8893,7 @@ util_format_r32g32b32x32_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 64}, /* y = g */
@@ -8908,7 +8908,7 @@ util_format_r32g32b32x32_float_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 32, 96} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8937,7 +8937,7 @@ util_format_r32g32b32x32_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 64}, /* y = g */
@@ -8952,7 +8952,7 @@ util_format_r32g32b32x32_uint_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 32, 96} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -8981,7 +8981,7 @@ util_format_r32g32b32x32_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 96}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 64}, /* y = g */
@@ -8996,7 +8996,7 @@ util_format_r32g32b32x32_sint_description = {
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 32, 96} /* w = x */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -9025,7 +9025,7 @@ util_format_r8a8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 0}, /* y = a */
@@ -9040,7 +9040,7 @@ util_format_r8a8_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9069,7 +9069,7 @@ util_format_r16a16_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 0}, /* y = a */
@@ -9084,7 +9084,7 @@ util_format_r16a16_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9113,7 +9113,7 @@ util_format_r16a16_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 0}, /* y = a */
@@ -9128,7 +9128,7 @@ util_format_r16a16_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9157,7 +9157,7 @@ util_format_r16a16_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 16, 0}, /* y = a */
@@ -9172,7 +9172,7 @@ util_format_r16a16_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9201,7 +9201,7 @@ util_format_r32a32_float_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_FLOAT, FALSE, FALSE, 32, 0}, /* y = a */
@@ -9216,7 +9216,7 @@ util_format_r32a32_float_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9245,7 +9245,7 @@ util_format_r8a8_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 8, 0}, /* y = a */
@@ -9260,7 +9260,7 @@ util_format_r8a8_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9289,7 +9289,7 @@ util_format_r8a8_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 8}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 8, 0}, /* y = a */
@@ -9304,7 +9304,7 @@ util_format_r8a8_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9333,7 +9333,7 @@ util_format_r16a16_uint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 16, 0}, /* y = a */
@@ -9348,7 +9348,7 @@ util_format_r16a16_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9377,7 +9377,7 @@ util_format_r16a16_sint_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 16}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 16, 0}, /* y = a */
@@ -9392,7 +9392,7 @@ util_format_r16a16_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9421,7 +9421,7 @@ util_format_r32a32_uint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 32, 0}, /* y = a */
@@ -9436,7 +9436,7 @@ util_format_r32a32_uint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9465,7 +9465,7 @@ util_format_r32a32_sint_description = {
TRUE, /* is_array */
FALSE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 32}, /* x = r */
{UTIL_FORMAT_TYPE_SIGNED, FALSE, TRUE, 32, 0}, /* y = a */
@@ -9480,7 +9480,7 @@ util_format_r32a32_sint_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* r */
UTIL_FORMAT_SWIZZLE_0, /* g */
@@ -9509,7 +9509,7 @@ util_format_r10g10b10a2_uint_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 2, 30}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 10, 20}, /* y = b */
@@ -9524,7 +9524,7 @@ util_format_r10g10b10a2_uint_description = {
{UTIL_FORMAT_TYPE_UNSIGNED, FALSE, TRUE, 2, 30} /* w = a */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -9553,7 +9553,7 @@ util_format_b5g6r5_srgb_description = {
FALSE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 5, 11}, /* x = r */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 6, 5}, /* y = g */
@@ -9568,7 +9568,7 @@ util_format_b5g6r5_srgb_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_X, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -9597,7 +9597,7 @@ util_format_a8l8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = rgb */
@@ -9612,7 +9612,7 @@ util_format_a8l8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -9641,7 +9641,7 @@ util_format_a8l8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 0}, /* y = rgb */
@@ -9656,7 +9656,7 @@ util_format_a8l8_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -9685,7 +9685,7 @@ util_format_a8l8_srgb_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = rgb */
@@ -9700,7 +9700,7 @@ util_format_a8l8_srgb_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* sr */
UTIL_FORMAT_SWIZZLE_Y, /* sg */
@@ -9729,7 +9729,7 @@ util_format_a16l16_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 16}, /* x = a */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 0}, /* y = rgb */
@@ -9744,7 +9744,7 @@ util_format_a16l16_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_Y, /* g */
@@ -9773,7 +9773,7 @@ util_format_g8r8_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 8}, /* x = g */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 8, 0}, /* y = r */
@@ -9788,7 +9788,7 @@ util_format_g8r8_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -9817,7 +9817,7 @@ util_format_g8r8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 8}, /* x = g */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 0}, /* y = r */
@@ -9832,7 +9832,7 @@ util_format_g8r8_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -9861,7 +9861,7 @@ util_format_g16r16_unorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 16}, /* x = g */
{UTIL_FORMAT_TYPE_UNSIGNED, TRUE, FALSE, 16, 0}, /* y = r */
@@ -9876,7 +9876,7 @@ util_format_g16r16_unorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -9905,7 +9905,7 @@ util_format_g16r16_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 16}, /* x = g */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 16, 0}, /* y = r */
@@ -9920,7 +9920,7 @@ util_format_g16r16_snorm_description = {
{0, 0, 0, 0, 0}
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_Y, /* r */
UTIL_FORMAT_SWIZZLE_X, /* g */
@@ -9949,7 +9949,7 @@ util_format_a8b8g8r8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24}, /* x = a */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 16}, /* y = b */
@@ -9964,7 +9964,7 @@ util_format_a8b8g8r8_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
@@ -9993,7 +9993,7 @@ util_format_x8b8g8r8_snorm_description = {
TRUE, /* is_array */
TRUE, /* is_bitmask */
FALSE, /* is_mixed */
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
{UTIL_FORMAT_TYPE_VOID, FALSE, FALSE, 8, 24}, /* x = x */
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 16}, /* y = b */
@@ -10008,7 +10008,7 @@ util_format_x8b8g8r8_snorm_description = {
{UTIL_FORMAT_TYPE_SIGNED, TRUE, FALSE, 8, 24} /* w = r */
},
#endif
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
{
UTIL_FORMAT_SWIZZLE_W, /* r */
UTIL_FORMAT_SWIZZLE_Z, /* g */
diff --git a/server/.clang-format b/server/.clang-format
new file mode 120000
index 00000000..e06681e2
--- /dev/null
+++ b/server/.clang-format
@@ -0,0 +1 @@
+../src/venus/.clang-format \ No newline at end of file
diff --git a/server/main.c b/server/main.c
new file mode 100644
index 00000000..90c6c624
--- /dev/null
+++ b/server/main.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_context.h"
+#include "render_server.h"
+
+/* The main process is the server process. It enters render_server_main and
+ * never returns except on fatal errors.
+ *
+ * The server process supports only one connection currently. It creates a
+ * render_client to manage the connection. There is a client process at the
+ * other end of the connection. When the client process requests a new
+ * context to be created, the server process creates a worker. It also sets
+ * up a socket pair, with one end owned by the worker and the other end sent
+ * to and owned by the client process.
+ *
+ * A worker can be a subprocess forked from the server process, or a thread
+ * created by the server process. When a worker is a subprocess, the
+ * subprocess returns from render_server_main and enters render_context_main.
+ *
+ * When a worker is a thread, the thread enters render_context_main directly
+ * from its start function. In this case, render_context_main must be
+ * thread-safe.
+ */
+int
+main(int argc, char **argv)
+{
+ render_log_init();
+
+ struct render_context_args ctx_args;
+ bool ok = render_server_main(argc, argv, &ctx_args);
+
+ /* this is a subprocess */
+ if (ok && ctx_args.valid)
+ ok = render_context_main(&ctx_args);
+
+ return ok ? 0 : -1;
+}
diff --git a/server/meson.build b/server/meson.build
new file mode 100644
index 00000000..43e79d8e
--- /dev/null
+++ b/server/meson.build
@@ -0,0 +1,33 @@
+# Copyright 2021 Google LLC
+# SPDX-License-Identifier: MIT
+
+virgl_render_server_sources = [
+ 'main.c',
+ 'render_client.c',
+ 'render_common.c',
+ 'render_context.c',
+ 'render_server.c',
+ 'render_socket.c',
+ 'render_virgl.c',
+ 'render_worker.c',
+]
+
+virgl_render_server_depends = [libvirglrenderer_dep]
+
+if with_render_server_worker == 'thread'
+ virgl_render_server_depends += [thread_dep]
+elif with_render_server_worker == 'minijail'
+ virgl_render_server_depends += [minijail_dep]
+endif
+
+if with_tracing == 'percetto'
+ virgl_render_server_depends += [percetto_dep]
+endif
+
+virgl_render_server = executable(
+ 'virgl_render_server',
+ virgl_render_server_sources,
+ dependencies : virgl_render_server_depends,
+ install : true,
+ install_dir : render_server_install_dir,
+)
diff --git a/server/render_client.c b/server/render_client.c
new file mode 100644
index 00000000..cc85e3d2
--- /dev/null
+++ b/server/render_client.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_client.h"
+
+#include <unistd.h>
+#include <vulkan/vulkan.h>
+
+#include "render_context.h"
+#include "render_server.h"
+#include "render_virgl.h"
+#include "render_worker.h"
+
+/* There is a render_context_record for each worker.
+ *
+ * When the client process destroys a context, it closes the connection to the
+ * worker, which leads to worker termination. It also sends a
+ * RENDER_CLIENT_OP_DESTROY_CONTEXT to us to remove the record. Because we
+ * are responsible for cleaning up the worker, we don't care if the worker has
+ * terminated or not. We always kill, reap, and remove the record.
+ */
+struct render_context_record {
+ uint32_t ctx_id;
+ struct render_worker *worker;
+
+ struct list_head head;
+};
+
+static struct render_context_record *
+render_client_find_record(struct render_client *client, uint32_t ctx_id)
+{
+ list_for_each_entry (struct render_context_record, rec, &client->context_records,
+ head) {
+ if (rec->ctx_id == ctx_id)
+ return rec;
+ }
+ return NULL;
+}
+
+static void
+render_client_detach_all_records(struct render_client *client)
+{
+ struct render_server *srv = client->server;
+
+ /* free all render_workers without killing nor reaping */
+ render_worker_jail_detach_workers(srv->worker_jail);
+
+ list_for_each_entry_safe (struct render_context_record, rec, &client->context_records,
+ head)
+ free(rec);
+ list_inithead(&client->context_records);
+}
+
+static void
+render_client_remove_record(struct render_client *client,
+ struct render_context_record *rec)
+{
+ struct render_server *srv = client->server;
+
+ render_worker_destroy(srv->worker_jail, rec->worker);
+
+ list_del(&rec->head);
+ free(rec);
+}
+
+static void
+render_client_clear_records(struct render_client *client)
+{
+ list_for_each_entry_safe (struct render_context_record, rec, &client->context_records,
+ head)
+ render_client_remove_record(client, rec);
+}
+
+static void
+init_context_args(struct render_context_args *ctx_args,
+ uint32_t init_flags,
+ const struct render_client_op_create_context_request *req,
+ int ctx_fd)
+{
+ *ctx_args = (struct render_context_args){
+ .valid = true,
+ .init_flags = init_flags,
+ .ctx_id = req->ctx_id,
+ .ctx_fd = ctx_fd,
+ };
+
+ static_assert(sizeof(ctx_args->ctx_name) == sizeof(req->ctx_name), "");
+ memcpy(ctx_args->ctx_name, req->ctx_name, sizeof(req->ctx_name) - 1);
+}
+
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+
+static int
+render_client_worker_thread(void *thread_data)
+{
+ const struct render_context_args *ctx_args = thread_data;
+ return render_context_main(ctx_args) ? 0 : -1;
+}
+
+#endif /* ENABLE_RENDER_SERVER_WORKER_THREAD */
+
+static bool
+render_client_create_context(struct render_client *client,
+ const struct render_client_op_create_context_request *req,
+ int *out_remote_fd)
+{
+ struct render_server *srv = client->server;
+
+ struct render_context_record *rec = calloc(1, sizeof(*rec));
+ if (!rec)
+ return false;
+
+ int socket_fds[2];
+ if (!render_socket_pair(socket_fds)) {
+ free(rec);
+ return false;
+ }
+ int ctx_fd = socket_fds[0];
+ int remote_fd = socket_fds[1];
+
+ struct render_context_args ctx_args;
+ init_context_args(&ctx_args, client->init_flags, req, ctx_fd);
+
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ rec->worker = render_worker_create(srv->worker_jail, render_client_worker_thread,
+ &ctx_args, sizeof(ctx_args));
+ if (rec->worker)
+ ctx_fd = -1; /* ownership transferred */
+#else
+ rec->worker = render_worker_create(srv->worker_jail, NULL, NULL, 0);
+#endif
+ if (!rec->worker) {
+ render_log("failed to create a context worker");
+ close(ctx_fd);
+ close(remote_fd);
+ free(rec);
+ return false;
+ }
+
+ rec->ctx_id = req->ctx_id;
+ list_addtail(&rec->head, &client->context_records);
+
+ if (!render_worker_is_record(rec->worker)) {
+ /* this is the child process */
+ srv->state = RENDER_SERVER_STATE_SUBPROCESS;
+ *srv->context_args = ctx_args;
+
+ render_client_detach_all_records(client);
+
+ /* ctx_fd ownership transferred */
+ assert(srv->context_args->ctx_fd == ctx_fd);
+
+ close(remote_fd);
+ *out_remote_fd = -1;
+
+ return true;
+ }
+
+ /* this is the parent process */
+ if (ctx_fd >= 0)
+ close(ctx_fd);
+ *out_remote_fd = remote_fd;
+
+ return true;
+}
+
+static bool
+render_client_dispatch_destroy_context(struct render_client *client,
+ const union render_client_op_request *req)
+{
+ const uint32_t ctx_id = req->destroy_context.ctx_id;
+ struct render_context_record *rec = render_client_find_record(client, ctx_id);
+ if (rec)
+ render_client_remove_record(client, rec);
+
+ return true;
+}
+
+static bool
+render_client_dispatch_create_context(struct render_client *client,
+ const union render_client_op_request *req)
+{
+ struct render_server *srv = client->server;
+
+ int remote_fd;
+ bool ok = render_client_create_context(client, &req->create_context, &remote_fd);
+ if (!ok)
+ return false;
+
+ if (srv->state == RENDER_SERVER_STATE_SUBPROCESS) {
+ assert(remote_fd < 0);
+ return true;
+ }
+
+ const struct render_client_op_create_context_reply reply = {
+ .ok = ok,
+ };
+ if (!ok)
+ return render_socket_send_reply(&client->socket, &reply, sizeof(reply));
+
+ ok = render_socket_send_reply_with_fds(&client->socket, &reply, sizeof(reply),
+ &remote_fd, 1);
+ close(remote_fd);
+
+ return ok;
+}
+
+static bool
+render_client_dispatch_reset(struct render_client *client,
+ UNUSED const union render_client_op_request *req)
+{
+ render_client_clear_records(client);
+ return true;
+}
+
+static bool
+render_client_dispatch_init(struct render_client *client,
+ const union render_client_op_request *req)
+{
+ client->init_flags = req->init.flags;
+
+ /* init now to avoid doing it in each worker, but only when tracing is
+ * disabled because perfetto can get confused
+ */
+#ifndef ENABLE_TRACING
+ render_virgl_init(client->init_flags);
+#endif
+
+ /* this makes the Vulkan loader loads ICDs */
+ uint32_t unused_count;
+ vkEnumerateInstanceExtensionProperties(NULL, &unused_count, NULL);
+
+ return true;
+}
+
+static bool
+render_client_dispatch_nop(UNUSED struct render_client *client,
+ UNUSED const union render_client_op_request *req)
+{
+ return true;
+}
+
+struct render_client_dispatch_entry {
+ size_t expect_size;
+ bool (*dispatch)(struct render_client *client,
+ const union render_client_op_request *req);
+};
+
+static const struct render_client_dispatch_entry
+ render_client_dispatch_table[RENDER_CLIENT_OP_COUNT] = {
+#define RENDER_CLIENT_DISPATCH(NAME, name) \
+ [RENDER_CLIENT_OP_## \
+ NAME] = { .expect_size = sizeof(struct render_client_op_##name##_request), \
+ .dispatch = render_client_dispatch_##name }
+ RENDER_CLIENT_DISPATCH(NOP, nop),
+ RENDER_CLIENT_DISPATCH(INIT, init),
+ RENDER_CLIENT_DISPATCH(RESET, reset),
+ RENDER_CLIENT_DISPATCH(CREATE_CONTEXT, create_context),
+ RENDER_CLIENT_DISPATCH(DESTROY_CONTEXT, destroy_context),
+#undef RENDER_CLIENT_DISPATCH
+ };
+
+bool
+render_client_dispatch(struct render_client *client)
+{
+ union render_client_op_request req;
+ size_t req_size;
+ if (!render_socket_receive_request(&client->socket, &req, sizeof(req), &req_size))
+ return false;
+
+ if (req.header.op >= RENDER_CLIENT_OP_COUNT) {
+ render_log("invalid client op %d", req.header.op);
+ return false;
+ }
+
+ const struct render_client_dispatch_entry *entry =
+ &render_client_dispatch_table[req.header.op];
+ if (entry->expect_size != req_size) {
+ render_log("invalid request size %zu for client op %d", req_size, req.header.op);
+ return false;
+ }
+
+ if (!entry->dispatch(client, &req))
+ render_log("failed to dispatch client op %d", req.header.op);
+
+ return true;
+}
+
+void
+render_client_destroy(struct render_client *client)
+{
+ struct render_server *srv = client->server;
+
+ if (srv->state == RENDER_SERVER_STATE_SUBPROCESS) {
+ assert(list_is_empty(&client->context_records));
+ } else {
+ render_client_clear_records(client);
+
+ /* see render_client_dispatch_init */
+#ifndef ENABLE_TRACING
+ render_virgl_fini();
+#endif
+ }
+
+ render_socket_fini(&client->socket);
+ free(client);
+}
+
+struct render_client *
+render_client_create(struct render_server *srv, int client_fd)
+{
+ struct render_client *client = calloc(1, sizeof(*client));
+
+ if (!client)
+ return NULL;
+
+ client->server = srv;
+ render_socket_init(&client->socket, client_fd);
+
+ list_inithead(&client->context_records);
+
+ return client;
+}
diff --git a/server/render_client.h b/server/render_client.h
new file mode 100644
index 00000000..214ba20e
--- /dev/null
+++ b/server/render_client.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_CLIENT_H
+#define RENDER_CLIENT_H
+
+#include "render_common.h"
+
+/* Per-client state owned by the render server (one client per server in
+ * the current design; see struct render_server).
+ */
+struct render_client {
+ struct render_server *server;
+ struct render_socket socket;
+
+ /* VIRGL_RENDERER_* flags received via RENDER_CLIENT_OP_INIT */
+ uint32_t init_flags;
+
+ /* records of contexts created via RENDER_CLIENT_OP_CREATE_CONTEXT */
+ struct list_head context_records;
+};
+
+struct render_client *
+render_client_create(struct render_server *srv, int client_fd);
+
+void
+render_client_destroy(struct render_client *client);
+
+bool
+render_client_dispatch(struct render_client *client);
+
+#endif /* RENDER_CLIENT_H */
diff --git a/server/render_common.c b/server/render_common.c
new file mode 100644
index 00000000..e51bb88c
--- /dev/null
+++ b/server/render_common.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_common.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <syslog.h>
+
+/* Open syslog for the server; LOG_PERROR also mirrors messages to stderr. */
+void
+render_log_init(void)
+{
+ openlog(NULL, LOG_NDELAY | LOG_PERROR | LOG_PID, LOG_USER);
+}
+
+/* printf-style logging to syslog at LOG_DEBUG priority. */
+void
+render_log(const char *fmt, ...)
+{
+ va_list va;
+
+ va_start(va, fmt);
+ vsyslog(LOG_DEBUG, fmt, va);
+ va_end(va);
+}
diff --git a/server/render_common.h b/server/render_common.h
new file mode 100644
index 00000000..33707ecd
--- /dev/null
+++ b/server/render_common.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_COMMON_H
+#define RENDER_COMMON_H
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "util/compiler.h"
+#include "util/list.h"
+#include "util/macros.h"
+#include "util/u_pointer.h"
+
+#include "render_protocol.h"
+#include "render_socket.h"
+
+/* forward declarations so headers can exchange pointers without
+ * including each other
+ */
+struct render_client;
+struct render_context;
+struct render_context_args;
+struct render_server;
+struct render_virgl;
+struct render_worker;
+struct render_worker_jail;
+
+void
+render_log_init(void);
+
+/* printf-style; see render_common.c */
+void
+render_log(const char *fmt, ...);
+
+#endif /* RENDER_COMMON_H */
diff --git a/server/render_context.c b/server/render_context.c
new file mode 100644
index 00000000..4a643d35
--- /dev/null
+++ b/server/render_context.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_context.h"
+
+#include <sys/mman.h>
+
+#include "util/u_thread.h"
+#include "virgl_util.h"
+#include "virglrenderer.h"
+#include "vrend_iov.h"
+
+#include "render_virgl.h"
+
+/* Import a blob resource fd into virglrenderer and attach it to the
+ * context.  Does not take ownership of res_fd; the caller still closes
+ * it.  Returns false on invalid fd_type/size or import failure.
+ */
+static bool
+render_context_import_resource(struct render_context *ctx,
+ const struct render_context_op_import_resource_request *req,
+ int res_fd)
+{
+ const uint32_t res_id = req->res_id;
+ const enum virgl_resource_fd_type fd_type = req->fd_type;
+ const uint64_t size = req->size;
+
+ if (fd_type == VIRGL_RESOURCE_FD_INVALID || !size) {
+ render_log("failed to attach invalid resource %d", res_id);
+ return false;
+ }
+
+ /* map the wire fd type to the virglrenderer blob fd type; an unknown
+ * type becomes 0 and is left to virgl_renderer_resource_import_blob
+ * to reject
+ */
+ uint32_t import_fd_type;
+ switch (fd_type) {
+ case VIRGL_RESOURCE_FD_DMABUF:
+ import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF;
+ break;
+ case VIRGL_RESOURCE_FD_OPAQUE:
+ import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE;
+ break;
+ case VIRGL_RESOURCE_FD_SHM:
+ import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_SHM;
+ break;
+ default:
+ import_fd_type = 0;
+ break;
+ }
+ const struct virgl_renderer_resource_import_blob_args import_args = {
+ .res_handle = res_id,
+ .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
+ .fd_type = import_fd_type,
+ .fd = res_fd,
+ .size = size,
+ };
+
+ int ret = virgl_renderer_resource_import_blob(&import_args);
+ if (ret) {
+ render_log("failed to import blob resource %d (%d)", res_id, ret);
+ return false;
+ }
+
+ virgl_renderer_ctx_attach_resource(ctx->ctx_id, res_id);
+
+ return true;
+}
+
+/* Publish a new sequence number for a ring in the shared-memory timeline
+ * array and, when a fence eventfd was provided at init, signal it so the
+ * client can poll for the change.
+ */
+void
+render_context_update_timeline(struct render_context *ctx,
+ uint32_t ring_idx,
+ uint32_t seqno)
+{
+ /* this can be called by the context's main thread and sync threads */
+ atomic_store(&ctx->shmem_timelines[ring_idx], seqno);
+ if (ctx->fence_eventfd >= 0)
+ write_eventfd(ctx->fence_eventfd, 1);
+}
+
+/* Map the timeline shmem, create the virgl context, and record the
+ * shmem/eventfd state in ctx.  On success ownership of shmem_fd and
+ * fence_eventfd passes to ctx (released in render_context_fini).
+ */
+static bool
+render_context_init_virgl_context(struct render_context *ctx,
+ const struct render_context_op_init_request *req,
+ int shmem_fd,
+ int fence_eventfd)
+{
+ const int timeline_count = req->shmem_size / sizeof(*ctx->shmem_timelines);
+
+ /* NOTE(review): mapped PROT_WRITE only -- this side only stores into
+ * the timelines (atomic_store below); confirm no read access is ever
+ * needed through this mapping
+ */
+ void *shmem_ptr = mmap(NULL, req->shmem_size, PROT_WRITE, MAP_SHARED, shmem_fd, 0);
+ if (shmem_ptr == MAP_FAILED)
+ return false;
+
+ int ret = virgl_renderer_context_create_with_flags(ctx->ctx_id, req->flags,
+ ctx->name_len, ctx->name);
+ if (ret) {
+ munmap(shmem_ptr, req->shmem_size);
+ return false;
+ }
+
+ ctx->shmem_fd = shmem_fd;
+ ctx->shmem_size = req->shmem_size;
+ ctx->shmem_ptr = shmem_ptr;
+ ctx->shmem_timelines = shmem_ptr;
+
+ /* start every ring timeline at seqno 0 */
+ for (int i = 0; i < timeline_count; i++)
+ atomic_store(&ctx->shmem_timelines[i], 0);
+
+ ctx->timeline_count = timeline_count;
+
+ ctx->fence_eventfd = fence_eventfd;
+
+ return true;
+}
+
+/* Create a blob resource in the context and export it as an fd.
+ *
+ * On success the caller owns *out_res_fd and must close it after
+ * sending; the resource itself stays attached to the context.  On
+ * export failure the just-created resource is unreffed again.
+ */
+static bool
+render_context_create_resource(struct render_context *ctx,
+ const struct render_context_op_create_resource_request *req,
+ enum virgl_resource_fd_type *out_fd_type,
+ uint32_t *out_map_info,
+ int *out_res_fd)
+{
+ const uint32_t res_id = req->res_id;
+ const struct virgl_renderer_resource_create_blob_args blob_args = {
+ .res_handle = res_id,
+ .ctx_id = ctx->ctx_id,
+ .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
+ .blob_flags = req->blob_flags,
+ .blob_id = req->blob_id,
+ .size = req->blob_size,
+ };
+ int ret = virgl_renderer_resource_create_blob(&blob_args);
+ if (ret) {
+ render_log("failed to create blob resource");
+ return false;
+ }
+
+ uint32_t map_info;
+ ret = virgl_renderer_resource_get_map_info(res_id, &map_info);
+ if (ret) {
+ /* properly set map_info when the resource has no map cache info */
+ map_info = VIRGL_RENDERER_MAP_CACHE_NONE;
+ }
+
+ uint32_t fd_type;
+ int res_fd;
+ ret = virgl_renderer_resource_export_blob(res_id, &fd_type, &res_fd);
+ if (ret) {
+ virgl_renderer_resource_unref(res_id);
+ return false;
+ }
+
+ /* RENDER_CONTEXT_OP_CREATE_RESOURCE implies attach and proxy will not send
+ * RENDER_CONTEXT_OP_IMPORT_RESOURCE to attach the resource again.
+ */
+ virgl_renderer_ctx_attach_resource(ctx->ctx_id, res_id);
+
+ /* translate the exported fd type back to the wire enum; an unknown
+ * type maps to 0 (VIRGL_RESOURCE_FD_INVALID equivalent on the wire)
+ */
+ switch (fd_type) {
+ case VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF:
+ *out_fd_type = VIRGL_RESOURCE_FD_DMABUF;
+ break;
+ case VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE:
+ *out_fd_type = VIRGL_RESOURCE_FD_OPAQUE;
+ break;
+ case VIRGL_RENDERER_BLOB_FD_TYPE_SHM:
+ *out_fd_type = VIRGL_RESOURCE_FD_SHM;
+ break;
+ default:
+ *out_fd_type = 0;
+ }
+
+ *out_map_info = map_info;
+ *out_res_fd = res_fd;
+
+ return true;
+}
+
+/* Handle RENDER_CONTEXT_OP_SUBMIT_FENCE: submit a mergeable fence on the
+ * requested ring; completion is reported via the shmem timeline (see
+ * render_context_update_timeline).
+ *
+ * NOTE(review): flags and ring_index are validated only by assert, so
+ * release builds pass them through unchecked -- confirm the proxy side
+ * guarantees them.
+ */
+static bool
+render_context_dispatch_submit_fence(struct render_context *ctx,
+ const union render_context_op_request *req,
+ UNUSED const int *fds,
+ UNUSED int fd_count)
+{
+ /* always merge fences */
+ assert(!(req->submit_fence.flags & ~VIRGL_RENDERER_FENCE_FLAG_MERGEABLE));
+ const uint32_t flags = VIRGL_RENDERER_FENCE_FLAG_MERGEABLE;
+ const uint32_t ring_idx = req->submit_fence.ring_index;
+ const uint32_t seqno = req->submit_fence.seqno;
+
+ assert(ring_idx < (uint32_t)ctx->timeline_count);
+ int ret = virgl_renderer_context_create_fence(ctx->ctx_id, flags, ring_idx, seqno);
+
+ return !ret;
+}
+
+/* Handle RENDER_CONTEXT_OP_SUBMIT_CMD: execute a command stream.  Small
+ * streams are inlined in the request; larger ones arrive as the inlined
+ * prefix plus a follow-up message holding the remainder.  Replies with
+ * ok/!ok.
+ */
+static bool
+render_context_dispatch_submit_cmd(struct render_context *ctx,
+ const union render_context_op_request *req,
+ UNUSED const int *fds,
+ UNUSED int fd_count)
+{
+ const int ndw = req->submit_cmd.size / sizeof(uint32_t);
+ void *cmd = (void *)req->submit_cmd.cmd;
+ if (req->submit_cmd.size > sizeof(req->submit_cmd.cmd)) {
+ cmd = malloc(req->submit_cmd.size);
+ if (!cmd)
+ /* NOTE(review): returns true without sending a reply here, and
+ * the trailing data message is never drained -- confirm the
+ * proxy tolerates this (it appears to wait for a reply below)
+ */
+ return true;
+
+ const size_t inlined = sizeof(req->submit_cmd.cmd);
+ const size_t remain = req->submit_cmd.size - inlined;
+
+ memcpy(cmd, req->submit_cmd.cmd, inlined);
+ if (!render_socket_receive_data(&ctx->socket, (char *)cmd + inlined, remain)) {
+ free(cmd);
+ return false;
+ }
+ }
+
+ int ret = virgl_renderer_submit_cmd(cmd, ctx->ctx_id, ndw);
+
+ /* free only the heap copy, not the inlined request buffer */
+ if (cmd != req->submit_cmd.cmd)
+ free(cmd);
+
+ const struct render_context_op_submit_cmd_reply reply = {
+ .ok = !ret,
+ };
+ if (!render_socket_send_reply(&ctx->socket, &reply, sizeof(reply)))
+ return false;
+
+ return true;
+}
+
+/* Handle RENDER_CONTEXT_OP_CREATE_RESOURCE: create/export a blob and
+ * reply with fd_type + map_info, passing the exported fd along on
+ * success.  On failure the reply carries VIRGL_RESOURCE_FD_INVALID and
+ * no fd.
+ */
+static bool
+render_context_dispatch_create_resource(struct render_context *ctx,
+ const union render_context_op_request *req,
+ UNUSED const int *fds,
+ UNUSED int fd_count)
+{
+ struct render_context_op_create_resource_reply reply = {
+ .fd_type = VIRGL_RESOURCE_FD_INVALID,
+ };
+ int res_fd;
+ bool ok = render_context_create_resource(ctx, &req->create_resource, &reply.fd_type,
+ &reply.map_info, &res_fd);
+ if (!ok)
+ return render_socket_send_reply(&ctx->socket, &reply, sizeof(reply));
+
+ ok =
+ render_socket_send_reply_with_fds(&ctx->socket, &reply, sizeof(reply), &res_fd, 1);
+ /* the fd was duplicated into the message; drop our copy */
+ close(res_fd);
+
+ return ok;
+}
+
+/* Handle RENDER_CONTEXT_OP_DESTROY_RESOURCE: drop the context's
+ * reference on the resource; no reply is sent.
+ */
+static bool
+render_context_dispatch_destroy_resource(UNUSED struct render_context *ctx,
+ const union render_context_op_request *req,
+ UNUSED const int *fds,
+ UNUSED int fd_count)
+{
+ virgl_renderer_resource_unref(req->destroy_resource.res_id);
+ return true;
+}
+
+/* Handle RENDER_CONTEXT_OP_IMPORT_RESOURCE: requires exactly one fd,
+ * which is imported as a blob and attached to the context.
+ */
+static bool
+render_context_dispatch_import_resource(struct render_context *ctx,
+ const union render_context_op_request *req,
+ const int *fds,
+ int fd_count)
+{
+ if (fd_count != 1) {
+ render_log("failed to attach resource with fd_count %d", fd_count);
+ return false;
+ }
+
+ /* classic 3d resource with valid size reuses the blob import path here */
+ return render_context_import_resource(ctx, &req->import_resource, fds[0]);
+}
+
+/* Handle RENDER_CONTEXT_OP_INIT: expects the shmem fd plus an optional
+ * fence eventfd (1 or 2 fds total); see render_context_init_virgl_context
+ * for fd ownership on success.
+ */
+static bool
+render_context_dispatch_init(struct render_context *ctx,
+ const union render_context_op_request *req,
+ const int *fds,
+ int fd_count)
+{
+ if (fd_count != 1 && fd_count != 2)
+ return false;
+
+ const int shmem_fd = fds[0];
+ const int fence_eventfd = fd_count == 2 ? fds[1] : -1;
+ return render_context_init_virgl_context(ctx, &req->init, shmem_fd, fence_eventfd);
+}
+
+/* Handle RENDER_CONTEXT_OP_NOP: do nothing, successfully. */
+static bool
+render_context_dispatch_nop(UNUSED struct render_context *ctx,
+ UNUSED const union render_context_op_request *req,
+ UNUSED const int *fds,
+ UNUSED int fd_count)
+{
+ return true;
+}
+
+/* One entry per context op: the exact request size, the maximum number
+ * of fds the op may carry, and the handler.
+ */
+struct render_context_dispatch_entry {
+ size_t expect_size;
+ int max_fd_count;
+ bool (*dispatch)(struct render_context *ctx,
+ const union render_context_op_request *req,
+ const int *fds,
+ int fd_count);
+};
+
+/* indexed by enum render_context_op */
+static const struct render_context_dispatch_entry
+ render_context_dispatch_table[RENDER_CONTEXT_OP_COUNT] = {
+#define RENDER_CONTEXT_DISPATCH(NAME, name, max_fd) \
+ [RENDER_CONTEXT_OP_## \
+ NAME] = { .expect_size = sizeof(struct render_context_op_##name##_request), \
+ .max_fd_count = (max_fd), \
+ .dispatch = render_context_dispatch_##name }
+ RENDER_CONTEXT_DISPATCH(NOP, nop, 0),
+ RENDER_CONTEXT_DISPATCH(INIT, init, 2),
+ RENDER_CONTEXT_DISPATCH(CREATE_RESOURCE, create_resource, 0),
+ RENDER_CONTEXT_DISPATCH(IMPORT_RESOURCE, import_resource, 1),
+ RENDER_CONTEXT_DISPATCH(DESTROY_RESOURCE, destroy_resource, 0),
+ RENDER_CONTEXT_DISPATCH(SUBMIT_CMD, submit_cmd, 0),
+ RENDER_CONTEXT_DISPATCH(SUBMIT_FENCE, submit_fence, 0),
+#undef RENDER_CONTEXT_DISPATCH
+ };
+
+/* Receive one context request (possibly with fds) and dispatch it under
+ * the global virgl dispatch lock.  Returns false on any failure; on the
+ * failure path every received fd is closed here.  On success the handler
+ * owns/consumes the fds.
+ */
+static bool
+render_context_dispatch(struct render_context *ctx)
+{
+ union render_context_op_request req;
+ size_t req_size;
+ int req_fds[8];
+ int req_fd_count;
+ if (!render_socket_receive_request_with_fds(&ctx->socket, &req, sizeof(req), &req_size,
+ req_fds, ARRAY_SIZE(req_fds),
+ &req_fd_count))
+ return false;
+
+ assert((unsigned int)req_fd_count <= ARRAY_SIZE(req_fds));
+
+ if (req.header.op >= RENDER_CONTEXT_OP_COUNT) {
+ render_log("invalid context op %d", req.header.op);
+ goto fail;
+ }
+
+ /* validate exact request size and fd count against the table entry */
+ const struct render_context_dispatch_entry *entry =
+ &render_context_dispatch_table[req.header.op];
+ if (entry->expect_size != req_size || entry->max_fd_count < req_fd_count) {
+ render_log("invalid request size (%zu) or fd count (%d) for context op %d",
+ req_size, req_fd_count, req.header.op);
+ goto fail;
+ }
+
+ /* virgl dispatch is serialized process-wide */
+ render_virgl_lock_dispatch();
+ const bool ok = entry->dispatch(ctx, &req, req_fds, req_fd_count);
+ render_virgl_unlock_dispatch();
+ if (!ok) {
+ render_log("failed to dispatch context op %d", req.header.op);
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ for (int i = 0; i < req_fd_count; i++)
+ close(req_fds[i]);
+ return false;
+}
+
+/* Dispatch requests until a failure; the normal exit is the client
+ * closing its end of the socket, which makes dispatch fail.
+ */
+static bool
+render_context_run(struct render_context *ctx)
+{
+ while (true) {
+ if (!render_context_dispatch(ctx))
+ return false;
+ }
+
+ return true;
+}
+
+/* Tear down the context: destroy the virgl context (under the dispatch
+ * lock), detach from the global context list, then release the shmem
+ * mapping/fd, the fence eventfd, the name, and the socket.
+ */
+static void
+render_context_fini(struct render_context *ctx)
+{
+ render_virgl_lock_dispatch();
+ /* destroy the context first to join its sync threads and ring threads */
+ virgl_renderer_context_destroy(ctx->ctx_id);
+ render_virgl_unlock_dispatch();
+
+ render_virgl_remove_context(ctx);
+
+ if (ctx->shmem_ptr)
+ munmap(ctx->shmem_ptr, ctx->shmem_size);
+ if (ctx->shmem_fd >= 0)
+ close(ctx->shmem_fd);
+
+ if (ctx->fence_eventfd >= 0)
+ close(ctx->fence_eventfd);
+
+ if (ctx->name)
+ free(ctx->name);
+
+ render_socket_fini(&ctx->socket);
+}
+
+/* Name the worker thread "virgl-<id>-<name>"; the 16-byte buffer means
+ * snprintf truncates long context names.
+ */
+static void
+render_context_set_thread_name(uint32_t ctx_id, const char *ctx_name)
+{
+ char thread_name[16];
+ snprintf(thread_name, ARRAY_SIZE(thread_name), "virgl-%d-%s", ctx_id, ctx_name);
+ u_thread_setname(thread_name);
+}
+
+/* Duplicate the context name into ctx, apply it as the thread name, and
+ * (on glibc) expose it as the process invocation name so mesa picks up
+ * app-specific driver config.  Returns false on allocation failure.
+ */
+static bool
+render_context_init_name(struct render_context *ctx,
+ uint32_t ctx_id,
+ const char *ctx_name)
+{
+ ctx->name_len = strlen(ctx_name);
+ ctx->name = malloc(ctx->name_len + 1);
+ if (!ctx->name)
+ return false;
+
+ strcpy(ctx->name, ctx_name);
+
+ render_context_set_thread_name(ctx_id, ctx_name);
+
+#ifdef _GNU_SOURCE
+ /* Sets the guest app executable name used by mesa to load app-specific driver
+ * configuration. */
+ program_invocation_name = ctx->name;
+ program_invocation_short_name = ctx->name;
+#endif
+
+ return true;
+}
+
+/* Initialize ctx from args: adopt the socket fd, copy the name, and
+ * register the context globally.  fds start at -1 so fini can tell
+ * "never set" from "owned".
+ */
+static bool
+render_context_init(struct render_context *ctx, const struct render_context_args *args)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->ctx_id = args->ctx_id;
+ render_socket_init(&ctx->socket, args->ctx_fd);
+ ctx->shmem_fd = -1;
+ ctx->fence_eventfd = -1;
+
+ if (!render_context_init_name(ctx, args->ctx_id, args->ctx_name))
+ return false;
+
+ render_virgl_add_context(ctx);
+
+ return true;
+}
+
+/* Entry point of a context worker: bring up virgl, run the dispatch
+ * loop until the client disconnects, then tear everything down.  Takes
+ * ownership of args->ctx_fd even on the early-error paths.
+ */
+bool
+render_context_main(const struct render_context_args *args)
+{
+ struct render_context ctx;
+
+ assert(args->valid && args->ctx_id && args->ctx_fd >= 0);
+
+ if (!render_virgl_init(args->init_flags)) {
+ close(args->ctx_fd);
+ return false;
+ }
+
+ if (!render_context_init(&ctx, args)) {
+ render_virgl_fini();
+ close(args->ctx_fd);
+ return false;
+ }
+
+ const bool ok = render_context_run(&ctx);
+ render_context_fini(&ctx);
+
+ render_virgl_fini();
+
+ return ok;
+}
diff --git a/server/render_context.h b/server/render_context.h
new file mode 100644
index 00000000..24435ab0
--- /dev/null
+++ b/server/render_context.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_CONTEXT_H
+#define RENDER_CONTEXT_H
+
+#include "render_common.h"
+
+#include <stdatomic.h>
+
+/* State of one context worker (see render_context.c). */
+struct render_context {
+ uint32_t ctx_id;
+ struct render_socket socket;
+ /* link in the global context list (render_virgl_add_context) */
+ struct list_head head;
+
+ char *name;
+ size_t name_len;
+
+ /* shared-memory array of per-ring sequence numbers */
+ int shmem_fd;
+ size_t shmem_size;
+ void *shmem_ptr;
+ atomic_uint *shmem_timelines;
+
+ int timeline_count;
+
+ /* optional */
+ int fence_eventfd;
+};
+
+/* arguments handed to a worker at creation */
+struct render_context_args {
+ bool valid;
+
+ uint32_t init_flags;
+
+ uint32_t ctx_id;
+ char ctx_name[32];
+
+ /* render_context_main always takes ownership even on errors */
+ int ctx_fd;
+};
+
+bool
+render_context_main(const struct render_context_args *args);
+
+void
+render_context_update_timeline(struct render_context *ctx,
+ uint32_t ring_idx,
+ uint32_t val);
+
+#endif /* RENDER_CONTEXT_H */
diff --git a/server/render_protocol.h b/server/render_protocol.h
new file mode 100644
index 00000000..778adcfc
--- /dev/null
+++ b/server/render_protocol.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_PROTOCOL_H
+#define RENDER_PROTOCOL_H
+
+#include <stdint.h>
+
+#include "virgl_resource.h"
+#include "virglrenderer.h"
+#include "virglrenderer_hw.h"
+
+/* this covers the command line options and the socket type */
+#define RENDER_SERVER_VERSION 0
+
+/* The protocol itself is internal to virglrenderer. There is no backward
+ * compatibility to be kept.
+ */
+
+/* client ops, which are handled by the server process */
+enum render_client_op {
+ RENDER_CLIENT_OP_NOP = 0,
+ RENDER_CLIENT_OP_INIT,
+ RENDER_CLIENT_OP_RESET,
+ RENDER_CLIENT_OP_CREATE_CONTEXT,
+ RENDER_CLIENT_OP_DESTROY_CONTEXT,
+
+ /* sentinel; also sizes the client dispatch table */
+ RENDER_CLIENT_OP_COUNT,
+};
+
+/* context ops, which are handled by workers (subprocesses or threads) created
+ * by the server process
+ */
+enum render_context_op {
+ RENDER_CONTEXT_OP_NOP = 0,
+ RENDER_CONTEXT_OP_INIT,
+ RENDER_CONTEXT_OP_CREATE_RESOURCE,
+ RENDER_CONTEXT_OP_IMPORT_RESOURCE,
+ RENDER_CONTEXT_OP_DESTROY_RESOURCE,
+ RENDER_CONTEXT_OP_SUBMIT_CMD,
+ RENDER_CONTEXT_OP_SUBMIT_FENCE,
+
+ /* sentinel; also sizes the context dispatch table */
+ RENDER_CONTEXT_OP_COUNT,
+};
+
+/* every client request starts with this header; the dispatcher switches
+ * on op
+ */
+struct render_client_op_header {
+ enum render_client_op op;
+};
+
+struct render_client_op_nop_request {
+ struct render_client_op_header header;
+};
+
+/* Initialize virglrenderer.
+ *
+ * This roughly corresponds to virgl_renderer_init.
+ */
+struct render_client_op_init_request {
+ struct render_client_op_header header;
+ uint32_t flags; /* VIRGL_RENDERER_USE_* and others */
+};
+
+/* Remove all contexts.
+ *
+ * This roughly corresponds to virgl_renderer_reset.
+ */
+struct render_client_op_reset_request {
+ struct render_client_op_header header;
+};
+
+/* Create a context, which will be serviced by a worker.
+ *
+ * See also the comment before main() for the process model.
+ *
+ * This roughly corresponds to virgl_renderer_context_create_with_flags.
+ */
+struct render_client_op_create_context_request {
+ struct render_client_op_header header;
+ uint32_t ctx_id;
+ char ctx_name[32];
+};
+
+struct render_client_op_create_context_reply {
+ bool ok;
+ /* followed by 1 socket fd if ok */
+};
+
+/* Destroy a context, including the worker.
+ *
+ * This roughly corresponds to virgl_renderer_context_destroy.
+ */
+struct render_client_op_destroy_context_request {
+ struct render_client_op_header header;
+ uint32_t ctx_id;
+};
+
+/* sized by the largest request; receive buffers use sizeof(union) */
+union render_client_op_request {
+ struct render_client_op_header header;
+ struct render_client_op_nop_request nop;
+ struct render_client_op_init_request init;
+ struct render_client_op_reset_request reset;
+ struct render_client_op_create_context_request create_context;
+ struct render_client_op_destroy_context_request destroy_context;
+};
+
+/* every context request starts with this header; the dispatcher
+ * switches on op
+ */
+struct render_context_op_header {
+ enum render_context_op op;
+};
+
+struct render_context_op_nop_request {
+ struct render_context_op_header header;
+};
+
+/* Initialize the context.
+ *
+ * The shmem is required and currently holds an array of atomic_uint. Each
+ * atomic_uint represents the current sequence number of a ring (as defined by
+ * the virtio-gpu spec).
+ *
+ * The eventfd is optional. When given, it will be written to when there are
+ * changes to any of the sequence numbers.
+ *
+ * This roughly corresponds to virgl_renderer_context_create_with_flags.
+ */
+struct render_context_op_init_request {
+ struct render_context_op_header header;
+ uint32_t flags; /* VIRGL_RENDERER_CONTEXT_FLAG_*/
+ size_t shmem_size;
+ /* followed by 1 shmem fd and optionally 1 eventfd */
+};
+
+/* Export a blob resource from the context
+ *
+ * This roughly corresponds to:
+ * - virgl_renderer_resource_create_blob
+ * - virgl_renderer_resource_get_map_info
+ * - virgl_renderer_resource_export_blob
+ * - virgl_renderer_ctx_attach_resource
+ */
+struct render_context_op_create_resource_request {
+ struct render_context_op_header header;
+ uint32_t res_id;
+ uint64_t blob_id;
+ uint64_t blob_size;
+ uint32_t blob_flags; /* VIRGL_RENDERER_BLOB_FLAG_* */
+};
+
+/* reply carries VIRGL_RESOURCE_FD_INVALID on failure */
+struct render_context_op_create_resource_reply {
+ enum virgl_resource_fd_type fd_type;
+ uint32_t map_info; /* VIRGL_RENDERER_MAP_* */
+ /* followed by 1 fd if not VIRGL_RESOURCE_FD_INVALID */
+};
+
+/* Import a blob resource to the context
+ *
+ * This roughly corresponds to:
+ * - virgl_renderer_resource_import_blob
+ * - virgl_renderer_ctx_attach_resource
+ */
+struct render_context_op_import_resource_request {
+ struct render_context_op_header header;
+ uint32_t res_id;
+ enum virgl_resource_fd_type fd_type;
+ uint64_t size;
+ /* followed by 1 fd */
+};
+
+/* Free a blob resource from the context
+ *
+ * This roughly corresponds to:
+ * - virgl_renderer_resource_unref
+ */
+struct render_context_op_destroy_resource_request {
+ struct render_context_op_header header;
+ uint32_t res_id;
+};
+
+/* Submit a small command stream to the context.
+ *
+ * The size limit depends on the socket type. Currently, SOCK_SEQPACKET is
+ * used and the size limit is best treated as one page.
+ *
+ * This roughly corresponds to virgl_renderer_submit_cmd.
+ */
+struct render_context_op_submit_cmd_request {
+ struct render_context_op_header header;
+ size_t size;
+ char cmd[256];
+ /* if size > sizeof(cmd), followed by (size - sizeof(cmd)) bytes in another
+ * message; size still must be small
+ */
+};
+
+struct render_context_op_submit_cmd_reply {
+ bool ok;
+};
+
+/* Submit a fence to the context.
+ *
+ * This submits a fence to the specified ring. When the fence signals, the
+ * current sequence number of the ring in the shmem is updated.
+ *
+ * This roughly corresponds to virgl_renderer_context_create_fence.
+ */
+struct render_context_op_submit_fence_request {
+ struct render_context_op_header header;
+ uint32_t flags; /* VIRGL_RENDERER_FENCE_FLAG_* */
+ /* TODO fix virgl_renderer_context_create_fence to use ring_index */
+ uint32_t ring_index;
+ uint32_t seqno;
+};
+
+/* sized by the largest request; receive buffers use sizeof(union) */
+union render_context_op_request {
+ struct render_context_op_header header;
+ struct render_context_op_nop_request nop;
+ struct render_context_op_init_request init;
+ struct render_context_op_create_resource_request create_resource;
+ struct render_context_op_import_resource_request import_resource;
+ struct render_context_op_destroy_resource_request destroy_resource;
+ struct render_context_op_submit_cmd_request submit_cmd;
+ struct render_context_op_submit_fence_request submit_fence;
+};
+
+#endif /* RENDER_PROTOCOL_H */
diff --git a/server/render_server.c b/server/render_server.c
new file mode 100644
index 00000000..6b129e4f
--- /dev/null
+++ b/server/render_server.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_server.h"
+
+#include <errno.h>
+#include <getopt.h>
+#include <poll.h>
+#include <unistd.h>
+
+#include "render_client.h"
+#include "render_worker.h"
+
+#define RENDER_SERVER_MAX_WORKER_COUNT 256
+
+/* indices into the pollfd array used by the main loop */
+enum render_server_poll_type {
+ RENDER_SERVER_POLL_SOCKET = 0,
+ RENDER_SERVER_POLL_SIGCHLD /* optional */,
+ RENDER_SERVER_POLL_COUNT,
+};
+
+/* Fill the pollfd array with the client socket and (when the worker
+ * jail provides one) the SIGCHLD fd.  Returns the number of valid
+ * entries (1 or 2).
+ */
+static int
+render_server_init_poll_fds(struct render_server *srv,
+ struct pollfd poll_fds[static RENDER_SERVER_POLL_COUNT])
+{
+ const int socket_fd = srv->client->socket.fd;
+ const int sigchld_fd = render_worker_jail_get_sigchld_fd(srv->worker_jail);
+
+ poll_fds[RENDER_SERVER_POLL_SOCKET] = (const struct pollfd){
+ .fd = socket_fd,
+ .events = POLLIN,
+ };
+ poll_fds[RENDER_SERVER_POLL_SIGCHLD] = (const struct pollfd){
+ .fd = sigchld_fd,
+ .events = POLLIN,
+ };
+
+ return sigchld_fd >= 0 ? 2 : 1;
+}
+
+/* Block in poll() until an fd is ready, retrying on EINTR/EAGAIN.
+ * Returns false on any other poll failure.
+ */
+static bool
+render_server_poll(UNUSED struct render_server *srv,
+ struct pollfd *poll_fds,
+ int poll_fd_count)
+{
+ int ret;
+ do {
+ ret = poll(poll_fds, poll_fd_count, -1);
+ } while (ret < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (ret <= 0) {
+ render_log("failed to poll in the main loop");
+ return false;
+ }
+
+ return true;
+}
+
+/* Main loop: poll the client socket and the SIGCHLD fd, dispatching
+ * client requests and reaping dead workers until the state leaves
+ * RENDER_SERVER_STATE_RUN or an error occurs.
+ */
+static bool
+render_server_run(struct render_server *srv)
+{
+ struct render_client *client = srv->client;
+
+ struct pollfd poll_fds[RENDER_SERVER_POLL_COUNT];
+ const int poll_fd_count = render_server_init_poll_fds(srv, poll_fds);
+
+ while (srv->state == RENDER_SERVER_STATE_RUN) {
+ if (!render_server_poll(srv, poll_fds, poll_fd_count))
+ return false;
+
+ if (poll_fds[RENDER_SERVER_POLL_SOCKET].revents) {
+ if (!render_client_dispatch(client))
+ return false;
+ }
+
+ if (poll_fds[RENDER_SERVER_POLL_SIGCHLD].revents) {
+ if (!render_worker_jail_reap_workers(srv->worker_jail))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Release whatever render_server_init managed to set up; safe to call
+ * on a partially-initialized server.
+ */
+static void
+render_server_fini(struct render_server *srv)
+{
+ if (srv->client)
+ render_client_destroy(srv->client);
+
+ if (srv->worker_jail)
+ render_worker_jail_destroy(srv->worker_jail);
+
+ /* only still owned here if render_client_create never ran/failed */
+ if (srv->client_fd >= 0)
+ close(srv->client_fd);
+}
+
+/* Parse command line options into srv.  Requires --socket-fd to name a
+ * valid SOCK_SEQPACKET fd; rejects unknown options and any non-option
+ * arguments.
+ */
+static bool
+render_server_parse_options(struct render_server *srv, int argc, char **argv)
+{
+ /* option values double as short-option chars, starting at 'a' */
+ enum {
+ OPT_SOCKET_FD = 'a',
+ OPT_WORKER_SECCOMP_BPF,
+ OPT_WORKER_SECCOMP_MINIJAIL_POLICY,
+ OPT_WORKER_SECCOMP_MINIJAIL_LOG,
+ OPT_COUNT,
+ };
+ static const struct option options[] = {
+ { "socket-fd", required_argument, NULL, OPT_SOCKET_FD },
+ { "worker-seccomp-bpf", required_argument, NULL, OPT_WORKER_SECCOMP_BPF },
+ { "worker-seccomp-minijail-policy", required_argument, NULL,
+ OPT_WORKER_SECCOMP_MINIJAIL_POLICY },
+ { "worker-seccomp-minijail-log", no_argument, NULL,
+ OPT_WORKER_SECCOMP_MINIJAIL_LOG },
+ { NULL, 0, NULL, 0 }
+ };
+ static_assert(OPT_COUNT <= 'z', "");
+
+ while (true) {
+ const int ret = getopt_long(argc, argv, "", options, NULL);
+ if (ret == -1)
+ break;
+
+ switch (ret) {
+ case OPT_SOCKET_FD:
+ srv->client_fd = atoi(optarg);
+ break;
+ case OPT_WORKER_SECCOMP_BPF:
+ srv->worker_seccomp_bpf = optarg;
+ break;
+ case OPT_WORKER_SECCOMP_MINIJAIL_POLICY:
+ srv->worker_seccomp_minijail_policy = optarg;
+ break;
+ case OPT_WORKER_SECCOMP_MINIJAIL_LOG:
+ srv->worker_seccomp_minijail_log = true;
+ break;
+ default:
+ render_log("unknown option specified");
+ return false;
+ break;
+ }
+ }
+
+ if (optind < argc) {
+ render_log("non-option arguments specified");
+ return false;
+ }
+
+ if (srv->client_fd < 0 || !render_socket_is_seqpacket(srv->client_fd)) {
+ render_log("no valid client fd specified");
+ return false;
+ }
+
+ return true;
+}
+
+/* Initialize the server: parse options, pick the seccomp filter mode,
+ * create the worker jail and the single client.  On failure everything
+ * set up so far is released via render_server_fini.
+ */
+static bool
+render_server_init(struct render_server *srv,
+ int argc,
+ char **argv,
+ struct render_context_args *ctx_args)
+{
+ memset(srv, 0, sizeof(*srv));
+ srv->state = RENDER_SERVER_STATE_RUN;
+ srv->context_args = ctx_args;
+ srv->client_fd = -1;
+
+ if (!render_server_parse_options(srv, argc, argv))
+ return false;
+
+ /* minijail-log takes precedence over bpf, which takes precedence
+ * over a plain minijail policy
+ */
+ enum render_worker_jail_seccomp_filter seccomp_filter =
+ RENDER_WORKER_JAIL_SECCOMP_NONE;
+ const char *seccomp_path = NULL;
+ if (srv->worker_seccomp_minijail_log && srv->worker_seccomp_minijail_policy) {
+ seccomp_filter = RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY_LOG;
+ seccomp_path = srv->worker_seccomp_minijail_policy;
+ } else if (srv->worker_seccomp_bpf) {
+ seccomp_filter = RENDER_WORKER_JAIL_SECCOMP_BPF;
+ seccomp_path = srv->worker_seccomp_bpf;
+ } else if (srv->worker_seccomp_minijail_policy) {
+ seccomp_filter = RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY;
+ seccomp_path = srv->worker_seccomp_minijail_policy;
+ }
+
+ srv->worker_jail = render_worker_jail_create(RENDER_SERVER_MAX_WORKER_COUNT,
+ seccomp_filter, seccomp_path);
+ if (!srv->worker_jail) {
+ render_log("failed to create worker jail");
+ goto fail;
+ }
+
+ srv->client = render_client_create(srv, srv->client_fd);
+ if (!srv->client) {
+ render_log("failed to create client");
+ goto fail;
+ }
+ /* ownership transferred */
+ srv->client_fd = -1;
+
+ return true;
+
+fail:
+ render_server_fini(srv);
+ return false;
+}
+
+/* Entry point of the server process: init, run the main loop, tear
+ * down.  Returns the loop's success status.
+ */
+bool
+render_server_main(int argc, char **argv, struct render_context_args *ctx_args)
+{
+ struct render_server srv;
+ if (!render_server_init(&srv, argc, argv, ctx_args))
+ return false;
+
+ const bool ok = render_server_run(&srv);
+ render_server_fini(&srv);
+
+ return ok;
+}
diff --git a/server/render_server.h b/server/render_server.h
new file mode 100644
index 00000000..ba71ce21
--- /dev/null
+++ b/server/render_server.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_SERVER_H
+#define RENDER_SERVER_H
+
+#include "render_common.h"
+
+/* RUN is the main-loop state; SUBPROCESS marks a forked context worker */
+enum render_server_state {
+ RENDER_SERVER_STATE_RUN,
+ RENDER_SERVER_STATE_SUBPROCESS,
+};
+
+struct render_server {
+ enum render_server_state state;
+
+ /* only initialized in subprocesses */
+ struct render_context_args *context_args;
+
+ /* options */
+ int client_fd;
+ const char *worker_seccomp_bpf;
+ const char *worker_seccomp_minijail_policy;
+ bool worker_seccomp_minijail_log;
+
+ struct render_worker_jail *worker_jail;
+
+ /* only one client in the current design */
+ struct render_client *client;
+};
+
+bool
+render_server_main(int argc, char **argv, struct render_context_args *ctx_args);
+
+#endif /* RENDER_SERVER_H */
diff --git a/server/render_socket.c b/server/render_socket.c
new file mode 100644
index 00000000..58edcf46
--- /dev/null
+++ b/server/render_socket.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_socket.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#define RENDER_SOCKET_MAX_FD_COUNT 8
+
+/* The socket pair between the server process and the client process is set up
+ * by the client process (or yet another process). Because render_server_run
+ * does not poll yet, the fd is expected to be blocking.
+ *
+ * We also expect the fd to be always valid. If the client process dies, the
+ * fd becomes invalid and is considered a fatal error.
+ *
+ * There is also a socket pair between each context worker and the client
+ * process. The pair is set up by render_socket_pair here.
+ *
+ * The fd is also expected to be blocking. When the client process closes its
+ * end of the socket pair, the context worker terminates.
+ */
+bool
+render_socket_pair(int out_fds[static 2])
+{
+   int ret = socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, out_fds); /* SEQPACKET preserves message boundaries */
+   if (ret) {
+      render_log("failed to create socket pair");
+      return false;
+   }
+
+   return true;
+}
+
+bool
+render_socket_is_seqpacket(int fd)
+{
+   int type; /* receives the socket type (SOCK_STREAM, SOCK_SEQPACKET, ...) */
+   socklen_t len = sizeof(type);
+   if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len))
+      return false; /* not a socket, or bad fd */
+   return type == SOCK_SEQPACKET;
+}
+
+void
+render_socket_init(struct render_socket *socket, int fd)
+{
+   assert(fd >= 0);
+   *socket = (struct render_socket){
+      .fd = fd, /* takes ownership; closed by render_socket_fini */
+   };
+}
+
+void
+render_socket_fini(struct render_socket *socket)
+{
+   close(socket->fd); /* releases the fd owned since render_socket_init */
+}
+
+static const int *
+get_received_fds(const struct msghdr *msg, int *out_count)
+{
+   const struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+   if (unlikely(!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
+                cmsg->cmsg_type != SCM_RIGHTS || cmsg->cmsg_len < CMSG_LEN(0))) {
+      *out_count = 0; /* no (or malformed) SCM_RIGHTS payload */
+      return NULL;
+   }
+
+   *out_count = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int); /* payload bytes -> fd count */
+   return (const int *)CMSG_DATA(cmsg);
+}
+
+static bool
+render_socket_recvmsg(struct render_socket *socket, struct msghdr *msg, size_t *out_size)
+{
+   do {
+      const ssize_t s = recvmsg(socket->fd, msg, MSG_CMSG_CLOEXEC); /* blocking fd; CLOEXEC any received fds */
+      if (unlikely(s <= 0)) {
+         if (!s)
+            return false; /* 0 bytes: peer closed the connection */
+
+         if (errno == EAGAIN || errno == EINTR)
+            continue; /* transient; retry the receive */
+
+         render_log("failed to receive message: %s", strerror(errno));
+         return false;
+      }
+
+      if (unlikely(msg->msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+         render_log("failed to receive message: truncated");
+
+         int fd_count;
+         const int *fds = get_received_fds(msg, &fd_count);
+         for (int i = 0; i < fd_count; i++)
+            close(fds[i]); /* don't leak fds attached to a discarded message */
+
+         return false;
+      }
+
+      *out_size = s;
+      return true;
+   } while (true);
+}
+
+static bool
+render_socket_receive_request_internal(struct render_socket *socket,
+                                       void *data,
+                                       size_t max_size,
+                                       size_t *out_size,
+                                       int *fds,
+                                       int max_fd_count,
+                                       int *out_fd_count)
+{
+   assert(data && max_size);
+   struct msghdr msg = {
+      .msg_iov =
+         &(struct iovec){
+            .iov_base = data,
+            .iov_len = max_size,
+         },
+      .msg_iovlen = 1,
+   };
+
+   char cmsg_buf[CMSG_SPACE(sizeof(*fds) * RENDER_SOCKET_MAX_FD_COUNT)]; /* sized for the worst case */
+   if (max_fd_count) {
+      assert(fds && max_fd_count <= RENDER_SOCKET_MAX_FD_COUNT);
+      msg.msg_control = cmsg_buf;
+      msg.msg_controllen = CMSG_SPACE(sizeof(*fds) * max_fd_count);
+
+      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+      memset(cmsg, 0, sizeof(*cmsg)); /* presumably so a stale header reads as "no fds" when nothing arrives -- confirm */
+   }
+
+   if (!render_socket_recvmsg(socket, &msg, out_size))
+      return false;
+
+   if (max_fd_count) {
+      int received_fd_count;
+      const int *received_fds = get_received_fds(&msg, &received_fd_count);
+      assert(received_fd_count <= max_fd_count); /* overflow would have set MSG_CTRUNC, rejected above */
+
+      memcpy(fds, received_fds, sizeof(*fds) * received_fd_count);
+      *out_fd_count = received_fd_count;
+   } else if (out_fd_count) {
+      *out_fd_count = 0;
+   }
+
+   return true;
+}
+
+bool
+render_socket_receive_request(struct render_socket *socket,
+ void *data,
+ size_t max_size,
+ size_t *out_size)
+{
+ return render_socket_receive_request_internal(socket, data, max_size, out_size, NULL,
+ 0, NULL);
+}
+
+bool
+render_socket_receive_request_with_fds(struct render_socket *socket,
+ void *data,
+ size_t max_size,
+ size_t *out_size,
+ int *fds,
+ int max_fd_count,
+ int *out_fd_count)
+{
+ return render_socket_receive_request_internal(socket, data, max_size, out_size, fds,
+ max_fd_count, out_fd_count);
+}
+
+bool
+render_socket_receive_data(struct render_socket *socket, void *data, size_t size)
+{
+   size_t received_size;
+   if (!render_socket_receive_request(socket, data, size, &received_size))
+      return false;
+
+   if (size != received_size) { /* exact-size read; anything else is a protocol error */
+      render_log("failed to receive data: expected %zu but received %zu", size,
+                 received_size);
+      return false;
+   }
+
+   return true;
+}
+
+static bool
+render_socket_sendmsg(struct render_socket *socket, const struct msghdr *msg)
+{
+   do {
+      const ssize_t s = sendmsg(socket->fd, msg, MSG_NOSIGNAL); /* NOSIGNAL: get EPIPE instead of SIGPIPE */
+      if (unlikely(s < 0)) {
+         if (errno == EAGAIN || errno == EINTR)
+            continue; /* transient; retry the send */
+
+         render_log("failed to send message: %s", strerror(errno));
+         return false;
+      }
+
+      /* no partial send since the socket type is SOCK_SEQPACKET */
+      assert(msg->msg_iovlen == 1 && msg->msg_iov[0].iov_len == (size_t)s);
+      return true;
+   } while (true);
+}
+
+static inline bool
+render_socket_send_reply_internal(struct render_socket *socket,
+                                  const void *data,
+                                  size_t size,
+                                  const int *fds,
+                                  int fd_count)
+{
+   assert(data && size);
+   struct msghdr msg = {
+      .msg_iov =
+         &(struct iovec){
+            .iov_base = (void *)data, /* iovec is not const-correct; data is only read */
+            .iov_len = size,
+         },
+      .msg_iovlen = 1,
+   };
+
+   char cmsg_buf[CMSG_SPACE(sizeof(*fds) * RENDER_SOCKET_MAX_FD_COUNT)];
+   if (fd_count) {
+      assert(fds && fd_count <= RENDER_SOCKET_MAX_FD_COUNT);
+      msg.msg_control = cmsg_buf;
+      msg.msg_controllen = CMSG_SPACE(sizeof(*fds) * fd_count);
+
+      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+      cmsg->cmsg_level = SOL_SOCKET;
+      cmsg->cmsg_type = SCM_RIGHTS; /* pass the fds across the socket */
+      cmsg->cmsg_len = CMSG_LEN(sizeof(*fds) * fd_count);
+      memcpy(CMSG_DATA(cmsg), fds, sizeof(*fds) * fd_count);
+   }
+
+   return render_socket_sendmsg(socket, &msg);
+}
+
+bool
+render_socket_send_reply(struct render_socket *socket, const void *data, size_t size)
+{
+ return render_socket_send_reply_internal(socket, data, size, NULL, 0);
+}
+
+bool
+render_socket_send_reply_with_fds(struct render_socket *socket,
+ const void *data,
+ size_t size,
+ const int *fds,
+ int fd_count)
+{
+ return render_socket_send_reply_internal(socket, data, size, fds, fd_count);
+}
diff --git a/server/render_socket.h b/server/render_socket.h
new file mode 100644
index 00000000..ead1f8cc
--- /dev/null
+++ b/server/render_socket.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_SOCKET_H
+#define RENDER_SOCKET_H
+
+#include "render_common.h"
+
+struct render_socket {
+ int fd;
+};
+
+bool
+render_socket_pair(int out_fds[static 2]);
+
+bool
+render_socket_is_seqpacket(int fd);
+
+void
+render_socket_init(struct render_socket *socket, int fd);
+
+void
+render_socket_fini(struct render_socket *socket);
+
+bool
+render_socket_receive_request(struct render_socket *socket,
+ void *data,
+ size_t max_size,
+ size_t *out_size);
+
+bool
+render_socket_receive_request_with_fds(struct render_socket *socket,
+ void *data,
+ size_t max_size,
+ size_t *out_size,
+ int *fds,
+ int max_fd_count,
+ int *out_fd_count);
+
+bool
+render_socket_receive_data(struct render_socket *socket, void *data, size_t size);
+
+bool
+render_socket_send_reply(struct render_socket *socket, const void *data, size_t size);
+
+bool
+render_socket_send_reply_with_fds(struct render_socket *socket,
+ const void *data,
+ size_t size,
+ const int *fds,
+ int fd_count);
+
+#endif /* RENDER_SOCKET_H */
diff --git a/server/render_virgl.c b/server/render_virgl.c
new file mode 100644
index 00000000..fb6b5b7a
--- /dev/null
+++ b/server/render_virgl.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_virgl.h"
+
+#include "virglrenderer.h"
+
+#include "render_context.h"
+
+struct render_virgl render_virgl_internal = {
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ .struct_mutex = _MTX_INITIALIZER_NP,
+ .dispatch_mutex = _MTX_INITIALIZER_NP,
+#endif
+ .init_count = 0,
+};
+
+static struct render_virgl *
+render_virgl_lock_struct(void)
+{
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ mtx_lock(&render_virgl_internal.struct_mutex);
+#endif
+ return &render_virgl_internal;
+}
+
+static void
+render_virgl_unlock_struct(void)
+{
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ mtx_unlock(&render_virgl_internal.struct_mutex);
+#endif
+}
+
+static struct render_context *
+render_virgl_lookup_context(uint32_t ctx_id)
+{
+   const struct render_virgl *virgl = render_virgl_lock_struct();
+   struct render_context *ctx = NULL;
+
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+   list_for_each_entry (struct render_context, iter, &virgl->contexts, head) {
+      if (iter->ctx_id == ctx_id) {
+         ctx = iter;
+         break;
+      }
+   }
+#else
+   assert(list_is_singular(&virgl->contexts)); /* a worker process only sees its own context */
+   ctx = list_first_entry(&virgl->contexts, struct render_context, head);
+   assert(ctx->ctx_id == ctx_id);
+   (void)ctx_id; /* unused when asserts are compiled out */
+#endif
+
+   render_virgl_unlock_struct();
+
+   return ctx;
+}
+
+static void
+render_virgl_debug_callback(const char *fmt, va_list ap)
+{
+   char buf[1024]; /* vsnprintf truncates longer messages safely */
+   vsnprintf(buf, sizeof(buf), fmt, ap);
+   render_log("%s", buf); /* never pass buf as a format string: it may contain '%' */
+}
+
+static void
+render_virgl_cb_write_context_fence(UNUSED void *cookie,
+ uint32_t ctx_id,
+ uint32_t ring_idx,
+ uint64_t fence_id)
+{
+ struct render_context *ctx = render_virgl_lookup_context(ctx_id);
+ assert(ctx);
+
+ const uint32_t seqno = (uint32_t)fence_id;
+ render_context_update_timeline(ctx, ring_idx, seqno);
+}
+
+static const struct virgl_renderer_callbacks render_virgl_cbs = {
+ .version = VIRGL_RENDERER_CALLBACKS_VERSION,
+ .write_context_fence = render_virgl_cb_write_context_fence,
+};
+
+void
+render_virgl_add_context(struct render_context *ctx)
+{
+ struct render_virgl *virgl = render_virgl_lock_struct();
+ list_addtail(&ctx->head, &virgl->contexts);
+ render_virgl_unlock_struct();
+}
+
+void
+render_virgl_remove_context(struct render_context *ctx)
+{
+ render_virgl_lock_struct();
+ list_del(&ctx->head);
+ render_virgl_unlock_struct();
+}
+
+void
+render_virgl_fini(void)
+{
+   struct render_virgl *virgl = render_virgl_lock_struct();
+
+   if (virgl->init_count) {
+      virgl->init_count--; /* nested init: only the last fini tears down */
+      if (!virgl->init_count) {
+         render_virgl_lock_dispatch();
+         virgl_renderer_cleanup(virgl); /* virgl doubles as the cookie passed at init */
+         render_virgl_unlock_dispatch();
+      }
+   }
+
+   render_virgl_unlock_struct();
+}
+
+bool
+render_virgl_init(uint32_t init_flags)
+{
+   /* we only care if virgl and/or venus are enabled */
+   init_flags &= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_NO_VIRGL;
+
+   /* always use sync thread and async fence cb for low latency */
+   init_flags |= VIRGL_RENDERER_THREAD_SYNC | VIRGL_RENDERER_ASYNC_FENCE_CB |
+                 VIRGL_RENDERER_USE_EXTERNAL_BLOB;
+
+   struct render_virgl *virgl = render_virgl_lock_struct();
+
+   if (virgl->init_count) {
+      if (virgl->init_flags != init_flags) { /* nested init must use identical flags */
+         render_log("failed to re-initialize with flags 0x%x", init_flags);
+         goto fail;
+      }
+   } else {
+      render_virgl_lock_dispatch();
+      virgl_set_debug_callback(render_virgl_debug_callback);
+      int ret = virgl_renderer_init(virgl, init_flags,
+                                    (struct virgl_renderer_callbacks *)&render_virgl_cbs);
+      render_virgl_unlock_dispatch();
+      if (ret) {
+         render_log("failed to initialize virglrenderer");
+         goto fail;
+      }
+
+      list_inithead(&virgl->contexts);
+      virgl->init_flags = init_flags;
+   }
+
+   virgl->init_count++; /* matched by a render_virgl_fini */
+
+   render_virgl_unlock_struct();
+
+   return true;
+
+fail:
+   render_virgl_unlock_struct();
+   return false;
+}
diff --git a/server/render_virgl.h b/server/render_virgl.h
new file mode 100644
index 00000000..cb362f4e
--- /dev/null
+++ b/server/render_virgl.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_VIRGL_H
+#define RENDER_VIRGL_H
+
+#include "render_common.h"
+
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+#include "c11/threads.h"
+#endif
+
+/* Workers call into virglrenderer. When they are processes, not much care is
+ * required. We just want to be careful that the server process might have
+ * initialized virglrenderer before workers are forked.
+ *
+ * But when workers are threads, we need to grab a lock to protect
+ * virglrenderer.
+ *
+ * TODO skip virglrenderer.h and go straight to vkr_renderer.h. That allows
+ * us to remove this file.
+ */
+struct render_virgl {
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ /* this protects the struct */
+ mtx_t struct_mutex;
+ /* this protects virglrenderer */
+ mtx_t dispatch_mutex;
+#endif
+
+ /* for nested initialization */
+ int init_count;
+ uint32_t init_flags;
+
+ struct list_head contexts;
+};
+
+extern struct render_virgl render_virgl_internal;
+
+bool
+render_virgl_init(uint32_t init_flags);
+
+void
+render_virgl_fini(void);
+
+void
+render_virgl_add_context(struct render_context *ctx);
+
+void
+render_virgl_remove_context(struct render_context *ctx);
+
+static inline void
+render_virgl_lock_dispatch(void)
+{
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ mtx_lock(&render_virgl_internal.dispatch_mutex);
+#endif
+}
+
+static inline void
+render_virgl_unlock_dispatch(void)
+{
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ mtx_unlock(&render_virgl_internal.dispatch_mutex);
+#endif
+}
+
+#endif /* RENDER_VIRGL_H */
diff --git a/server/render_worker.c b/server/render_worker.c
new file mode 100644
index 00000000..35ef1d0d
--- /dev/null
+++ b/server/render_worker.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "render_worker.h"
+
+/* One and only one of ENABLE_RENDER_SERVER_WORKER_* must be set.
+ *
+ * With ENABLE_RENDER_SERVER_WORKER_PROCESS, each worker is a subprocess
+ * forked from the server process.
+ *
+ * With ENABLE_RENDER_SERVER_WORKER_THREAD, each worker is a thread of the
+ * server process.
+ *
+ * With ENABLE_RENDER_SERVER_WORKER_MINIJAIL, each worker is a subprocess
+ * forked from the server process, jailed with minijail.
+ */
+#if (ENABLE_RENDER_SERVER_WORKER_PROCESS + ENABLE_RENDER_SERVER_WORKER_THREAD + \
+ ENABLE_RENDER_SERVER_WORKER_MINIJAIL) != 1
+#error "no worker defined"
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/signalfd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <threads.h>
+#include <unistd.h>
+
+struct minijail;
+
+struct render_worker_jail {
+ int max_worker_count;
+
+ int sigchld_fd;
+ struct minijail *minijail;
+
+ struct list_head workers;
+ int worker_count;
+};
+
+struct render_worker {
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+ thrd_t thread;
+#else
+ pid_t pid;
+#endif
+ bool destroyed;
+ bool reaped;
+
+ struct list_head head;
+
+ char thread_data[];
+};
+
+#ifdef ENABLE_RENDER_SERVER_WORKER_MINIJAIL
+
+#include <fcntl.h>
+#include <libminijail.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+static bool
+load_bpf_program(struct sock_fprog *prog, const char *path)
+{
+   int fd = -1;
+   void *data = NULL;
+
+   fd = open(path, O_RDONLY);
+   if (fd < 0)
+      goto fail;
+
+   const off_t size = lseek(fd, 0, SEEK_END);
+   if (size <= 0 || size % sizeof(struct sock_filter))
+      goto fail; /* must be a whole number of sock_filter entries */
+   lseek(fd, 0, SEEK_SET);
+
+   data = malloc(size);
+   if (!data)
+      goto fail;
+
+   off_t cur = 0;
+   while (cur < size) { /* short reads are possible; loop until complete */
+      const ssize_t r = read(fd, (char *)data + cur, size - cur);
+      if (r <= 0)
+         goto fail;
+      cur += r;
+   }
+
+   close(fd);
+
+   prog->len = size / sizeof(struct sock_filter);
+   prog->filter = data; /* ownership passes to the caller, who must free() it */
+
+   return true;
+
+fail:
+   if (data)
+      free(data);
+   if (fd >= 0)
+      close(fd);
+   return false;
+}
+
+static struct minijail *
+create_minijail(enum render_worker_jail_seccomp_filter seccomp_filter,
+                const char *seccomp_path)
+{
+   struct minijail *j = minijail_new(); /* NOTE(review): NULL return not checked -- confirm minijail tolerates it */
+
+   /* TODO namespaces and many more */
+   minijail_no_new_privs(j);
+
+   if (seccomp_filter != RENDER_WORKER_JAIL_SECCOMP_NONE) {
+      if (seccomp_filter == RENDER_WORKER_JAIL_SECCOMP_BPF) {
+         struct sock_fprog prog;
+         if (!load_bpf_program(&prog, seccomp_path)) {
+            minijail_destroy(j);
+            return NULL;
+         }
+
+         minijail_set_seccomp_filters(j, &prog);
+         free(prog.filter); /* freed immediately; the code assumes minijail copied the program */
+      } else {
+         if (seccomp_filter == RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY_LOG)
+            minijail_log_seccomp_filter_failures(j);
+         minijail_parse_seccomp_filters(j, seccomp_path); /* NOTE(review): return value unchecked; a bad policy is silently ignored */
+      }
+
+      minijail_use_seccomp_filter(j);
+   }
+
+   return j;
+}
+
+static pid_t
+fork_minijail(const struct minijail *template)
+{
+   struct minijail *j = minijail_new();
+   if (!j)
+      return -1;
+
+   /* is this faster? */
+   if (minijail_copy_jail(template, j)) { /* clone the prepared jail instead of rebuilding it */
+      minijail_destroy(j);
+      return -1;
+   }
+
+   pid_t pid = minijail_fork(j);
+   minijail_destroy(j); /* frees the parent-side handle only */
+
+   return pid;
+}
+
+#endif /* ENABLE_RENDER_SERVER_WORKER_MINIJAIL */
+
+#ifndef ENABLE_RENDER_SERVER_WORKER_THREAD
+
+static int
+create_sigchld_fd(void)
+{
+   const int signum = SIGCHLD;
+
+   sigset_t set;
+   if (sigemptyset(&set) || sigaddset(&set, signum)) {
+      render_log("failed to initialize sigset_t");
+      return -1;
+   }
+
+   int fd = signalfd(-1, &set, SFD_NONBLOCK | SFD_CLOEXEC);
+   if (fd < 0) {
+      render_log("failed to create signalfd");
+      return -1;
+   }
+
+   if (sigprocmask(SIG_BLOCK, &set, NULL)) { /* SIGCHLD must be blocked for signalfd delivery */
+      render_log("failed to call sigprocmask");
+      close(fd);
+      return -1;
+   }
+
+   return fd;
+}
+
+#endif /* !ENABLE_RENDER_SERVER_WORKER_THREAD */
+
+static void
+render_worker_jail_add_worker(struct render_worker_jail *jail,
+ struct render_worker *worker)
+{
+ list_add(&worker->head, &jail->workers);
+ jail->worker_count++;
+}
+
+static void
+render_worker_jail_remove_worker(struct render_worker_jail *jail,
+ struct render_worker *worker)
+{
+ list_del(&worker->head);
+ jail->worker_count--;
+
+ free(worker);
+}
+
+static struct render_worker *
+render_worker_jail_reap_any_worker(struct render_worker_jail *jail, bool block)
+{
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+   (void)jail;
+   (void)block;
+   return NULL; /* threads are reaped by thrd_join in render_worker_destroy */
+#else
+   const int options = WEXITED | (block ? 0 : WNOHANG);
+   siginfo_t siginfo = { 0 };
+   const int ret = waitid(P_ALL, 0, &siginfo, options);
+   const pid_t pid = ret ? 0 : siginfo.si_pid;
+   if (!pid)
+      return NULL; /* error, or no exited child right now */
+
+   list_for_each_entry (struct render_worker, worker, &jail->workers, head) {
+      if (worker->pid == pid) {
+         worker->reaped = true;
+         return worker;
+      }
+   }
+
+   render_log("unknown child process %d", pid);
+   return NULL;
+#endif
+}
+
+struct render_worker_jail *
+render_worker_jail_create(int max_worker_count,
+                          enum render_worker_jail_seccomp_filter seccomp_filter,
+                          const char *seccomp_path)
+{
+   struct render_worker_jail *jail = calloc(1, sizeof(*jail));
+   if (!jail)
+      return NULL;
+
+   jail->max_worker_count = max_worker_count;
+   jail->sigchld_fd = -1;
+   list_inithead(&jail->workers);
+
+#ifndef ENABLE_RENDER_SERVER_WORKER_THREAD
+   jail->sigchld_fd = create_sigchld_fd(); /* lets the server poll for dead workers */
+   if (jail->sigchld_fd < 0)
+      goto fail;
+#endif
+
+#if defined(ENABLE_RENDER_SERVER_WORKER_MINIJAIL)
+   jail->minijail = create_minijail(seccomp_filter, seccomp_path);
+   if (!jail->minijail)
+      goto fail;
+#else
+   /* TODO RENDER_WORKER_JAIL_SECCOMP_BPF */
+   if (seccomp_filter != RENDER_WORKER_JAIL_SECCOMP_NONE)
+      goto fail;
+   (void)seccomp_path;
+#endif
+
+   return jail;
+fail:
+   if (jail->sigchld_fd >= 0) close(jail->sigchld_fd); /* don't leak the signalfd on late failure */
+   free(jail);
+   return NULL;
+}
+
+static void
+render_worker_jail_wait_workers(struct render_worker_jail *jail)
+{
+ while (jail->worker_count) {
+ struct render_worker *worker =
+ render_worker_jail_reap_any_worker(jail, true /* block */);
+ if (worker) {
+ assert(worker->destroyed && worker->reaped);
+ render_worker_jail_remove_worker(jail, worker);
+ }
+ }
+}
+
+void
+render_worker_jail_destroy(struct render_worker_jail *jail)
+{
+   render_worker_jail_wait_workers(jail); /* blocks until every worker is reaped */
+
+#if defined(ENABLE_RENDER_SERVER_WORKER_MINIJAIL)
+   minijail_destroy(jail->minijail);
+#endif
+
+   if (jail->sigchld_fd >= 0)
+      close(jail->sigchld_fd);
+
+   free(jail);
+}
+
+int
+render_worker_jail_get_sigchld_fd(const struct render_worker_jail *jail)
+{
+ return jail->sigchld_fd;
+}
+
+static bool
+render_worker_jail_drain_sigchld_fd(struct render_worker_jail *jail)
+{
+   if (jail->sigchld_fd < 0)
+      return true; /* thread workers: no signalfd to drain */
+
+   do {
+      struct signalfd_siginfo siginfos[8];
+      const ssize_t r = read(jail->sigchld_fd, siginfos, sizeof(siginfos));
+      if (r == sizeof(siginfos))
+         continue; /* buffer filled; more may still be queued */
+      if (r > 0 || (r < 0 && errno == EAGAIN))
+         break; /* drained: partial read, or would-block on the nonblocking fd */
+
+      render_log("failed to read signalfd");
+      return false;
+   } while (true);
+
+   return true;
+}
+
+bool
+render_worker_jail_reap_workers(struct render_worker_jail *jail)
+{
+ if (!render_worker_jail_drain_sigchld_fd(jail))
+ return false;
+
+ do {
+ struct render_worker *worker =
+ render_worker_jail_reap_any_worker(jail, false /* block */);
+ if (!worker)
+ break;
+
+ assert(worker->reaped);
+ if (worker->destroyed)
+ render_worker_jail_remove_worker(jail, worker);
+ } while (true);
+
+ return true;
+}
+
+void
+render_worker_jail_detach_workers(struct render_worker_jail *jail)
+{
+ /* free workers without killing nor reaping */
+ list_for_each_entry_safe (struct render_worker, worker, &jail->workers, head)
+ render_worker_jail_remove_worker(jail, worker);
+}
+
+struct render_worker *
+render_worker_create(struct render_worker_jail *jail,
+                     int (*thread_func)(void *thread_data),
+                     void *thread_data,
+                     size_t thread_data_size)
+{
+   if (jail->worker_count >= jail->max_worker_count) {
+      render_log("too many workers");
+      return NULL;
+   }
+
+   struct render_worker *worker = calloc(1, sizeof(*worker) + thread_data_size);
+   if (!worker)
+      return NULL;
+
+   memcpy(worker->thread_data, thread_data, thread_data_size); /* copied into the flexible array member */
+
+   bool ok;
+#if defined(ENABLE_RENDER_SERVER_WORKER_PROCESS)
+   worker->pid = fork(); /* 0 in the child; see render_worker_is_record */
+   ok = worker->pid >= 0;
+   (void)thread_func;
+#elif defined(ENABLE_RENDER_SERVER_WORKER_THREAD)
+   ok = thrd_create(&worker->thread, thread_func, worker->thread_data) == thrd_success;
+#elif defined(ENABLE_RENDER_SERVER_WORKER_MINIJAIL)
+   worker->pid = fork_minijail(jail->minijail);
+   ok = worker->pid >= 0;
+   (void)thread_func;
+#endif
+   if (!ok) {
+      free(worker);
+      return NULL;
+   }
+
+   render_worker_jail_add_worker(jail, worker);
+
+   return worker;
+}
+
+void
+render_worker_destroy(struct render_worker_jail *jail, struct render_worker *worker)
+{
+   assert(render_worker_is_record(worker)); /* must not be called from the worker itself */
+
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+   /* we trust the thread to clean up and exit in finite time */
+   thrd_join(worker->thread, NULL);
+   worker->reaped = true;
+#else
+   /* kill to make sure the worker exits in finite time */
+   if (!worker->reaped)
+      kill(worker->pid, SIGKILL);
+#endif
+
+   worker->destroyed = true;
+
+   if (worker->reaped)
+      render_worker_jail_remove_worker(jail, worker); /* otherwise freed later by reap/wait */
+}
+
+bool
+render_worker_is_record(const struct render_worker *worker)
+{
+   /* return false if called from the worker itself */
+#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
+   return !thrd_equal(worker->thread, thrd_current());
+#else
+   return worker->pid > 0; /* fork() returned 0 in the child */
+#endif
+}
diff --git a/server/render_worker.h b/server/render_worker.h
new file mode 100644
index 00000000..c18938b3
--- /dev/null
+++ b/server/render_worker.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef RENDER_WORKER_H
+#define RENDER_WORKER_H
+
+#include "render_common.h"
+
+enum render_worker_jail_seccomp_filter {
+ /* seccomp_path is ignored and seccomp is disabled */
+ RENDER_WORKER_JAIL_SECCOMP_NONE,
+ /* seccomp_path is a file containing a BPF program */
+ RENDER_WORKER_JAIL_SECCOMP_BPF,
+ /* seccomp_path is a file containing a minijail policy */
+ RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY,
+ RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY_LOG,
+};
+
+struct render_worker_jail *
+render_worker_jail_create(int max_worker_count,
+ enum render_worker_jail_seccomp_filter seccomp_filter,
+ const char *seccomp_path);
+
+void
+render_worker_jail_destroy(struct render_worker_jail *jail);
+
+int
+render_worker_jail_get_sigchld_fd(const struct render_worker_jail *jail);
+
+bool
+render_worker_jail_reap_workers(struct render_worker_jail *jail);
+
+void
+render_worker_jail_detach_workers(struct render_worker_jail *jail);
+
+struct render_worker *
+render_worker_create(struct render_worker_jail *jail,
+ int (*thread_func)(void *thread_data),
+ void *thread_data,
+ size_t thread_data_size);
+
+void
+render_worker_destroy(struct render_worker_jail *jail, struct render_worker *worker);
+
+bool
+render_worker_is_record(const struct render_worker *worker);
+
+#endif /* RENDER_WORKER_H */
diff --git a/src/drm/.clang-format b/src/drm/.clang-format
new file mode 100644
index 00000000..068b217e
--- /dev/null
+++ b/src/drm/.clang-format
@@ -0,0 +1,137 @@
+AlignAfterOpenBracket: true
+AlignConsecutiveMacros: true
+AlignConsecutiveBitFields: true
+AllowAllArgumentsOnNextLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: false
+AlwaysBreakAfterReturnType: TopLevelDefinitions
+BasedOnStyle: LLVM
+BraceWrapping:
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: true
+ AfterStruct: false
+ BeforeElse: false
+ SplitEmptyFunction: true
+BinPackArguments: true
+BinPackParameters: true
+BreakBeforeBraces: Custom
+ColumnLimit: 90
+ContinuationIndentWidth: 3
+Cpp11BracedListStyle: false
+Cpp11BracedListStyle: true
+BreakStringLiterals: false
+ForEachMacros:
+ - BITSET_FOREACH_SET
+ - BITSET_FOREACH_RANGE
+ - LIST_FOR_EACH_ENTRY
+ - LIST_FOR_EACH_ENTRY_SAFE
+ - LIST_FOR_EACH_ENTRY_SAFE_REV
+ - list_for_each_entry
+ - list_for_each_entry_safe
+ - list_for_each_entry_rev
+ - list_for_each_entry_rev_safe
+ - list_for_each_entry_from
+ - list_for_each_entry_from_rev
+ - foreach_list_typed
+ - u_foreach_bit
+ - util_dynarray_foreach
+ - rb_tree_foreach
+ - rb_tree_foreach_safe
+ - nir_foreach_variable
+ - nir_foreach_variable_safe
+ - nir_foreach_uniform_variable
+ - nir_foreach_uniform_variable_safe
+ - nir_foreach_register
+ - nir_foreach_register_safe
+ - nir_foreach_use
+ - nir_foreach_use_safe
+ - nir_foreach_if_use
+ - nir_foreach_if_use_safe
+ - nir_foreach_def
+ - nir_foreach_def_safe
+ - nir_foreach_phi_src
+ - nir_foreach_phi_src_safe
+ - nir_foreach_parallel_copy_entry
+ - nir_foreach_instr
+ - nir_foreach_instr_reverse
+ - nir_foreach_instr_safe
+ - nir_foreach_instr_reverse_safe
+ - nir_foreach_instr_from_safe
+ - nir_foreach_function
+ - nir_foreach_block
+ - nir_foreach_block_safe
+ - nir_foreach_block_reverse
+ - nir_foreach_block_reverse_safe
+ - nir_foreach_block_in_cf_node
+ - nir_foreach_shader_in_variable
+ - nir_foreach_shader_out_variable_safe
+ - nir_foreach_variable_in_list
+ - nir_foreach_variable_with_modes_safe
+ - nir_foreach_variable_with_modes
+ - nir_foreach_shader_out_variable
+ - ir2_foreach_instr
+ - ir2_foreach_live_reg
+ - ir2_foreach_avail
+ - ir2_foreach_src
+ - foreach_two_lists
+ - foreach_bit
+ - foreach_sched_node
+ - foreach_src
+ - foreach_src_n
+ - foreach_dst
+ - foreach_dst_n
+ - ra_foreach_dst
+ - ra_foreach_src
+ - ra_foreach_src_rev
+ - foreach_ssa_use
+ - foreach_ssa_srcp_n
+ - foreach_ssa_srcp
+ - foreach_ssa_src_n
+ - foreach_ssa_src
+ - foreach_input_n
+ - foreach_input
+ - foreach_output_n
+ - foreach_output
+ - foreach_instr
+ - foreach_instr_rev
+ - foreach_instr_safe
+ - foreach_instr_from_safe
+ - foreach_block
+ - foreach_block_safe
+ - foreach_block_rev
+ - foreach_array
+ - foreach_array_safe
+ - foreach_name_n
+ - foreach_name
+ - foreach_def
+ - foreach_use
+ - foreach_interval
+ - foreach_interval_safe
+ - foreach_interval_rev
+ - foreach_interval_rev_safe
+ - foreach_batch
+ - hash_table_foreach
+ - set_foreach
+ - foreach_line_in_section
+ - perf_time
+ - perf_time_ctx
+ - foreach_submit
+ - foreach_submit_safe
+
+IncludeBlocks: Preserve
+IncludeCategories:
+ - Regex: '<[[:alnum:].]+>'
+ Priority: 1
+ - Regex: '.*\/.*'
+ Priority: 2
+ - Regex: '.*'
+ Priority: 3
+IndentWidth: 3
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyExcessCharacter: 100
+SpaceAfterCStyleCast: false
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: false
+SpacesInContainerLiterals: false
+
diff --git a/src/drm/drm-uapi/msm_drm.h b/src/drm/drm-uapi/msm_drm.h
new file mode 100644
index 00000000..3c7b097c
--- /dev/null
+++ b/src/drm/drm-uapi/msm_drm.h
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MSM_DRM_H__
+#define __MSM_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
+ * user/kernel compatibility
+ * 2) Keep fields aligned to their size
+ * 3) Because of how drm_ioctl() works, we can add new fields at
+ * the end of an ioctl if some care is taken: drm_ioctl() will
+ * zero out the new fields at the tail of the ioctl, so a zero
+ * value should have a backwards compatible meaning. And for
+ * output params, userspace won't see the newly added output
+ * fields.. so that has to be somehow ok.
+ */
+
+#define MSM_PIPE_NONE 0x00
+#define MSM_PIPE_2D0 0x01
+#define MSM_PIPE_2D1 0x02
+#define MSM_PIPE_3D0 0x10
+
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK 0xffff
+#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
+
+/* timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls). The following struct is logically the
+ * same as 'struct timespec' but 32/64b ABI safe.
+ */
+struct drm_msm_timespec {
+ __s64 tv_sec; /* seconds */
+ __s64 tv_nsec; /* nanoseconds */
+};
+
+/* Below "RO" indicates a read-only param, "WO" indicates write-only, and
+ * "RW" indicates a param that can be both read (GET_PARAM) and written
+ * (SET_PARAM)
+ */
+#define MSM_PARAM_GPU_ID 0x01 /* RO */
+#define MSM_PARAM_GMEM_SIZE 0x02 /* RO */
+#define MSM_PARAM_CHIP_ID 0x03 /* RO */
+#define MSM_PARAM_MAX_FREQ 0x04 /* RO */
+#define MSM_PARAM_TIMESTAMP 0x05 /* RO */
+#define MSM_PARAM_GMEM_BASE 0x06 /* RO */
+#define MSM_PARAM_PRIORITIES 0x07 /* RO: The # of priority levels */
+#define MSM_PARAM_PP_PGTABLE 0x08 /* RO: Deprecated, always returns zero */
+#define MSM_PARAM_FAULTS 0x09 /* RO */
+#define MSM_PARAM_SUSPENDS 0x0a /* RO */
+#define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */
+#define MSM_PARAM_COMM 0x0c /* WO: override for task->comm */
+#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
+#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
+#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+
+/* For backwards compat. The original support for preemption was based on
+ * a single ring per priority level so # of priority levels equals the #
+ * of rings. With drm/scheduler providing additional levels of priority,
+ * the number of priorities is greater than the # of rings. The param is
+ * renamed to better reflect this.
+ */
+#define MSM_PARAM_NR_RINGS MSM_PARAM_PRIORITIES
+
+struct drm_msm_param {
+ __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 param; /* in, MSM_PARAM_x */
+ __u64 value; /* out (get_param) or in (set_param) */
+ __u32 len; /* zero for non-pointer params */
+ __u32 pad; /* must be zero */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
+#define MSM_BO_GPU_READONLY 0x00000002
+#define MSM_BO_CACHE_MASK 0x000f0000
+/* cache modes */
+#define MSM_BO_CACHED 0x00010000
+#define MSM_BO_WC 0x00020000
+#define MSM_BO_UNCACHED 0x00040000 /* deprecated, use MSM_BO_WC */
+#define MSM_BO_CACHED_COHERENT 0x080000
+
+#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
+ MSM_BO_GPU_READONLY | \
+ MSM_BO_CACHE_MASK)
+
+struct drm_msm_gem_new {
+ __u64 size; /* in */
+ __u32 flags; /* in, mask of MSM_BO_x */
+ __u32 handle; /* out */
+};
+
+/* Get or set GEM buffer info. The requested value can be passed
+ * directly in 'value', or for data larger than 64b 'value' is a
+ * pointer to userspace buffer, with 'len' specifying the number of
+ * bytes copied into that buffer. For info returned by pointer,
+ * calling the GEM_INFO ioctl with null 'value' will return the
+ * required buffer size in 'len'
+ */
+#define MSM_INFO_GET_OFFSET 0x00 /* get mmap() offset, returned by value */
+#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
+#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
+#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
+#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
+
+struct drm_msm_gem_info {
+ __u32 handle; /* in */
+ __u32 info; /* in - one of MSM_INFO_* */
+ __u64 value; /* in or out */
+ __u32 len; /* in or out */
+ __u32 pad;
+};
+
+#define MSM_PREP_READ 0x01
+#define MSM_PREP_WRITE 0x02
+#define MSM_PREP_NOSYNC 0x04
+
+#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
+
+struct drm_msm_gem_cpu_prep {
+ __u32 handle; /* in */
+ __u32 op; /* in, mask of MSM_PREP_x */
+ struct drm_msm_timespec timeout; /* in */
+};
+
+struct drm_msm_gem_cpu_fini {
+ __u32 handle; /* in */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ *
+ * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
+ *
+ * When we have GPU's w/ >32bit ptrs, it should be possible to deal
+ * with this by emit'ing two reloc entries with appropriate shift
+ * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
+ *
+ * NOTE that reloc's must be sorted by order of increasing submit_offset,
+ * otherwise EINVAL.
+ */
+struct drm_msm_gem_submit_reloc {
+ __u32 submit_offset; /* in, offset from submit_bo */
+ __u32 or; /* in, value OR'd with result */
+ __s32 shift; /* in, amount of left shift (can be negative) */
+ __u32 reloc_idx; /* in, index of reloc_bo buffer */
+ __u64 reloc_offset; /* in, offset from start of reloc_bo */
+};
+
+/* submit-types:
+ * BUF - this cmd buffer is executed normally.
+ * IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
+ * processed normally, but the kernel does not setup an IB to
+ * this buffer in the first-level ringbuffer
+ * CTX_RESTORE_BUF - only executed if there has been a GPU context
+ * switch since the last SUBMIT ioctl
+ */
+#define MSM_SUBMIT_CMD_BUF 0x0001
+#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
+#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
+struct drm_msm_gem_submit_cmd {
+ __u32 type; /* in, one of MSM_SUBMIT_CMD_x */
+ __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
+ __u32 submit_offset; /* in, offset into submit_bo */
+ __u32 size; /* in, cmdstream size */
+ __u32 pad;
+ __u32 nr_relocs; /* in, number of submit_reloc's */
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+};
+
+/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As a optimization, the current buffer (gpu virtual address) can be
+ * passed back through the 'presumed' field. If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped. This can
+ * avoid kernel needing to map/access the cmdstream bo in the common
+ * case.
+ */
+#define MSM_SUBMIT_BO_READ 0x0001
+#define MSM_SUBMIT_BO_WRITE 0x0002
+#define MSM_SUBMIT_BO_DUMP 0x0004
+
+#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
+ MSM_SUBMIT_BO_WRITE | \
+ MSM_SUBMIT_BO_DUMP)
+
+struct drm_msm_gem_submit_bo {
+ __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
+ __u32 handle; /* in, GEM handle */
+ __u64 presumed; /* in/out, presumed buffer address */
+};
+
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
+#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
+#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
+#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */
+#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */
+#define MSM_SUBMIT_FENCE_SN_IN 0x02000000 /* userspace passes in seqno fence */
+#define MSM_SUBMIT_FLAGS ( \
+ MSM_SUBMIT_NO_IMPLICIT | \
+ MSM_SUBMIT_FENCE_FD_IN | \
+ MSM_SUBMIT_FENCE_FD_OUT | \
+ MSM_SUBMIT_SUDO | \
+ MSM_SUBMIT_SYNCOBJ_IN | \
+ MSM_SUBMIT_SYNCOBJ_OUT | \
+ MSM_SUBMIT_FENCE_SN_IN | \
+ 0)
+
+#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
+ MSM_SUBMIT_SYNCOBJ_RESET | \
+ 0)
+
+struct drm_msm_gem_submit_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+ __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
+/* Each cmdstream submit consists of a table of buffers involved, and
+ * one or more cmdstream buffers. This allows for conditional execution
+ * (context-restore), and IB buffers needed for per tile/bin draw cmds.
+ */
+struct drm_msm_gem_submit {
+ __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
+ __u32 fence; /* out (or in with MSM_SUBMIT_FENCE_SN_IN flag) */
+ __u32 nr_bos; /* in, number of submit_bo's */
+ __u32 nr_cmds; /* in, number of submit_cmd's */
+ __u64 bos; /* in, ptr to array of submit_bo's */
+ __u64 cmds; /* in, ptr to array of submit_cmd's */
+ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
+ __u32 queueid; /* in, submitqueue id */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
+ __u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
+ __u32 syncobj_stride; /* in, stride of syncobj arrays. */
+ __u32 pad; /*in, reserved for future use, always 0. */
+
+};
+
+/* The normal way to synchronize with the GPU is just to CPU_PREP on
+ * a buffer if you need to access it from the CPU (other cmdstream
+ * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
+ * handle the required synchronization under the hood). This ioctl
+ * mainly just exists as a way to implement the gallium pipe_fence
+ * APIs without requiring a dummy bo to synchronize on.
+ */
+struct drm_msm_wait_fence {
+ __u32 fence; /* in */
+ __u32 pad;
+ struct drm_msm_timespec timeout; /* in */
+ __u32 queueid; /* in, submitqueue id */
+};
+
+/* madvise provides a way to tell the kernel in case a buffers contents
+ * can be discarded under memory pressure, which is useful for userspace
+ * bo cache where we want to optimistically hold on to buffer allocate
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when buffer enters BO
+ * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
+ * In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
+#define __MSM_MADV_PURGED 2 /* internal state */
+
+struct drm_msm_gem_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, MSM_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
+
+/*
+ * Draw queues allow the user to set specific submission parameter. Command
+ * submissions specify a specific submitqueue to use. ID 0 is reserved for
+ * backwards compatibility as a "default" submitqueue
+ */
+
+#define MSM_SUBMITQUEUE_FLAGS (0)
+
+/*
+ * The submitqueue priority should be between 0 and MSM_PARAM_PRIORITIES-1,
+ * a lower numeric value is higher priority.
+ */
+struct drm_msm_submitqueue {
+ __u32 flags; /* in, MSM_SUBMITQUEUE_x */
+ __u32 prio; /* in, Priority level */
+ __u32 id; /* out, identifier */
+};
+
+#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
+
+struct drm_msm_submitqueue_query {
+ __u64 data;
+ __u32 id;
+ __u32 param;
+ __u32 len;
+ __u32 pad;
+};
+
+#define DRM_MSM_GET_PARAM 0x00
+#define DRM_MSM_SET_PARAM 0x01
+#define DRM_MSM_GEM_NEW 0x02
+#define DRM_MSM_GEM_INFO 0x03
+#define DRM_MSM_GEM_CPU_PREP 0x04
+#define DRM_MSM_GEM_CPU_FINI 0x05
+#define DRM_MSM_GEM_SUBMIT 0x06
+#define DRM_MSM_WAIT_FENCE 0x07
+#define DRM_MSM_GEM_MADVISE 0x08
+/* placeholder:
+#define DRM_MSM_GEM_SVM_NEW 0x09
+ */
+#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
+#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
+#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
+
+#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
+#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
+#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
+#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
+#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
+#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __MSM_DRM_H__ */
diff --git a/src/drm/drm_fence.c b/src/drm/drm_fence.c
new file mode 100644
index 00000000..7215883d
--- /dev/null
+++ b/src/drm/drm_fence.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <poll.h>
+#include <string.h>
+
+#include "virgl_context.h"
+#include "virgl_util.h"
+
+#include "util/os_file.h"
+#include "util/u_atomic.h"
+#include "util/u_thread.h"
+
+#include "drm_fence.h"
+#include "drm_util.h"
+
+/**
+ * Tracking for a single fence on a timeline
+ */
+struct drm_fence {
+   int fd;              /* dup'd dma-fence fd, owned by this struct */
+   uint32_t flags;      /* flags passed to drm_timeline_submit_fence() */
+   uint64_t fence_id;   /* guest-visible fence id reported on retire */
+   struct list_head node; /* link in drm_timeline::pending_fences (FIFO) */
+};
+
+/* Unlink @fence from its timeline's pending list and release its fd.
+ * Called with fence_mutex held (thread_sync) or after the sync thread
+ * has been joined (drm_timeline_fini).
+ */
+static void
+drm_fence_destroy(struct drm_fence *fence)
+{
+   close(fence->fd);
+   list_del(&fence->node);
+   free(fence);
+}
+
+/* Allocate a fence tracking a dup of @fd.  The caller retains ownership
+ * of @fd; the dup'd fd is owned by the returned fence.  Returns NULL on
+ * allocation or dup failure.  The node is linked by the caller.
+ */
+static struct drm_fence *
+drm_fence_create(int fd, uint32_t flags, uint64_t fence_id)
+{
+   struct drm_fence *fence = calloc(1, sizeof(*fence));
+
+   if (!fence)
+      return NULL;
+
+   fence->fd = os_dupfd_cloexec(fd);
+
+   if (fence->fd < 0) {
+      free(fence);
+      return NULL;
+   }
+
+   fence->flags = flags;
+   fence->fence_id = fence_id;
+
+   return fence;
+}
+
+
+/* Per-timeline worker thread.  Sleeps on fence_cond while the pending
+ * list is empty, otherwise polls the oldest pending fence fd (fences on
+ * a timeline signal in FIFO order), then retires it via the context's
+ * fence_retire callback and kicks the eventfd.  Note fence_mutex is
+ * dropped for the duration of the (unbounded) poll, so new fences can
+ * be queued while we wait.  Exits when stop_sync_thread is set.
+ */
+static int
+thread_sync(void *arg)
+{
+   struct drm_timeline *timeline = arg;
+
+   u_thread_setname(timeline->name);
+
+   mtx_lock(&timeline->fence_mutex);
+   while (!timeline->stop_sync_thread) {
+      if (list_is_empty(&timeline->pending_fences)) {
+         if (cnd_wait(&timeline->fence_cond, &timeline->fence_mutex))
+            drm_log("error waiting on fence condition");
+         continue;
+      }
+
+      struct drm_fence *fence =
+         list_first_entry(&timeline->pending_fences, struct drm_fence, node);
+      int ret;
+
+      mtx_unlock(&timeline->fence_mutex);
+      ret = poll(&(struct pollfd){fence->fd, POLLIN}, 1, -1); /* wait forever */
+      mtx_lock(&timeline->fence_mutex);
+
+      if (ret == 1) {
+         drm_dbg("fence signaled: %p (%" PRIu64 ")", fence, fence->fence_id);
+         timeline->vctx->fence_retire(timeline->vctx, timeline->ring_idx,
+                                      fence->fence_id);
+         write_eventfd(timeline->eventfd, 1);
+         drm_fence_destroy(fence);
+      } else if (ret != 0) {
+         drm_log("poll failed: %s", strerror(errno));
+      }
+   }
+   mtx_unlock(&timeline->fence_mutex);
+
+   return 0;
+}
+
+/* Initialize @timeline and spawn its sync thread.  @name is stored by
+ * pointer (not copied), so the caller must keep it valid for the life
+ * of the timeline.  @eventfd is borrowed: it is written on each fence
+ * retire but never closed here.
+ */
+void
+drm_timeline_init(struct drm_timeline *timeline, struct virgl_context *vctx,
+                  const char *name, int eventfd, int ring_idx)
+{
+   timeline->vctx = vctx;
+   timeline->name = name;
+   timeline->eventfd = eventfd;
+   timeline->ring_idx = ring_idx;
+
+   timeline->last_fence_fd = -1;
+
+   list_inithead(&timeline->pending_fences);
+
+   mtx_init(&timeline->fence_mutex, mtx_plain);
+   cnd_init(&timeline->fence_cond);
+
+   timeline->sync_thread = u_thread_create(thread_sync, timeline);
+}
+
+/* Tear down @timeline: stop and join the sync thread, close the cached
+ * last fence fd, destroy any still-pending fences (their fence_retire
+ * callbacks will never fire), and release the mutex/cond.
+ */
+void
+drm_timeline_fini(struct drm_timeline *timeline)
+{
+   /* signal thread_sync to shutdown: */
+   mtx_lock(&timeline->fence_mutex);
+   timeline->stop_sync_thread = true;
+   cnd_signal(&timeline->fence_cond);
+   mtx_unlock(&timeline->fence_mutex);
+
+   /* wait for thread_sync to exit: */
+   thrd_join(timeline->sync_thread, NULL);
+
+   if (timeline->last_fence_fd != -1)
+      close(timeline->last_fence_fd);
+
+   /* cleanup remaining fences (safe without the lock; the sync thread
+    * is gone):
+    */
+   list_for_each_entry_safe (struct drm_fence, fence, &timeline->pending_fences, node) {
+      drm_fence_destroy(fence);
+   }
+
+   cnd_destroy(&timeline->fence_cond);
+   mtx_destroy(&timeline->fence_mutex);
+}
+
+/* Queue a fence on @timeline tracking the most recent fence fd (set via
+ * drm_timeline_set_last_fence_fd()).  The sync thread is woken to poll
+ * it.  Returns 0 on success, -EINVAL if no fence fd has been set yet,
+ * or -ENOMEM if the fence could not be created.
+ */
+int
+drm_timeline_submit_fence(struct drm_timeline *timeline, uint32_t flags,
+                          uint64_t fence_id)
+{
+   if (timeline->last_fence_fd == -1)
+      return -EINVAL;
+
+   struct drm_fence *fence =
+      drm_fence_create(timeline->last_fence_fd, flags, fence_id);
+
+   if (!fence)
+      return -ENOMEM;
+
+   drm_dbg("fence: %p (%" PRIu64 ")", fence, fence->fence_id);
+
+   mtx_lock(&timeline->fence_mutex);
+   list_addtail(&fence->node, &timeline->pending_fences);
+   cnd_signal(&timeline->fence_cond);
+   mtx_unlock(&timeline->fence_mutex);
+
+   return 0;
+}
+
+/* Record @fd as the timeline's most recent fence fd, closing any
+ * previously cached one.  Takes ownership of @fd.
+ * NOTE(review): last_fence_fd is read without fence_mutex here and in
+ * drm_timeline_submit_fence() — presumably both are only called from
+ * the context's single submit path; verify against callers.
+ */
+void
+drm_timeline_set_last_fence_fd(struct drm_timeline *timeline, int fd)
+{
+   if (timeline->last_fence_fd != -1)
+      close(timeline->last_fence_fd);
+   timeline->last_fence_fd = fd;
+}
diff --git a/src/drm/drm_fence.h b/src/drm/drm_fence.h
new file mode 100644
index 00000000..99cada4e
--- /dev/null
+++ b/src/drm/drm_fence.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef DRM_FENCE_H_
+#define DRM_FENCE_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "c11/threads.h"
+#include "util/list.h"
+
+/*
+ * Helpers to deal with managing dma-fence fd's. This should be something
+ * that can be re-used by any virtgpu native context implementation.
+ */
+
+struct drm_fence;
+struct virgl_context;
+
+/**
+ * Represents a single timeline of fence-fd's.  Fences on a timeline are
+ * signaled in FIFO order.
+ */
+struct drm_timeline {
+   struct virgl_context *vctx;     /* context whose fence_retire we call */
+   const char *name;               /* sync thread name; not copied, borrowed */
+   int eventfd;                    /* written (+1) on each retire; borrowed */
+   int ring_idx;                   /* ring index passed to fence_retire */
+
+   int last_fence_fd;              /* most recent fence fd; -1 if none; owned */
+   struct list_head pending_fences; /* FIFO of struct drm_fence, guarded by fence_mutex */
+
+   mtx_t fence_mutex;              /* guards pending_fences + stop_sync_thread */
+   cnd_t fence_cond;               /* signals the sync thread about new work/shutdown */
+   thrd_t sync_thread;             /* see thread_sync() */
+   bool stop_sync_thread;          /* set under fence_mutex to stop sync_thread */
+};
+
+void drm_timeline_init(struct drm_timeline *timeline, struct virgl_context *vctx,
+ const char *name, int eventfd, int ring_idx);
+
+void drm_timeline_fini(struct drm_timeline *timeline);
+
+int drm_timeline_submit_fence(struct drm_timeline *timeline, uint32_t flags,
+ uint64_t fence_id);
+
+void drm_timeline_set_last_fence_fd(struct drm_timeline *timeline, int fd);
+
+#endif /* DRM_FENCE_H_ */
diff --git a/src/drm/drm_renderer.c b/src/drm/drm_renderer.c
new file mode 100644
index 00000000..27ece782
--- /dev/null
+++ b/src/drm/drm_renderer.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "config.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <xf86drm.h>
+
+#include "drm_hw.h"
+#include "drm_renderer.h"
+
+#ifdef ENABLE_DRM_MSM
+# include "msm/msm_renderer.h"
+#endif
+
+static struct virgl_renderer_capset_drm capset;
+
+static const struct backend {
+ uint32_t context_type;
+ const char *name;
+ int (*probe)(int fd, struct virgl_renderer_capset_drm *capset);
+ struct virgl_context *(*create)(int fd);
+} backends[] = {
+#ifdef ENABLE_DRM_MSM
+ {
+ .context_type = VIRTGPU_DRM_CONTEXT_MSM,
+ .name = "msm",
+ .probe = msm_renderer_probe,
+ .create = msm_renderer_create,
+ },
+#endif
+};
+
+/* Probe for a usable native-context backend and fill the static capset.
+ * If @drm_fd is -1 each backend's device is opened by name; otherwise
+ * the externally provided fd is matched against the backend table by
+ * driver name.  In all cases the fd (including a caller-provided
+ * @drm_fd) is closed before returning — the fd used at context-create
+ * time is opened fresh in drm_renderer_create().  Returns the backend's
+ * probe result, -ENOMEM if the version query fails, or -ENODEV if no
+ * backend matches.
+ */
+int
+drm_renderer_init(int drm_fd)
+{
+   for (unsigned i = 0; i < ARRAY_SIZE(backends); i++) {
+      const struct backend *b = &backends[i];
+      int fd;
+
+      if (drm_fd != -1) {
+         fd = drm_fd;
+      } else {
+         fd = drmOpenWithType(b->name, NULL, DRM_NODE_RENDER);
+         if (fd < 0)
+            continue;
+      }
+
+      drmVersionPtr ver = drmGetVersion(fd);
+      if (!ver) {
+         close(fd);
+         return -ENOMEM;
+      }
+
+      if (strcmp(ver->name, b->name)) {
+         /* In the drmOpenWithType() path, we will only get back an fd
+          * for the device with matching name.  But when we are using
+          * an externally provided fd, we need to go thru the backends
+          * table to see which one has the matching name.
+          */
+         assert(drm_fd != -1);
+         drmFreeVersion(ver);
+         continue;
+      }
+
+      capset.version_major = ver->version_major;
+      capset.version_minor = ver->version_minor;
+      capset.version_patchlevel = ver->version_patchlevel;
+      capset.context_type = b->context_type;
+
+      /* On probe failure, clear the capset so drm_renderer_capset()
+       * and drm_renderer_create() treat the backend as absent:
+       */
+      int ret = b->probe(fd, &capset);
+      if (ret)
+         memset(&capset, 0, sizeof(capset));
+
+      drmFreeVersion(ver);
+      close(fd);
+      return ret;
+   }
+
+   if (drm_fd != -1)
+      close(drm_fd);
+
+   return -ENODEV;
+}
+
+/* No per-renderer state to tear down yet; log for tracing only. */
+void
+drm_renderer_fini(void)
+{
+   drm_log("");
+}
+
+/* No global state to reset yet; log for tracing only. */
+void
+drm_renderer_reset(void)
+{
+   drm_log("");
+}
+
+/* Copy the probed capset into @_c (if non-NULL) and return its size.
+ * Returns 0 when no backend probed successfully (context_type unset),
+ * so callers can size-query with a NULL pointer first.
+ */
+size_t
+drm_renderer_capset(void *_c)
+{
+   struct virgl_renderer_capset_drm *c = _c;
+   drm_log("c=%p", c);
+
+   if (!capset.context_type)
+      return 0;
+
+   if (c)
+      *c = capset;
+
+   return sizeof(*c);
+}
+
+/* Create a context for the backend selected at drm_renderer_init()
+ * time (matched by capset.context_type).  Opens a fresh render-node fd
+ * whose ownership passes to the backend's create().  Returns NULL if
+ * no backend was probed or the device cannot be opened.
+ */
+struct virgl_context *
+drm_renderer_create(UNUSED size_t debug_len, UNUSED const char *debug_name)
+{
+   for (unsigned i = 0; i < ARRAY_SIZE(backends); i++) {
+      const struct backend *b = &backends[i];
+
+      if (b->context_type != capset.context_type)
+         continue;
+
+      int fd = drmOpenWithType(b->name, NULL, DRM_NODE_RENDER);
+      if (fd < 0)
+         return NULL;
+
+      return b->create(fd);
+   }
+
+   return NULL;
+}
diff --git a/src/drm/drm_renderer.h b/src/drm/drm_renderer.h
new file mode 100644
index 00000000..d6fb5eca
--- /dev/null
+++ b/src/drm/drm_renderer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef DRM_RENDERER_H_
+#define DRM_RENDERER_H_
+
+#include "config.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef ENABLE_DRM
+
+int drm_renderer_init(int drm_fd);
+
+void drm_renderer_fini(void);
+
+void drm_renderer_reset(void);
+
+size_t drm_renderer_capset(void *capset);
+
+struct virgl_context *drm_renderer_create(size_t debug_len, const char *debug_name);
+
+#else /* !ENABLE_DRM */
+
+/* Inline no-op stubs so callers need no #ifdefs when DRM native
+ * context support is compiled out.
+ */
+
+static inline int
+drm_renderer_init(UNUSED int drm_fd)
+{
+   virgl_log("DRM native context support was not enabled in virglrenderer\n");
+   return -1;
+}
+
+static inline void
+drm_renderer_fini(void)
+{
+}
+
+static inline void
+drm_renderer_reset(void)
+{
+}
+
+static inline size_t
+drm_renderer_capset(UNUSED void *capset)
+{
+   return 0;
+}
+
+static inline struct virgl_context *
+drm_renderer_create(UNUSED size_t debug_len, UNUSED const char *debug_name)
+{
+   return NULL;
+}
+
+#endif /* ENABLE_DRM */
diff --git a/src/drm/drm_util.c b/src/drm/drm_util.c
new file mode 100644
index 00000000..4f6535a8
--- /dev/null
+++ b/src/drm/drm_util.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <stdarg.h>
+#include <string.h>
+
+#include "drm_util.h"
+#include "virgl_util.h"
+
+#include "util/macros.h"
+
+/* printf-style logger backing the drm_log() macro: formats the message
+ * into a fixed 1024-byte buffer behind a "drm: " prefix, appends a
+ * newline (truncating if needed to keep room for it plus the NUL), and
+ * hands the single line to virgl_log().
+ */
+void
+_drm_log(const char *fmt, ...)
+{
+   const char prefix[] = "drm: ";
+   char line[1024];
+   size_t len;
+   va_list va;
+   int ret;
+
+   len = ARRAY_SIZE(prefix) - 1;
+   memcpy(line, prefix, len);
+
+   va_start(va, fmt);
+   ret = vsnprintf(line + len, ARRAY_SIZE(line) - len, fmt, va);
+   va_end(va);
+
+   if (ret < 0) {
+      /* formatting failed; substitute a fixed marker */
+      const char log_error[] = "log error";
+      memcpy(line + len, log_error, ARRAY_SIZE(log_error) - 1);
+      len += ARRAY_SIZE(log_error) - 1;
+   } else if ((size_t)ret < ARRAY_SIZE(line) - len) {
+      len += ret;
+   } else {
+      /* vsnprintf truncated; buffer holds ARRAY_SIZE(line)-1 chars */
+      len = ARRAY_SIZE(line) - 1;
+   }
+
+   /* make room for newline */
+   if (len + 1 >= ARRAY_SIZE(line))
+      len--;
+
+   line[len++] = '\n';
+   line[len] = '\0';
+
+   virgl_log("%s", line);
+}
diff --git a/src/drm/drm_util.h b/src/drm/drm_util.h
new file mode 100644
index 00000000..e137e543
--- /dev/null
+++ b/src/drm/drm_util.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef DRM_UTIL_H_
+#define DRM_UTIL_H_
+
+#pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#pragma GCC diagnostic ignored "-Wlanguage-extension-token"
+#pragma GCC diagnostic ignored "-Wgnu-statement-expression"
+
+#include "linux/overflow.h"
+
+void _drm_log(const char *fmt, ...);
+/* Always-on log with function/line prefix; see _drm_log() for formatting. */
+#define drm_log(fmt, ...) _drm_log("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+/* Verbose debug logging, compiled out by default (flip the #if 0 to
+ * enable).  The no-op form still swallows its arguments syntactically.
+ */
+#if 0
+#define drm_dbg drm_log
+#else
+#define drm_dbg(fmt, ...) \
+   do { \
+   } while (0)
+#endif
+
+/* Pointer <-> __u64 conversions for 32b/64b-safe uabi structs. */
+#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+#define U642VOID(x) ((void *)(unsigned long)(x))
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000ull
+#endif
+
+#endif /* DRM_UTIL_H_ */
diff --git a/src/drm/linux/overflow.h b/src/drm/linux/overflow.h
new file mode 100644
index 00000000..8a25314b
--- /dev/null
+++ b/src/drm/linux/overflow.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#define __must_check __attribute__((__warn_unused_result__))
+
+/*
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+/*
+ * Avoids triggering -Wtype-limits compilation warning,
+ * while using unsigned data types to check a < 0.
+ */
+#define is_non_negative(a) ((a) > 0 || (a) == 0)
+#define is_negative(a) (!(is_non_negative(a)))
+
+/*
+ * Allows for effectively applying __must_check to a macro so we can have
+ * both the type-agnostic benefits of the macros while also being able to
+ * enforce that the return value is, in fact, checked.
+ */
+static inline bool __must_check __must_check_overflow(bool overflow)
+{
+ return unlikely(overflow);
+}
+
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) __must_check_overflow(({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_add_overflow(__a, __b, __d); \
+}))
+
+#define check_sub_overflow(a, b, d) __must_check_overflow(({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_sub_overflow(__a, __b, __d); \
+}))
+
+#define check_mul_overflow(a, b, d) __must_check_overflow(({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_mul_overflow(__a, __b, __d); \
+}))
+
+/** check_shl_overflow() - Calculate a left-shifted value and check overflow
+ *
+ * @a: Value to be shifted
+ * @s: How many bits left to shift
+ * @d: Pointer to where to store the result
+ *
+ * Computes *@d = (@a << @s)
+ *
+ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * make sense. Example conditions:
+ * - 'a << s' causes bits to be lost when stored in *d.
+ * - 's' is garbage (e.g. negative) or so large that the result of
+ * 'a << s' is guaranteed to be 0.
+ * - 'a' is negative.
+ * - 'a << s' sets the sign bit, if any, in '*d'.
+ *
+ * '*d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if true is returned.
+ */
+#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
+ typeof(a) _a = a; \
+ typeof(s) _s = s; \
+ typeof(d) _d = d; \
+ u64 _a_full = _a; \
+ unsigned int _to_shift = \
+ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
+ *_d = (_a_full << _to_shift); \
+ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
+ (*_d >> _to_shift) != _a); \
+}))
+
+/**
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ *
+ * @factor1: first factor
+ * @factor2: second factor
+ *
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(factor1, factor2, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ *
+ * @addend1: first addend
+ * @addend2: second addend
+ *
+ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
+{
+ size_t bytes;
+
+ if (check_add_overflow(addend1, addend2, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ *
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+{
+ size_t bytes;
+
+ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+ check_sub_overflow(minuend, subtrahend, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array_size(a, b) size_mul(a, b)
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
+
+/**
+ * flex_array_size() - Calculate size of a flexible array member
+ * within an enclosing structure.
+ *
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define flex_array_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
+ size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
+
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
+ *
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @count number of @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ sizeof(*(p)) + flex_array_size(p, member, count), \
+ size_add(sizeof(*(p)), flex_array_size(p, member, count)))
+
+#endif /* __LINUX_OVERFLOW_H */
diff --git a/src/drm/msm/msm_proto.h b/src/drm/msm/msm_proto.h
new file mode 100644
index 00000000..9279475a
--- /dev/null
+++ b/src/drm/msm/msm_proto.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef MSM_PROTO_H_
+#define MSM_PROTO_H_
+
+/**
+ * General protocol notes:
+ * 1) Request (req) messages are generally sent over DRM_VIRTGPU_EXECBUFFER
+ * but can also be sent via DRM_VIRTGPU_RESOURCE_CREATE_BLOB (in which
+ * case they are processed by the host before ctx->get_blob())
+ * 2) Response (rsp) messages are returned via shmem->rsp_mem, at an offset
+ * specified by the guest in the req message. Not all req messages have
+ * a rsp.
+ * 3) Host and guest could have different pointer sizes, ie. 32b guest and
+ * 64b host, or visa versa, so similar to kernel uabi, req and rsp msgs
+ * should be explicitly padded to avoid 32b vs 64b struct padding issues
+ */
+
+/**
+ * Defines the layout of shmem buffer used for host->guest communication.
+ */
+struct msm_shmem {
+ /**
+ * The sequence # of last cmd processed by the host
+ */
+ uint32_t seqno;
+
+ /**
+ * Offset to the start of rsp memory region in the shmem buffer. This
+ * is set by the host when the shmem buffer is allocated, to allow for
+ * extending the shmem buffer with new fields. The size of the rsp
+ * memory region is the size of the shmem buffer (controlled by the
+ * guest) minus rsp_mem_offset.
+ *
+ * The guest should use the msm_shmem_has_field() macro to determine
+ * if the host supports a given field, ie. to handle compatibility of
+ * newer guest vs older host.
+ *
+ * Making the guest userspace responsible for backwards compatibility
+ * simplifies the host VMM.
+ */
+ uint32_t rsp_mem_offset;
+
+#define msm_shmem_has_field(shmem, field) ({ \
+ struct msm_shmem *_shmem = (shmem); \
+ (_shmem->rsp_mem_offset > offsetof(struct msm_shmem, field)); \
+ })
+
+ /**
+ * Counter that is incremented on asynchronous errors, like SUBMIT
+ * or GEM_NEW failures. The guest should treat errors as context-
+ * lost.
+ */
+ uint32_t async_error;
+};
+
+/*
+ * DEFINE_CAST() - generate a static inline to_<child>() helper that
+ * downcasts a 'struct parent *' to the 'struct child *' that embeds it
+ * as its first member (hdr/base).
+ *
+ * Note: the cast intentionally discards const, so handlers can recover
+ * a mutable request struct from a 'const struct msm_ccmd_req *'.
+ */
+#define DEFINE_CAST(parent, child) \
+ static inline struct child *to_##child(const struct parent *x) \
+ { \
+ return (struct child *)x; \
+ }
+
+/*
+ * Possible cmd types for "command stream", ie. payload of EXECBUF ioctl:
+ */
+enum msm_ccmd {
+ MSM_CCMD_NOP = 1, /* No payload, can be used to sync with host */
+ MSM_CCMD_IOCTL_SIMPLE,
+ MSM_CCMD_GEM_NEW,
+ MSM_CCMD_GEM_SET_IOVA,
+ MSM_CCMD_GEM_CPU_PREP,
+ MSM_CCMD_GEM_SET_NAME,
+ MSM_CCMD_GEM_SUBMIT,
+ MSM_CCMD_GEM_UPLOAD,
+ MSM_CCMD_SUBMITQUEUE_QUERY,
+ MSM_CCMD_WAIT_FENCE,
+ MSM_CCMD_SET_DEBUGINFO,
+ MSM_CCMD_LAST,
+};
+
+/*
+ * Common header at the front of every request message in the EXECBUF
+ * command stream.
+ */
+struct msm_ccmd_req {
+ uint32_t cmd; /* one of enum msm_ccmd */
+ uint32_t len; /* total size of the request, header + payload, in bytes */
+ uint32_t seqno; /* guest-assigned; host reflects it in msm_shmem::seqno */
+
+ /* Offset into shmem ctrl buffer to write response. The host ensures
+ * that it doesn't write outside the bounds of the ctrl buffer, but
+ * otherwise it is up to the guest to manage allocation of where responses
+ * should be written in the ctrl buf.
+ */
+ uint32_t rsp_off;
+};
+
+/*
+ * Common header at the front of every response. The guest pre-writes
+ * 'len' with the size it reserved at rsp_off, so the host can detect
+ * when it would need a larger rsp than the guest expects.
+ */
+struct msm_ccmd_rsp {
+ uint32_t len;
+};
+
+/* Convenience initializer for a request header (guest side). */
+#define MSM_CCMD(_cmd, _len) (struct msm_ccmd_req){ \
+ .cmd = MSM_CCMD_##_cmd, \
+ .len = (_len), \
+ }
+
+/*
+ * MSM_CCMD_NOP
+ */
+struct msm_ccmd_nop_req {
+ struct msm_ccmd_req hdr;
+};
+
+/*
+ * MSM_CCMD_IOCTL_SIMPLE
+ *
+ * Forward simple/flat IOC_RW or IOC_W ioctls. Limited ioctls are supported.
+ */
+struct msm_ccmd_ioctl_simple_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t cmd;
+ uint8_t payload[];
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_ioctl_simple_req)
+
+struct msm_ccmd_ioctl_simple_rsp {
+ struct msm_ccmd_rsp hdr;
+
+ /* ioctl return value, interrupted syscalls are handled on the host without
+ * returning to the guest.
+ */
+ int32_t ret;
+
+ /* The output payload for IOC_RW ioctls, the payload is the same size as
+ * msm_context_cmd_ioctl_simple_req.
+ *
+ * For IOC_W ioctls (userspace writes, kernel reads) this is zero length.
+ */
+ uint8_t payload[];
+};
+
+/*
+ * MSM_CCMD_GEM_NEW
+ *
+ * GEM buffer allocation, maps to DRM_MSM_GEM_NEW plus DRM_MSM_GEM_INFO to
+ * set the BO's iova (to avoid extra guest -> host trip)
+ *
+ * No response.
+ */
+struct msm_ccmd_gem_new_req {
+ struct msm_ccmd_req hdr;
+
+ uint64_t iova;
+ uint64_t size;
+ uint32_t flags;
+ uint32_t blob_id;
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_new_req)
+
+/*
+ * MSM_CCMD_GEM_SET_IOVA
+ *
+ * Set the buffer iova (for imported BOs). Also used to release the iova
+ * (by setting it to zero) when a BO is freed.
+ */
+struct msm_ccmd_gem_set_iova_req {
+ struct msm_ccmd_req hdr;
+
+ uint64_t iova;
+ uint32_t res_id;
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_iova_req)
+
+/*
+ * MSM_CCMD_GEM_CPU_PREP
+ *
+ * Maps to DRM_MSM_GEM_CPU_PREP
+ *
+ * Note: Since we don't want to block the single threaded host, this returns
+ * immediately with -EBUSY if the fence is not yet signaled. The guest
+ * should poll if needed.
+ */
+struct msm_ccmd_gem_cpu_prep_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t res_id;
+ uint32_t op;
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_cpu_prep_req)
+
+struct msm_ccmd_gem_cpu_prep_rsp {
+ struct msm_ccmd_rsp hdr;
+
+ int32_t ret;
+};
+
+/*
+ * MSM_CCMD_GEM_SET_NAME
+ *
+ * Maps to DRM_MSM_GEM_INFO:MSM_INFO_SET_NAME
+ *
+ * No response.
+ */
+struct msm_ccmd_gem_set_name_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t res_id;
+ /* Note: packet size aligned to 4 bytes, so the string name may
+ * be shorter than the packet header indicates.
+ */
+ uint32_t len;
+ uint8_t payload[];
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_name_req)
+
+/*
+ * MSM_CCMD_GEM_SUBMIT
+ *
+ * Maps to DRM_MSM_GEM_SUBMIT
+ *
+ * The actual for-reals cmdstream submission. Note this intentionally
+ * does not support relocs, since we already require a non-ancient
+ * kernel.
+ *
+ * Note, no in/out fence-fd, that synchronization is handled on guest
+ * kernel side (ugg).. need to come up with a better story for fencing.
+ * We probably need to sort something out for that to handle syncobjs.
+ *
+ * No response.
+ */
+struct msm_ccmd_gem_submit_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t flags;
+ uint32_t queue_id;
+ uint32_t nr_bos;
+ uint32_t nr_cmds;
+
+ /**
+ * The fence "seqno" assigned by the guest userspace. The host SUBMIT
+ * ioctl uses the MSM_SUBMIT_FENCE_SN_IN flag to let the guest assign
+ * the sequence #, to avoid the guest needing to wait for a response
+ * from the host.
+ */
+ uint32_t fence;
+
+ /**
+ * Payload is first an array of 'struct drm_msm_gem_submit_bo' of
+ * length determined by nr_bos (note that handles are guest resource
+ * ids which are translated to host GEM handles by the host VMM),
+ * followed by an array of 'struct drm_msm_gem_submit_cmd' of length
+ * determined by nr_cmds
+ */
+ int8_t payload[];
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_submit_req)
+
+/*
+ * MSM_CCMD_GEM_UPLOAD
+ *
+ * Upload data to a GEM buffer
+ *
+ * No response.
+ */
+struct msm_ccmd_gem_upload_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t res_id;
+ uint32_t pad;
+ uint32_t off;
+
+ /* Note: packet size aligned to 4 bytes, so the payload may
+ * be shorter than the packet header indicates.
+ */
+ uint32_t len;
+ uint8_t payload[];
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_upload_req)
+
+/*
+ * MSM_CCMD_SUBMITQUEUE_QUERY
+ *
+ * Maps to DRM_MSM_SUBMITQUEUE_QUERY
+ */
+struct msm_ccmd_submitqueue_query_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t queue_id;
+ uint32_t param;
+ uint32_t len; /* size of payload in rsp */
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_submitqueue_query_req)
+
+struct msm_ccmd_submitqueue_query_rsp {
+ struct msm_ccmd_rsp hdr;
+
+ int32_t ret;
+ uint32_t out_len;
+ uint8_t payload[];
+};
+
+/*
+ * MSM_CCMD_WAIT_FENCE
+ *
+ * Maps to DRM_MSM_WAIT_FENCE
+ *
+ * Note: Since we don't want to block the single threaded host, this returns
+ * immediately with -ETIMEDOUT if the fence is not yet signaled. The guest
+ * should poll if needed.
+ */
+struct msm_ccmd_wait_fence_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t queue_id;
+ uint32_t fence;
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_wait_fence_req)
+
+struct msm_ccmd_wait_fence_rsp {
+ struct msm_ccmd_rsp hdr;
+
+ int32_t ret;
+};
+
+/*
+ * MSM_CCMD_SET_DEBUGINFO
+ *
+ * Set per-guest-process debug info (comm and cmdline). For GPU faults/
+ * crashes, it isn't too useful to see the crosvm (for ex.) comm/cmdline,
+ * since the host process is only a proxy. This allows the guest to
+ * pass through the guest process comm and commandline for debugging
+ * purposes.
+ *
+ * No response.
+ */
+struct msm_ccmd_set_debuginfo_req {
+ struct msm_ccmd_req hdr;
+
+ uint32_t comm_len;
+ uint32_t cmdline_len;
+
+ /**
+ * Payload is first the comm string followed by cmdline string, padded
+ * out to a multiple of 4.
+ */
+ int8_t payload[];
+};
+DEFINE_CAST(msm_ccmd_req, msm_ccmd_set_debuginfo_req)
+
+#endif /* MSM_PROTO_H_ */
diff --git a/src/drm/msm/msm_renderer.c b/src/drm/msm/msm_renderer.c
new file mode 100644
index 00000000..a032ad39
--- /dev/null
+++ b/src/drm/msm/msm_renderer.c
@@ -0,0 +1,1286 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <xf86drm.h>
+
+#include "virgl_context.h"
+#include "virgl_util.h"
+#include "virglrenderer.h"
+
+#include "util/anon_file.h"
+#include "util/hash_table.h"
+#include "util/macros.h"
+#include "util/os_file.h"
+#include "util/u_atomic.h"
+#include "util/u_thread.h"
+
+#include "drm_fence.h"
+
+#include "msm_drm.h"
+#include "msm_proto.h"
+#include "msm_renderer.h"
+
+static unsigned nr_timelines;
+
+/**
+ * A single context (from the PoV of the virtio-gpu protocol) maps to
+ * a single drm device open. Other drm/msm constructs (ie. submitqueue)
+ * are opaque to the protocol.
+ *
+ * Typically each guest process will open a single virtio-gpu "context".
+ * The single drm device open maps to an individual msm_gem_address_space
+ * on the kernel side, providing GPU address space isolation between
+ * guest processes.
+ *
+ * GEM buffer objects are tracked via one of two id's:
+ * - resource-id: global, assigned by guest kernel
+ * - blob-id: context specific, assigned by guest userspace
+ *
+ * The blob-id is used to link the bo created via MSM_CCMD_GEM_NEW and
+ * the get_blob() cb. It is unused in the case of a bo that is imported
+ * from another context. An object is added to the blob table in GEM_NEW
+ * and removed in ctx->get_blob() (where it is added to resource_table).
+ * By avoiding having an obj in both tables, we can safely free remaining
+ * entries in either hashtable at context teardown.
+ */
+struct msm_context {
+ struct virgl_context base;
+
+ /* Host->guest shared control buffer (NULL until guest allocates the
+ * blob_id==0 shmem blob in msm_renderer_get_blob()):
+ */
+ struct msm_shmem *shmem;
+ uint8_t *rsp_mem; /* points at shmem + rsp_mem_offset */
+ uint32_t rsp_mem_sz; /* bytes of rsp_mem available */
+
+ /* Response currently being built for the in-flight ccmd (may be a
+ * malloc'd shadow copy, see msm_context_rsp()):
+ */
+ struct msm_ccmd_rsp *current_rsp;
+
+ int fd; /* the host drm/msm device fd for this context */
+
+ /* blob_id -> msm_object, entries move to resource_table in get_blob() */
+ struct hash_table *blob_table;
+ /* res_id -> msm_object */
+ struct hash_table *resource_table;
+
+ /**
+ * Maps submit-queue id to ring_idx
+ */
+ struct hash_table *sq_to_ring_idx_table;
+
+ int eventfd;
+
+ /**
+ * Indexed by ring_idx-1, which is the same as the submitqueue priority+1.
+ * On the kernel side, there is some drm_sched_entity per {drm_file, prio}
+ * tuple, and the sched entity determines the fence timeline, ie. submits
+ * against a single sched entity complete in fifo order.
+ */
+ struct drm_timeline timelines[];
+};
+DEFINE_CAST(virgl_context, msm_context)
+
+/* True if the variable-length payload advertised by req->len fits inside
+ * the overall packet size in req->hdr.len.
+ *
+ * NOTE(review): both operands are unsigned, so if hdr.len < sizeof(*req)
+ * the subtraction wraps to a huge value and the check passes — verify the
+ * dispatcher guarantees hdr.len >= sizeof the specific request struct.
+ */
+#define valid_payload_len(req) ((req)->len <= ((req)->hdr.len - sizeof(*(req))))
+
+/* Look up 'key' in a u32_keys hashtable; returns NULL for key==0 (which
+ * the table cannot represent) or when not found.
+ */
+static struct hash_entry *
+table_search(struct hash_table *ht, uint32_t key)
+{
+ /* zero is not a valid key for u32_keys hashtable: */
+ if (!key)
+ return NULL;
+ return _mesa_hash_table_search(ht, (void *)(uintptr_t)key);
+}
+
+/* Wrapper for DRM_MSM_GEM_INFO: 'val' is both input (value written for
+ * SET_* params) and output (value read back for GET_* params). Returns
+ * 0 on success or the ioctl's negative error code.
+ */
+static int
+gem_info(struct msm_context *mctx, uint32_t handle, uint32_t param, uint64_t *val)
+{
+ struct drm_msm_gem_info args = {
+ .handle = handle,
+ .info = param,
+ .value = *val,
+ };
+ int ret;
+
+ ret = drmCommandWriteRead(mctx->fd, DRM_MSM_GEM_INFO, &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ *val = args.value;
+ return 0;
+}
+
+/* Close a GEM handle on the given device fd. */
+static int
+gem_close(int fd, uint32_t handle)
+{
+ struct drm_gem_close close_req = {
+ .handle = handle,
+ };
+ return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_req);
+}
+
+/* Host-side tracking for a single GEM buffer object. An object lives in
+ * either blob_table (keyed by blob_id, after GEM_NEW) or resource_table
+ * (keyed by res_id, after get_blob()/attach), never both.
+ */
+struct msm_object {
+ uint32_t blob_id; /* guest-userspace assigned, 0 if imported */
+ uint32_t res_id; /* guest-kernel assigned virtio-gpu resource id */
+ uint32_t handle; /* host GEM handle */
+ uint32_t flags; /* MSM_BO_* flags used at allocation */
+ uint32_t size; /* size in bytes */
+ bool exported : 1; /* already linked to a virgl resource, see get_blob() */
+ bool exportable : 1; /* allow dmabuf export via export_opaque_handle() */
+ struct virgl_resource *res;
+ void *map; /* host CPU mapping, lazily created by map_object() */
+};
+
+/* Allocate a zero-initialized msm_object for an existing GEM handle.
+ * Returns NULL on allocation failure. Caller owns the returned memory
+ * (freed in detach_resource or at context teardown).
+ */
+static struct msm_object *
+msm_object_create(uint32_t handle, uint32_t flags, uint32_t size)
+{
+ struct msm_object *obj = calloc(1, sizeof(*obj));
+
+ if (!obj)
+ return NULL;
+
+ obj->handle = handle;
+ obj->flags = flags;
+ obj->size = size;
+
+ return obj;
+}
+
+/* A blob_id from the guest is usable iff it is non-zero and not already
+ * present in blob_table (guest userspace assigns these).
+ */
+static bool
+valid_blob_id(struct msm_context *mctx, uint32_t blob_id)
+{
+ /* must be non-zero: */
+ if (blob_id == 0)
+ return false;
+
+ /* must not already be in-use: */
+ if (table_search(mctx->blob_table, blob_id))
+ return false;
+
+ return true;
+}
+
+/* Record 'obj' in blob_table under blob_id; caller must have validated
+ * the id first (assert only catches misuse in debug builds).
+ */
+static void
+msm_object_set_blob_id(struct msm_context *mctx, struct msm_object *obj, uint32_t blob_id)
+{
+ assert(valid_blob_id(mctx, blob_id));
+
+ obj->blob_id = blob_id;
+ _mesa_hash_table_insert(mctx->blob_table, (void *)(uintptr_t)obj->blob_id, obj);
+}
+
+/* A res_id is usable iff no object is already registered under it. */
+static bool
+valid_res_id(struct msm_context *mctx, uint32_t res_id)
+{
+ return !table_search(mctx->resource_table, res_id);
+}
+
+/* Record 'obj' in resource_table under res_id; caller validates first. */
+static void
+msm_object_set_res_id(struct msm_context *mctx, struct msm_object *obj, uint32_t res_id)
+{
+ assert(valid_res_id(mctx, res_id));
+
+ obj->res_id = res_id;
+ _mesa_hash_table_insert(mctx->resource_table, (void *)(uintptr_t)obj->res_id, obj);
+}
+
+/* Unlink 'obj' from resource_table. Does not free the object or close
+ * its GEM handle — the caller (detach_resource) does that.
+ */
+static void
+msm_remove_object(struct msm_context *mctx, struct msm_object *obj)
+{
+ drm_dbg("obj=%p, blob_id=%u, res_id=%u", obj, obj->blob_id, obj->res_id);
+ _mesa_hash_table_remove_key(mctx->resource_table, (void *)(uintptr_t)obj->res_id);
+}
+
+/* Remove and return the object registered under 'blob_id', or NULL.
+ * Ownership transfers to the caller (used by get_blob() to move the
+ * object into resource_table).
+ */
+static struct msm_object *
+msm_retrieve_object_from_blob_id(struct msm_context *mctx, uint64_t blob_id)
+{
+ assert((blob_id >> 32) == 0);
+ uint32_t id = blob_id;
+ struct hash_entry *entry = table_search(mctx->blob_table, id);
+ if (!entry)
+ return NULL;
+ struct msm_object *obj = entry->data;
+ _mesa_hash_table_remove(mctx->blob_table, entry);
+ return obj;
+}
+
+/* Non-destructive lookup by res_id; returns NULL when not found. */
+static struct msm_object *
+msm_get_object_from_res_id(struct msm_context *mctx, uint32_t res_id)
+{
+ const struct hash_entry *entry = table_search(mctx->resource_table, res_id);
+ return likely(entry) ? entry->data : NULL;
+}
+
+/* Translate a guest res_id to the host GEM handle, 0 if unknown. */
+static uint32_t
+handle_from_res_id(struct msm_context *mctx, uint32_t res_id)
+{
+ struct msm_object *obj = msm_get_object_from_res_id(mctx, res_id);
+ if (!obj)
+ return 0; /* zero is an invalid GEM handle */
+ return obj->handle;
+}
+
+/* Probe whether the host kernel supports MSM_BO_CACHED_COHERENT by
+ * attempting (and immediately freeing) a one-page test allocation.
+ */
+static bool
+has_cached_coherent(int fd)
+{
+ struct drm_msm_gem_new new_req = {
+ .size = 0x1000,
+ .flags = MSM_BO_CACHED_COHERENT,
+ };
+
+ /* Do a test allocation to see if cached-coherent is supported: */
+ if (!drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &new_req, sizeof(new_req))) {
+ gem_close(fd, new_req.handle);
+ return true;
+ }
+
+ return false;
+}
+
+/* Read a 64-bit DRM_MSM_GET_PARAM value; on failure *value is left 0
+ * and the ioctl error is returned.
+ */
+static int
+get_param64(int fd, uint32_t param, uint64_t *value)
+{
+ struct drm_msm_param req = {
+ .pipe = MSM_PIPE_3D0,
+ .param = param,
+ };
+ int ret;
+
+ *value = 0;
+
+ ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
+ if (ret)
+ return ret;
+
+ *value = req.value;
+
+ return 0;
+}
+
+/* As get_param64(), but truncates the value to 32 bits. */
+static int
+get_param32(int fd, uint32_t param, uint32_t *value)
+{
+ uint64_t v64;
+ int ret = get_param64(fd, param, &v64);
+ *value = v64;
+ return ret;
+}
+
+/**
+ * Probe capset params.
+ */
+/**
+ * Probe capset params.
+ *
+ * Fills in the drm capset advertised to the guest from the host kernel's
+ * GET_PARAM values. Returns 0 on success, -ENOTSUP if the host kernel is
+ * too old (lacks MSM_SUBMIT_FENCE_SN_IN or userspace-allocated IOVA).
+ * Also latches the global nr_timelines from the priority count.
+ */
+int
+msm_renderer_probe(int fd, struct virgl_renderer_capset_drm *capset)
+{
+ drm_log("");
+
+ /* Require MSM_SUBMIT_FENCE_SN_IN: */
+ if (capset->version_minor < 9) {
+ drm_log("Host kernel too old");
+ return -ENOTSUP;
+ }
+
+ capset->wire_format_version = 2;
+ capset->u.msm.has_cached_coherent = has_cached_coherent(fd);
+
+ /* Errors are tolerated here: get_param64() zeroes the value first,
+ * so an unsupported param simply reports as 0 (va_size==0 is then
+ * rejected below).
+ */
+ get_param32(fd, MSM_PARAM_PRIORITIES, &capset->u.msm.priorities);
+ get_param64(fd, MSM_PARAM_VA_START, &capset->u.msm.va_start);
+ get_param64(fd, MSM_PARAM_VA_SIZE, &capset->u.msm.va_size);
+ get_param32(fd, MSM_PARAM_GPU_ID, &capset->u.msm.gpu_id);
+ get_param32(fd, MSM_PARAM_GMEM_SIZE, &capset->u.msm.gmem_size);
+ get_param64(fd, MSM_PARAM_GMEM_BASE, &capset->u.msm.gmem_base);
+ get_param64(fd, MSM_PARAM_CHIP_ID, &capset->u.msm.chip_id);
+ get_param32(fd, MSM_PARAM_MAX_FREQ, &capset->u.msm.max_freq);
+
+ nr_timelines = capset->u.msm.priorities;
+
+ drm_log("wire_format_version: %u", capset->wire_format_version);
+ drm_log("version_major: %u", capset->version_major);
+ drm_log("version_minor: %u", capset->version_minor);
+ drm_log("version_patchlevel: %u", capset->version_patchlevel);
+ drm_log("has_cached_coherent: %u", capset->u.msm.has_cached_coherent);
+ drm_log("priorities: %u", capset->u.msm.priorities);
+ drm_log("va_start: 0x%0" PRIx64, capset->u.msm.va_start);
+ drm_log("va_size: 0x%0" PRIx64, capset->u.msm.va_size);
+ drm_log("gpu_id: %u", capset->u.msm.gpu_id);
+ drm_log("gmem_size: %u", capset->u.msm.gmem_size);
+ drm_log("gmem_base: 0x%0" PRIx64, capset->u.msm.gmem_base);
+ drm_log("chip_id: 0x%0" PRIx64, capset->u.msm.chip_id);
+ drm_log("max_freq: %u", capset->u.msm.max_freq);
+
+ if (!capset->u.msm.va_size) {
+ drm_log("Host kernel does not support userspace allocated IOVA");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/* Hash-table destroy callback: objects left in either table at teardown
+ * are simply freed (their GEM handles die with the device fd below).
+ */
+static void
+resource_delete_fxn(struct hash_entry *entry)
+{
+ free((void *)entry->data);
+}
+
+/* Tear down a context: timelines, eventfd, shmem mapping, tracking
+ * tables, and finally the drm device fd (which releases all GEM
+ * handles and the address space).
+ */
+static void
+msm_renderer_destroy(struct virgl_context *vctx)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+
+ for (unsigned i = 0; i < nr_timelines; i++)
+ drm_timeline_fini(&mctx->timelines[i]);
+
+ close(mctx->eventfd);
+
+ /* NOTE(review): get_blob() mmap'd blob_size bytes but only
+ * sizeof(*shmem) is unmapped here — the remainder of the mapping
+ * appears to leak; verify against the shmem blob size handling.
+ */
+ if (mctx->shmem)
+ munmap(mctx->shmem, sizeof(*mctx->shmem));
+
+ _mesa_hash_table_destroy(mctx->resource_table, resource_delete_fxn);
+ _mesa_hash_table_destroy(mctx->blob_table, resource_delete_fxn);
+ _mesa_hash_table_destroy(mctx->sq_to_ring_idx_table, NULL);
+
+ close(mctx->fd);
+ free(mctx);
+}
+
+/* Called when a virgl resource is attached to this context. If the
+ * resource is not yet known (created by another context or externally),
+ * import its dmabuf to create a GEM handle in our context and register
+ * it under the res_id. Silently ignores non-dmabuf resources.
+ */
+static void
+msm_renderer_attach_resource(struct virgl_context *vctx, struct virgl_resource *res)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+ struct msm_object *obj = msm_get_object_from_res_id(mctx, res->res_id);
+
+ drm_dbg("obj=%p, res_id=%u", obj, res->res_id);
+
+ if (!obj) {
+ int fd;
+ enum virgl_resource_fd_type fd_type = virgl_resource_export_fd(res, &fd);
+
+ /* If importing a dmabuf resource created by another context (or
+ * externally), then import it to create a gem obj handle in our
+ * context:
+ */
+ if (fd_type == VIRGL_RESOURCE_FD_DMABUF) {
+ uint32_t handle;
+ int ret;
+
+ ret = drmPrimeFDToHandle(mctx->fd, fd, &handle);
+ if (ret) {
+ drm_log("Could not import: %s", strerror(errno));
+ close(fd);
+ return;
+ }
+
+ /* lseek() to get bo size */
+ /* NOTE(review): off_t is narrowed to int here, so sizes >= 2GiB
+ * would be mis-handled — confirm bos are bounded well below that.
+ */
+ int size = lseek(fd, 0, SEEK_END);
+ if (size < 0)
+ drm_log("lseek failed: %d (%s)", size, strerror(errno));
+ close(fd);
+
+ obj = msm_object_create(handle, 0, size);
+ if (!obj)
+ return;
+
+ msm_object_set_res_id(mctx, obj, res->res_id);
+
+ drm_dbg("obj=%p, res_id=%u, handle=%u", obj, obj->res_id, obj->handle);
+ } else {
+ if (fd_type != VIRGL_RESOURCE_FD_INVALID)
+ close(fd);
+ return;
+ }
+ }
+
+ obj->res = res;
+}
+
+/* Called when a virgl resource is detached. For the shmem control
+ * buffer this unmaps it and clears the rsp pointers; for GEM-backed
+ * resources it drops the tracking entry, any CPU mapping, the GEM
+ * handle, and frees the msm_object.
+ */
+static void
+msm_renderer_detach_resource(struct virgl_context *vctx, struct virgl_resource *res)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+ struct msm_object *obj = msm_get_object_from_res_id(mctx, res->res_id);
+
+ drm_dbg("obj=%p, res_id=%u", obj, res->res_id);
+
+ if (!obj || (obj->res != res))
+ return;
+
+ if (res->fd_type == VIRGL_RESOURCE_FD_SHM) {
+ /* NOTE(review): the shmem mapping was created with length
+ * blob_size in get_blob(), but only sizeof(*shmem) bytes are
+ * unmapped here — the rest of the mapping appears to leak.
+ */
+ munmap(mctx->shmem, sizeof(*mctx->shmem));
+
+ mctx->shmem = NULL;
+ mctx->rsp_mem = NULL;
+ mctx->rsp_mem_sz = 0;
+
+ /* shmem resources don't have an backing host GEM bo:, so bail now: */
+ return;
+ }
+
+ msm_remove_object(mctx, obj);
+
+ if (obj->map)
+ munmap(obj->map, obj->size);
+
+ gem_close(mctx->fd, obj->handle);
+
+ free(obj);
+}
+
+/* Export the dmabuf fd backing 'res', if the object was marked
+ * exportable in get_blob(). Returns the fd type, or FD_INVALID on
+ * unknown res_id, non-exportable object, or export failure.
+ */
+static enum virgl_resource_fd_type
+msm_renderer_export_opaque_handle(struct virgl_context *vctx, struct virgl_resource *res,
+ int *out_fd)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+ struct msm_object *obj = msm_get_object_from_res_id(mctx, res->res_id);
+ int ret;
+
+ drm_dbg("obj=%p, res_id=%u", obj, res->res_id);
+
+ if (!obj) {
+ drm_log("invalid res_id %u", res->res_id);
+ return VIRGL_RESOURCE_FD_INVALID;
+ }
+
+ if (!obj->exportable) {
+ /* crosvm seems to like to export things it doesn't actually need an
+ * fd for.. don't let it spam our fd table!
+ */
+ return VIRGL_RESOURCE_FD_INVALID;
+ }
+
+ ret = drmPrimeHandleToFD(mctx->fd, obj->handle, DRM_CLOEXEC | DRM_RDWR, out_fd);
+ if (ret) {
+ drm_log("failed to get dmabuf fd: %s", strerror(errno));
+ return VIRGL_RESOURCE_FD_INVALID;
+ }
+
+ return VIRGL_RESOURCE_FD_DMABUF;
+}
+
+/* Transfer-based upload/download is not part of this protocol (guests
+ * use GEM_UPLOAD / mappable blobs instead); always fails.
+ */
+static int
+msm_renderer_transfer_3d(UNUSED struct virgl_context *vctx,
+ UNUSED struct virgl_resource *res,
+ UNUSED const struct vrend_transfer_info *info,
+ UNUSED int transfer_mode)
+{
+ drm_log("unsupported");
+ return -1;
+}
+
+/* Resolve a guest blob allocation to host storage.
+ *
+ * blob_id==0 is reserved for the single per-context shmem control buffer
+ * (an anonymous memfd, sealed against resizing). Any other blob_id must
+ * have been created earlier by MSM_CCMD_GEM_NEW; the object is moved from
+ * blob_table to resource_table under the new res_id and returned either
+ * as a dmabuf fd (SHAREABLE) or as an opaque GEM handle.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int
+msm_renderer_get_blob(struct virgl_context *vctx, uint32_t res_id, uint64_t blob_id,
+ uint64_t blob_size, uint32_t blob_flags,
+ struct virgl_context_blob *blob)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+
+ drm_dbg("blob_id=%" PRIu64 ", res_id=%u, blob_size=%" PRIu64 ", blob_flags=0x%x",
+ blob_id, res_id, blob_size, blob_flags);
+
+ if ((blob_id >> 32) != 0) {
+ drm_log("invalid blob_id: %" PRIu64, blob_id);
+ return -EINVAL;
+ }
+
+ /* blob_id of zero is reserved for the shmem buffer: */
+ if (blob_id == 0) {
+ int fd;
+
+ if (blob_flags != VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
+ drm_log("invalid blob_flags: 0x%x", blob_flags);
+ return -EINVAL;
+ }
+
+ if (mctx->shmem) {
+ drm_log("There can be only one!");
+ return -EINVAL;
+ }
+
+ fd = os_create_anonymous_file(blob_size, "msm-shmem");
+ if (fd < 0) {
+ drm_log("Failed to create shmem file: %s", strerror(errno));
+ return -ENOMEM;
+ }
+
+ /* Seal the file so neither side can resize it under the other: */
+ int ret = fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_SHRINK | F_SEAL_GROW);
+ if (ret) {
+ drm_log("fcntl failed: %s", strerror(errno));
+ close(fd);
+ return -ENOMEM;
+ }
+
+ /* NOTE(review): blob_size is guest-controlled and not checked to be
+ * >= sizeof(*mctx->shmem) before rsp_mem_offset/rsp_mem_sz are
+ * derived below — confirm the caller enforces a minimum size.
+ */
+ mctx->shmem = mmap(NULL, blob_size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
+ if (mctx->shmem == MAP_FAILED) {
+ drm_log("shmem mmap failed: %s", strerror(errno));
+ close(fd);
+ return -ENOMEM;
+ }
+
+ mctx->shmem->rsp_mem_offset = sizeof(*mctx->shmem);
+
+ uint8_t *ptr = (uint8_t *)mctx->shmem;
+ mctx->rsp_mem = &ptr[mctx->shmem->rsp_mem_offset];
+ mctx->rsp_mem_sz = blob_size - mctx->shmem->rsp_mem_offset;
+
+ blob->type = VIRGL_RESOURCE_FD_SHM;
+ blob->u.fd = fd;
+ blob->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
+
+ return 0;
+ }
+
+ if (!valid_res_id(mctx, res_id)) {
+ drm_log("Invalid res_id %u", res_id);
+ return -EINVAL;
+ }
+
+ struct msm_object *obj = msm_retrieve_object_from_blob_id(mctx, blob_id);
+
+ /* If GEM_NEW fails, we can end up here without a backing obj: */
+ if (!obj) {
+ drm_log("No object");
+ return -ENOENT;
+ }
+
+ /* a memory can only be exported once; we don't want two resources to point
+ * to the same storage.
+ */
+ if (obj->exported) {
+ drm_log("Already exported!");
+ return -EINVAL;
+ }
+
+ msm_object_set_res_id(mctx, obj, res_id);
+
+ if (blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_SHAREABLE) {
+ int fd, ret;
+
+ ret = drmPrimeHandleToFD(mctx->fd, obj->handle, DRM_CLOEXEC | DRM_RDWR, &fd);
+ if (ret) {
+ drm_log("Export to fd failed");
+ return -EINVAL;
+ }
+
+ blob->type = VIRGL_RESOURCE_FD_DMABUF;
+ blob->u.fd = fd;
+ } else {
+ blob->type = VIRGL_RESOURCE_OPAQUE_HANDLE;
+ blob->u.opaque_handle = obj->handle;
+ }
+
+ /* Map-cache attribute must match how the bo was allocated: */
+ if (obj->flags & MSM_BO_CACHED_COHERENT) {
+ blob->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
+ } else {
+ blob->map_info = VIRGL_RENDERER_MAP_CACHE_WC;
+ }
+
+ obj->exported = true;
+ obj->exportable = !!(blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE);
+
+ return 0;
+}
+
+/* Raw pointer into rsp_mem at the guest-requested offset; no bounds
+ * checking (callers must use msm_context_rsp() for that).
+ */
+static void *
+msm_context_rsp_noshadow(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ return &mctx->rsp_mem[hdr->rsp_off];
+}
+
+/* Validate the guest-supplied rsp_off against rsp_mem and return a
+ * writable rsp buffer of at least 'len' bytes, or NULL on a bad offset
+ * or OOM. Sets mctx->current_rsp as a side effect.
+ */
+static void *
+msm_context_rsp(struct msm_context *mctx, const struct msm_ccmd_req *hdr, unsigned len)
+{
+ unsigned rsp_mem_sz = mctx->rsp_mem_sz;
+ unsigned off = hdr->rsp_off;
+
+ /* Both guest-controlled values; reject anything outside rsp_mem: */
+ if ((off > rsp_mem_sz) || (len > rsp_mem_sz - off)) {
+ drm_log("invalid shm offset: off=%u, len=%u (shmem_size=%u)", off, len, rsp_mem_sz);
+ return NULL;
+ }
+
+ struct msm_ccmd_rsp *rsp = msm_context_rsp_noshadow(mctx, hdr);
+
+ assert(len >= sizeof(*rsp));
+
+ /* With newer host and older guest, we could end up wanting a larger rsp struct
+ * than guest expects, so allocate a shadow buffer in this case rather than
+ * having to deal with this in all the different ccmd handlers. This is similar
+ * in a way to what drm_ioctl() does.
+ *
+ * Note: rsp->len here is read from guest-writable shmem (the size the
+ * guest reserved). The malloc'd shadow is presumably copied back and
+ * freed after the handler completes — that code is outside this file
+ * chunk; verify ownership there.
+ */
+ if (len > rsp->len) {
+ rsp = malloc(len);
+ if (!rsp)
+ return NULL;
+ rsp->len = len;
+ }
+
+ mctx->current_rsp = rsp;
+
+ return rsp;
+}
+
+/* MSM_CCMD_NOP: does nothing; used by the guest to sync on seqno. */
+static int
+msm_ccmd_nop(UNUSED struct msm_context *mctx, UNUSED const struct msm_ccmd_req *hdr)
+{
+ return 0;
+}
+
+/* MSM_CCMD_IOCTL_SIMPLE: forward a flat ioctl from the guest.
+ *
+ * The payload size is derived from the ioctl number itself (_IOC_SIZE),
+ * bounded to 128 bytes, and the ioctl must be on the allow-list below.
+ * Additionally records submitqueue-id -> priority on SUBMITQUEUE_NEW so
+ * that gem_submit can pick the right fence timeline.
+ */
+static int
+msm_ccmd_ioctl_simple(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_ioctl_simple_req *req = to_msm_ccmd_ioctl_simple_req(hdr);
+ unsigned payload_len = _IOC_SIZE(req->cmd);
+ unsigned req_len = size_add(sizeof(*req), payload_len);
+
+ /* Guest-declared packet size must exactly match the ioctl's size: */
+ if (hdr->len != req_len) {
+ drm_log("%u != %u", hdr->len, req_len);
+ return -EINVAL;
+ }
+
+ /* Apply a reasonable upper bound on ioctl size: */
+ if (payload_len > 128) {
+ drm_log("invalid ioctl payload length: %u", payload_len);
+ return -EINVAL;
+ }
+
+ /* Allow-list of supported ioctls: */
+ unsigned iocnr = _IOC_NR(req->cmd) - DRM_COMMAND_BASE;
+ switch (iocnr) {
+ case DRM_MSM_GET_PARAM:
+ case DRM_MSM_SUBMITQUEUE_NEW:
+ case DRM_MSM_SUBMITQUEUE_CLOSE:
+ break;
+ default:
+ drm_log("invalid ioctl: %08x (%u)", req->cmd, iocnr);
+ return -EINVAL;
+ }
+
+ struct msm_ccmd_ioctl_simple_rsp *rsp;
+ unsigned rsp_len = sizeof(*rsp);
+
+ if (req->cmd & IOC_OUT)
+ rsp_len = size_add(rsp_len, payload_len);
+
+ rsp = msm_context_rsp(mctx, hdr, rsp_len);
+
+ if (!rsp)
+ return -ENOMEM;
+
+ /* Copy the payload because the kernel can write (if IOC_OUT bit
+ * is set) and to avoid casting away the const:
+ */
+ char payload[payload_len];
+ memcpy(payload, req->payload, payload_len);
+
+ rsp->ret = drmIoctl(mctx->fd, req->cmd, payload);
+
+ if (req->cmd & IOC_OUT)
+ memcpy(rsp->payload, payload, payload_len);
+
+ if (iocnr == DRM_MSM_SUBMITQUEUE_NEW && !rsp->ret) {
+ struct drm_msm_submitqueue *args = (void *)payload;
+
+ drm_dbg("submitqueue %u, prio %u", args->id, args->prio);
+
+ /* Remember which fence timeline submits on this queue belong to: */
+ _mesa_hash_table_insert(mctx->sq_to_ring_idx_table, (void *)(uintptr_t)args->id,
+ (void *)(uintptr_t)args->prio);
+ }
+
+ return 0;
+}
+
+/* MSM_CCMD_GEM_NEW: allocate a GEM bo, pin the guest-chosen iova, and
+ * register the object in blob_table under the guest-assigned blob_id
+ * (get_blob() later moves it to resource_table).
+ *
+ * There is no response message; failures bump shmem->async_error, which
+ * the guest treats as context-lost.
+ */
+static int
+msm_ccmd_gem_new(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_gem_new_req *req = to_msm_ccmd_gem_new_req(hdr);
+ int ret = 0;
+
+ if (!valid_blob_id(mctx, req->blob_id)) {
+ drm_log("Invalid blob_id %u", req->blob_id);
+ ret = -EINVAL;
+ goto out_error;
+ }
+
+ /*
+ * First part, allocate the GEM bo:
+ */
+ struct drm_msm_gem_new gem_new = {
+ .size = req->size,
+ .flags = req->flags,
+ };
+
+ ret = drmCommandWriteRead(mctx->fd, DRM_MSM_GEM_NEW, &gem_new, sizeof(gem_new));
+ if (ret) {
+ drm_log("GEM_NEW failed: %d (%s)", ret, strerror(errno));
+ goto out_error;
+ }
+
+ /*
+ * Second part, set the iova:
+ */
+ uint64_t iova = req->iova;
+ ret = gem_info(mctx, gem_new.handle, MSM_INFO_SET_IOVA, &iova);
+ if (ret) {
+ drm_log("SET_IOVA failed: %d (%s)", ret, strerror(errno));
+ goto out_close;
+ }
+
+ /*
+ * And then finally create our msm_object for tracking the resource,
+ * and add to blob table:
+ */
+ struct msm_object *obj = msm_object_create(gem_new.handle, req->flags, req->size);
+
+ if (!obj) {
+ ret = -ENOMEM;
+ goto out_close;
+ }
+
+ msm_object_set_blob_id(mctx, obj, req->blob_id);
+
+ drm_dbg("obj=%p, blob_id=%u, handle=%u, iova=%" PRIx64, obj, obj->blob_id,
+ obj->handle, iova);
+
+ return 0;
+
+out_close:
+ gem_close(mctx->fd, gem_new.handle);
+out_error:
+ if (mctx->shmem)
+ mctx->shmem->async_error++;
+ return ret;
+}
+
+/* MSM_CCMD_GEM_SET_IOVA: pin (iova != 0) or release (iova == 0) the GPU
+ * address of an already-tracked bo (used for imported bos and on free).
+ *
+ * No response; failures bump shmem->async_error. Note this always
+ * returns 0 (even from out_error), unlike gem_new which returns ret —
+ * NOTE(review): verify this asymmetry is intentional.
+ */
+static int
+msm_ccmd_gem_set_iova(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_gem_set_iova_req *req = to_msm_ccmd_gem_set_iova_req(hdr);
+ struct msm_object *obj = msm_get_object_from_res_id(mctx, req->res_id);
+ int ret = 0;
+
+ if (!obj) {
+ drm_log("Could not lookup obj: res_id=%u", req->res_id);
+ ret = -ENOENT;
+ goto out_error;
+ }
+
+ /* Both branches issue the same MSM_INFO_SET_IOVA ioctl; they differ
+ * only in the trace label (iova==0 means "clear"):
+ */
+ uint64_t iova = req->iova;
+ if (iova) {
+ TRACE_SCOPE_BEGIN("SET_IOVA");
+ ret = gem_info(mctx, obj->handle, MSM_INFO_SET_IOVA, &iova);
+ TRACE_SCOPE_END("SET_IOVA");
+ } else {
+ TRACE_SCOPE_BEGIN("CLEAR_IOVA");
+ ret = gem_info(mctx, obj->handle, MSM_INFO_SET_IOVA, &iova);
+ TRACE_SCOPE_END("CLEAR_IOVA");
+ }
+ if (ret) {
+ drm_log("SET_IOVA failed: %d (%s)", ret, strerror(errno));
+ goto out_error;
+ }
+
+ drm_dbg("obj=%p, blob_id=%u, handle=%u, iova=%" PRIx64, obj, obj->blob_id,
+ obj->handle, iova);
+
+ return 0;
+
+out_error:
+ if (mctx->shmem)
+ mctx->shmem->async_error++;
+ return 0;
+}
+
+/* MSM_CCMD_GEM_CPU_PREP: non-blocking CPU-access readiness check.
+ * MSM_PREP_NOSYNC is forced so the single-threaded host never blocks;
+ * the kernel's result (-EBUSY if still busy) is returned in rsp->ret
+ * and the guest polls.
+ */
+static int
+msm_ccmd_gem_cpu_prep(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_gem_cpu_prep_req *req = to_msm_ccmd_gem_cpu_prep_req(hdr);
+ struct msm_ccmd_gem_cpu_prep_rsp *rsp = msm_context_rsp(mctx, hdr, sizeof(*rsp));
+
+ if (!rsp)
+ return -ENOMEM;
+
+ struct drm_msm_gem_cpu_prep args = {
+ .handle = handle_from_res_id(mctx, req->res_id),
+ .op = req->op | MSM_PREP_NOSYNC,
+ };
+
+ rsp->ret = drmCommandWrite(mctx->fd, DRM_MSM_GEM_CPU_PREP, &args, sizeof(args));
+
+ return 0;
+}
+
+/* MSM_CCMD_GEM_SET_NAME: forward a debug name for the bo to the kernel
+ * (MSM_INFO_SET_NAME). No response; ioctl failure is only logged.
+ */
+static int
+msm_ccmd_gem_set_name(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_gem_set_name_req *req = to_msm_ccmd_gem_set_name_req(hdr);
+
+ struct drm_msm_gem_info args = {
+ .handle = handle_from_res_id(mctx, req->res_id),
+ .info = MSM_INFO_SET_NAME,
+ .value = VOID2U64(req->payload),
+ .len = req->len,
+ };
+
+ /* Reject guest-declared name lengths that overrun the packet: */
+ if (!valid_payload_len(req))
+ return -EINVAL;
+
+ int ret = drmCommandWrite(mctx->fd, DRM_MSM_GEM_INFO, &args, sizeof(args));
+ if (ret)
+ drm_log("ret=%d, len=%u, name=%.*s", ret, req->len, req->len, req->payload);
+
+ return 0;
+}
+
+/* Log the bos and cmds of a failed submit for debugging; compiled to a
+ * no-op in NDEBUG builds.
+ */
+static void
+msm_dump_submit(struct drm_msm_gem_submit *req)
+{
+#ifndef NDEBUG
+ drm_log(" flags=0x%x, queueid=%u", req->flags, req->queueid);
+ for (unsigned i = 0; i < req->nr_bos; i++) {
+ struct drm_msm_gem_submit_bo *bos = U642VOID(req->bos);
+ struct drm_msm_gem_submit_bo *bo = &bos[i];
+ drm_log(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
+ }
+ for (unsigned i = 0; i < req->nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd *cmds = U642VOID(req->cmds);
+ struct drm_msm_gem_submit_cmd *cmd = &cmds[i];
+ drm_log(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u", i,
+ cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
+ }
+#else
+ (void)req;
+#endif
+}
+
+/* MSM_CCMD_GEM_SUBMIT: the real cmdstream submission.
+ *
+ * The payload carries nr_bos bo entries followed by nr_cmds cmd entries.
+ * bo handles arrive as guest res_ids and are rewritten to host GEM
+ * handles in a host-side copy (never in guest-visible memory). The
+ * guest picks the fence seqno (MSM_SUBMIT_FENCE_SN_IN) and the host
+ * attaches the returned fence fd to the submitqueue's timeline.
+ *
+ * No response; submit failure bumps shmem->async_error.
+ */
+static int
+msm_ccmd_gem_submit(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_gem_submit_req *req = to_msm_ccmd_gem_submit_req(hdr);
+
+ /* size_add/size_mul saturate to SIZE_MAX, so overflow cannot slip
+ * past the bounds check below:
+ */
+ size_t sz = sizeof(*req);
+ sz = size_add(sz, size_mul(req->nr_bos, sizeof(struct drm_msm_gem_submit_bo)));
+ sz = size_add(sz, size_mul(req->nr_cmds, sizeof(struct drm_msm_gem_submit_cmd)));
+
+ /* Normally kernel would validate out of bounds situations and return -EFAULT,
+ * but since we are copying the bo handles, we need to validate that the
+ * guest can't trigger us to make an out of bounds memory access:
+ */
+ if (sz > hdr->len) {
+ drm_log("out of bounds: nr_bos=%u, nr_cmds=%u", req->nr_bos, req->nr_cmds);
+ return -ENOSPC;
+ }
+
+ /* Keep small bo lists on the stack, spill large ones to the heap.
+ * NOTE(review): when !bos_on_stack this declares a zero-length VLA,
+ * which is formally UB in ISO C (works as a GCC/clang extension).
+ */
+ const unsigned bo_limit = 8192 / sizeof(struct drm_msm_gem_submit_bo);
+ bool bos_on_stack = req->nr_bos < bo_limit;
+ struct drm_msm_gem_submit_bo _bos[bos_on_stack ? req->nr_bos : 0];
+ struct drm_msm_gem_submit_bo *bos;
+
+ if (bos_on_stack) {
+ bos = _bos;
+ } else {
+ bos = malloc(req->nr_bos * sizeof(bos[0]));
+ if (!bos)
+ return -ENOMEM;
+ }
+
+ memcpy(bos, req->payload, req->nr_bos * sizeof(bos[0]));
+
+ /* Translate guest res_ids to host GEM handles (unknown ids become 0,
+ * which the kernel rejects):
+ */
+ for (uint32_t i = 0; i < req->nr_bos; i++)
+ bos[i].handle = handle_from_res_id(mctx, bos[i].handle);
+
+ struct drm_msm_gem_submit args = {
+ .flags = req->flags | MSM_SUBMIT_FENCE_FD_OUT | MSM_SUBMIT_FENCE_SN_IN,
+ .fence = req->fence,
+ .nr_bos = req->nr_bos,
+ .nr_cmds = req->nr_cmds,
+ .bos = VOID2U64(bos),
+ .cmds = VOID2U64(&req->payload[req->nr_bos * sizeof(struct drm_msm_gem_submit_bo)]),
+ .queueid = req->queue_id,
+ };
+
+ int ret = drmCommandWriteRead(mctx->fd, DRM_MSM_GEM_SUBMIT, &args, sizeof(args));
+ drm_dbg("fence=%u, ret=%d", args.fence, ret);
+
+ if (unlikely(ret)) {
+ drm_log("submit failed: %s", strerror(errno));
+ msm_dump_submit(&args);
+ if (mctx->shmem)
+ mctx->shmem->async_error++;
+ } else {
+ const struct hash_entry *entry =
+ table_search(mctx->sq_to_ring_idx_table, args.queueid);
+
+ /* NOTE(review): on this path the submit succeeded and the kernel
+ * returned args.fence_fd (FENCE_FD_OUT), but it is never closed
+ * before bailing — looks like an fd leak; verify.
+ */
+ if (!entry) {
+ drm_log("unknown submitqueue: %u", args.queueid);
+ goto out;
+ }
+
+ unsigned prio = (uintptr_t)entry->data;
+
+ /* Timeline takes ownership of the fence fd: */
+ drm_timeline_set_last_fence_fd(&mctx->timelines[prio], args.fence_fd);
+ }
+
+out:
+ if (!bos_on_stack)
+ free(bos);
+ return 0;
+}
+
+static int
+map_object(struct msm_context *mctx, struct msm_object *obj)
+{
+ uint64_t offset;
+ int ret;
+
+ if (obj->map)
+ return 0;
+
+ uint32_t handle = handle_from_res_id(mctx, obj->res_id);
+ ret = gem_info(mctx, handle, MSM_INFO_GET_OFFSET, &offset);
+ if (ret) {
+ drm_log("alloc failed: %s", strerror(errno));
+ return ret;
+ }
+
+ uint8_t *map =
+ mmap(0, obj->size, PROT_READ | PROT_WRITE, MAP_SHARED, mctx->fd, offset);
+ if (map == MAP_FAILED) {
+ drm_log("mmap failed: %s", strerror(errno));
+ return -ENOMEM;
+ }
+
+ obj->map = map;
+
+ return 0;
+}
+
+static int
+msm_ccmd_gem_upload(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_gem_upload_req *req = to_msm_ccmd_gem_upload_req(hdr);
+ int ret;
+
+ if (req->pad || !valid_payload_len(req)) {
+ drm_log("Invalid upload ccmd");
+ return -EINVAL;
+ }
+
+ struct msm_object *obj = msm_get_object_from_res_id(mctx, req->res_id);
+ if (!obj) {
+ drm_log("No obj: res_id=%u", req->res_id);
+ return -ENOENT;
+ }
+
+ ret = map_object(mctx, obj);
+ if (ret)
+ return ret;
+
+ memcpy(&obj->map[req->off], req->payload, req->len);
+
+ return 0;
+}
+
+static int
+msm_ccmd_submitqueue_query(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_submitqueue_query_req *req =
+ to_msm_ccmd_submitqueue_query_req(hdr);
+ struct msm_ccmd_submitqueue_query_rsp *rsp =
+ msm_context_rsp(mctx, hdr, size_add(sizeof(*rsp), req->len));
+
+ if (!rsp)
+ return -ENOMEM;
+
+ struct drm_msm_submitqueue_query args = {
+ .data = VOID2U64(rsp->payload),
+ .id = req->queue_id,
+ .param = req->param,
+ .len = req->len,
+ };
+
+ rsp->ret =
+ drmCommandWriteRead(mctx->fd, DRM_MSM_SUBMITQUEUE_QUERY, &args, sizeof(args));
+
+ rsp->out_len = args.len;
+
+ return 0;
+}
+
+static int
+msm_ccmd_wait_fence(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_wait_fence_req *req = to_msm_ccmd_wait_fence_req(hdr);
+ struct msm_ccmd_wait_fence_rsp *rsp = msm_context_rsp(mctx, hdr, sizeof(*rsp));
+
+ if (!rsp)
+ return -ENOMEM;
+
+ struct timespec t;
+
+ /* Use current time as timeout, to avoid blocking: */
+ clock_gettime(CLOCK_MONOTONIC, &t);
+
+ struct drm_msm_wait_fence args = {
+ .fence = req->fence,
+ .queueid = req->queue_id,
+ .timeout =
+ {
+ .tv_sec = t.tv_sec,
+ .tv_nsec = t.tv_nsec,
+ },
+ };
+
+ rsp->ret = drmCommandWrite(mctx->fd, DRM_MSM_WAIT_FENCE, &args, sizeof(args));
+
+ return 0;
+}
+
+static int
+msm_ccmd_set_debuginfo(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ const struct msm_ccmd_set_debuginfo_req *req = to_msm_ccmd_set_debuginfo_req(hdr);
+
+ size_t sz = sizeof(*req);
+ sz = size_add(sz, req->comm_len);
+ sz = size_add(sz, req->cmdline_len);
+
+ if (sz > hdr->len) {
+ drm_log("out of bounds: comm_len=%u, cmdline_len=%u", req->comm_len, req->cmdline_len);
+ return -ENOSPC;
+ }
+
+ struct drm_msm_param set_comm = {
+ .pipe = MSM_PIPE_3D0,
+ .param = MSM_PARAM_COMM,
+ .value = VOID2U64(&req->payload[0]),
+ .len = req->comm_len,
+ };
+
+ drmCommandWriteRead(mctx->fd, DRM_MSM_SET_PARAM, &set_comm, sizeof(set_comm));
+
+ struct drm_msm_param set_cmdline = {
+ .pipe = MSM_PIPE_3D0,
+ .param = MSM_PARAM_CMDLINE,
+ .value = VOID2U64(&req->payload[req->comm_len]),
+ .len = req->cmdline_len,
+ };
+
+ drmCommandWriteRead(mctx->fd, DRM_MSM_SET_PARAM, &set_cmdline, sizeof(set_cmdline));
+
+ return 0;
+}
+
+static const struct ccmd {
+ const char *name;
+ int (*handler)(struct msm_context *mctx, const struct msm_ccmd_req *hdr);
+ size_t size;
+} ccmd_dispatch[] = {
+#define HANDLER(N, n) \
+ [MSM_CCMD_##N] = {#N, msm_ccmd_##n, sizeof(struct msm_ccmd_##n##_req)}
+ HANDLER(NOP, nop),
+ HANDLER(IOCTL_SIMPLE, ioctl_simple),
+ HANDLER(GEM_NEW, gem_new),
+ HANDLER(GEM_SET_IOVA, gem_set_iova),
+ HANDLER(GEM_CPU_PREP, gem_cpu_prep),
+ HANDLER(GEM_SET_NAME, gem_set_name),
+ HANDLER(GEM_SUBMIT, gem_submit),
+ HANDLER(GEM_UPLOAD, gem_upload),
+ HANDLER(SUBMITQUEUE_QUERY, submitqueue_query),
+ HANDLER(WAIT_FENCE, wait_fence),
+ HANDLER(SET_DEBUGINFO, set_debuginfo),
+};
+
+static int
+submit_cmd_dispatch(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
+{
+ int ret;
+
+ if (hdr->cmd >= ARRAY_SIZE(ccmd_dispatch)) {
+ drm_log("invalid cmd: %u", hdr->cmd);
+ return -EINVAL;
+ }
+
+ const struct ccmd *ccmd = &ccmd_dispatch[hdr->cmd];
+
+ if (!ccmd->handler) {
+ drm_log("no handler: %u", hdr->cmd);
+ return -EINVAL;
+ }
+
+ drm_dbg("%s: hdr={cmd=%u, len=%u, seqno=%u, rsp_off=0x%x)", ccmd->name, hdr->cmd,
+ hdr->len, hdr->seqno, hdr->rsp_off);
+
+ TRACE_SCOPE_BEGIN(ccmd->name);
+
+ /* If the request length from the guest is smaller than the expected
+ * size, ie. newer host and older guest, we need to make a copy of
+ * the request with the new fields at the end zero initialized.
+ */
+ if (ccmd->size > hdr->len) {
+ uint8_t buf[ccmd->size];
+
+ memcpy(&buf[0], hdr, hdr->len);
+ memset(&buf[hdr->len], 0, ccmd->size - hdr->len);
+
+ ret = ccmd->handler(mctx, (struct msm_ccmd_req *)buf);
+ } else {
+ ret = ccmd->handler(mctx, hdr);
+ }
+
+ TRACE_SCOPE_END(ccmd->name);
+
+ if (ret) {
+ drm_log("%s: dispatch failed: %d (%s)", ccmd->name, ret, strerror(errno));
+ return ret;
+ }
+
+ /* If the response length from the guest is smaller than the
+ * expected size, ie. newer host and older guest, then a shadow
+ * copy is used, and we need to copy back to the actual rsp
+ * buffer.
+ */
+ struct msm_ccmd_rsp *rsp = msm_context_rsp_noshadow(mctx, hdr);
+ if (mctx->current_rsp && (mctx->current_rsp != rsp)) {
+ unsigned len = rsp->len;
+ memcpy(rsp, mctx->current_rsp, len);
+ rsp->len = len;
+ free(mctx->current_rsp);
+ }
+ mctx->current_rsp = NULL;
+
+ /* Note that commands with no response, like SET_DEBUGINFO, could
+ * be sent before the shmem buffer is allocated:
+ */
+ if (mctx->shmem) {
+ /* TODO better way to do this? We need ACQ_REL semanatics (AFAIU)
+ * to ensure that writes to response buffer are visible to the
+ * guest process before the update of the seqno. Otherwise we
+ * could just use p_atomic_set.
+ */
+ uint32_t seqno = hdr->seqno;
+ p_atomic_xchg(&mctx->shmem->seqno, seqno);
+ }
+
+ return 0;
+}
+
+static int
+msm_renderer_submit_cmd(struct virgl_context *vctx, const void *_buffer, size_t size)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+ const uint8_t *buffer = _buffer;
+
+ while (size >= sizeof(struct msm_ccmd_req)) {
+ const struct msm_ccmd_req *hdr = (const struct msm_ccmd_req *)buffer;
+
+ /* Sanity check first: */
+ if ((hdr->len > size) || (hdr->len < sizeof(*hdr)) || (hdr->len % 4)) {
+ drm_log("bad size, %u vs %zu (%u)", hdr->len, size, hdr->cmd);
+ return -EINVAL;
+ }
+
+ if (hdr->rsp_off % 4) {
+ drm_log("bad rsp_off, %u", hdr->rsp_off);
+ return -EINVAL;
+ }
+
+ int ret = submit_cmd_dispatch(mctx, hdr);
+ if (ret) {
+ drm_log("dispatch failed: %d (%u)", ret, hdr->cmd);
+ return ret;
+ }
+
+ buffer += hdr->len;
+ size -= hdr->len;
+ }
+
+ if (size > 0) {
+ drm_log("bad size, %zu trailing bytes", size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+msm_renderer_get_fencing_fd(struct virgl_context *vctx)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+ return mctx->eventfd;
+}
+
+static void
+msm_renderer_retire_fences(UNUSED struct virgl_context *vctx)
+{
+ /* No-op as VIRGL_RENDERER_ASYNC_FENCE_CB is required */
+}
+
+static int
+msm_renderer_submit_fence(struct virgl_context *vctx, uint32_t flags, uint32_t ring_idx,
+ uint64_t fence_id)
+{
+ struct msm_context *mctx = to_msm_context(vctx);
+
+ drm_dbg("flags=0x%x, ring_idx=%" PRIu32 ", fence_id=%" PRIu64, flags,
+ ring_idx, fence_id);
+
+ /* timeline is ring_idx-1 (because ring_idx 0 is host CPU timeline) */
+ if (ring_idx > nr_timelines) {
+ drm_log("invalid ring_idx: %" PRIu32, ring_idx);
+ return -EINVAL;
+ }
+
+ /* ring_idx zero is used for the guest to synchronize with host CPU,
+ * meaning by the time ->submit_fence() is called, the fence has
+ * already passed.. so just immediate signal:
+ */
+ if (ring_idx == 0) {
+ vctx->fence_retire(vctx, ring_idx, fence_id);
+ return 0;
+ }
+
+ return drm_timeline_submit_fence(&mctx->timelines[ring_idx - 1], flags, fence_id);
+}
+
+struct virgl_context *
+msm_renderer_create(int fd)
+{
+ struct msm_context *mctx;
+
+ drm_log("");
+
+ mctx = calloc(1, sizeof(*mctx) + (nr_timelines * sizeof(mctx->timelines[0])));
+ if (!mctx)
+ return NULL;
+
+ mctx->fd = fd;
+
+ /* Indexed by blob_id, but only lower 32b of blob_id are used: */
+ mctx->blob_table = _mesa_hash_table_create_u32_keys(NULL);
+ /* Indexed by res_id: */
+ mctx->resource_table = _mesa_hash_table_create_u32_keys(NULL);
+ /* Indexed by submitqueue-id: */
+ mctx->sq_to_ring_idx_table = _mesa_hash_table_create_u32_keys(NULL);
+
+ mctx->eventfd = create_eventfd(0);
+
+ for (unsigned i = 0; i < nr_timelines; i++) {
+ unsigned ring_idx = i + 1; /* ring_idx 0 is host CPU */
+ drm_timeline_init(&mctx->timelines[i], &mctx->base, "msm-sync", mctx->eventfd,
+ ring_idx);
+ }
+
+ mctx->base.destroy = msm_renderer_destroy;
+ mctx->base.attach_resource = msm_renderer_attach_resource;
+ mctx->base.detach_resource = msm_renderer_detach_resource;
+ mctx->base.export_opaque_handle = msm_renderer_export_opaque_handle;
+ mctx->base.transfer_3d = msm_renderer_transfer_3d;
+ mctx->base.get_blob = msm_renderer_get_blob;
+ mctx->base.submit_cmd = msm_renderer_submit_cmd;
+ mctx->base.get_fencing_fd = msm_renderer_get_fencing_fd;
+ mctx->base.retire_fences = msm_renderer_retire_fences;
+ mctx->base.submit_fence = msm_renderer_submit_fence;
+
+ return &mctx->base;
+}
diff --git a/src/drm/msm/msm_renderer.h b/src/drm/msm/msm_renderer.h
new file mode 100644
index 00000000..0b4bc528
--- /dev/null
+++ b/src/drm/msm/msm_renderer.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef MSM_RENDERER_H_
+#define MSM_RENDERER_H_
+
+#include "config.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <time.h>
+
+#include "pipe/p_defines.h"
+
+#include "drm_util.h"
+#include "msm_drm.h"
+
+int msm_renderer_probe(int fd, struct virgl_renderer_capset_drm *capset);
+
+struct virgl_context *msm_renderer_create(int fd);
+
+#endif /* MSM_RENDERER_H_ */
diff --git a/src/drm_hw.h b/src/drm_hw.h
new file mode 100644
index 00000000..eb17d753
--- /dev/null
+++ b/src/drm_hw.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2022 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef DRM_HW_H_
+#define DRM_HW_H_
+
+struct virgl_renderer_capset_drm {
+ uint32_t wire_format_version;
+ /* Underlying drm device version: */
+ uint32_t version_major;
+ uint32_t version_minor;
+ uint32_t version_patchlevel;
+#define VIRTGPU_DRM_CONTEXT_MSM 1
+ uint32_t context_type;
+ uint32_t pad;
+ union {
+ struct {
+ uint32_t has_cached_coherent;
+ uint32_t priorities;
+ uint64_t va_start;
+ uint64_t va_size;
+ uint32_t gpu_id;
+ uint32_t gmem_size;
+ uint64_t gmem_base;
+ uint64_t chip_id;
+ uint32_t max_freq;
+ } msm; /* context_type == VIRTGPU_DRM_CONTEXT_MSM */
+ } u;
+};
+
+#endif /* DRM_HW_H_ */
diff --git a/src/gallium/auxiliary/cso_cache/cso_cache.h b/src/gallium/auxiliary/cso_cache/cso_cache.h
index 052245f9..90d9e9d5 100644
--- a/src/gallium/auxiliary/cso_cache/cso_cache.h
+++ b/src/gallium/auxiliary/cso_cache/cso_cache.h
@@ -73,7 +73,6 @@
#ifndef CSO_CACHE_H
#define CSO_CACHE_H
-#include "pipe/p_context.h"
#include "pipe/p_state.h"
/* cso_hash.h is necessary for cso_hash_iter, as MSVC requires structures
diff --git a/src/gallium/auxiliary/os/os_memory_debug.h b/src/gallium/auxiliary/os/os_memory_debug.h
deleted file mode 100644
index 9a487dec..00000000
--- a/src/gallium/auxiliary/os/os_memory_debug.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2010 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-/*
- * Debugging wrappers for OS memory management abstractions.
- */
-
-
-#ifndef _OS_MEMORY_H_
-#error "Must not be included directly. Include os_memory.h instead"
-#endif
-
-
-#include "pipe/p_compiler.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-void *
-debug_malloc(const char *file, unsigned line, const char *function,
- size_t size);
-
-void *
-debug_calloc(const char *file, unsigned line, const char *function,
- size_t count, size_t size );
-
-void
-debug_free(const char *file, unsigned line, const char *function,
- void *ptr);
-
-void *
-debug_realloc(const char *file, unsigned line, const char *function,
- void *old_ptr, size_t old_size, size_t new_size );
-
-void
-debug_memory_tag(void *ptr, unsigned tag);
-
-void
-debug_memory_check_block(void *ptr);
-
-void
-debug_memory_check(void);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#ifndef DEBUG_MEMORY_IMPLEMENTATION
-
-#define os_malloc( _size ) \
- debug_malloc( __FILE__, __LINE__, __FUNCTION__, _size )
-#define os_calloc( _count, _size ) \
- debug_calloc(__FILE__, __LINE__, __FUNCTION__, _count, _size )
-#define os_free( _ptr ) \
- debug_free( __FILE__, __LINE__, __FUNCTION__, _ptr )
-#define os_realloc( _ptr, _old_size, _new_size ) \
- debug_realloc( __FILE__, __LINE__, __FUNCTION__, _ptr, _old_size, _new_size )
-
-/* TODO: wrap os_malloc_aligned() and os_free_aligned() too */
-#include "os_memory_aligned.h"
-
-#endif /* !DEBUG_MEMORY_IMPLEMENTATION */
diff --git a/src/gallium/auxiliary/os/os_mman.h b/src/gallium/auxiliary/os/os_mman.h
deleted file mode 100644
index a7353096..00000000
--- a/src/gallium/auxiliary/os/os_mman.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2011 LunarG, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * OS independent memory mapping (with large file support).
- *
- * @author Chia-I Wu <olvaffe@gmail.com>
- */
-
-#ifndef _OS_MMAN_H_
-#define _OS_MMAN_H_
-
-
-#include "pipe/p_config.h"
-#include "pipe/p_compiler.h"
-
-#if defined(PIPE_OS_UNIX)
-# ifndef _FILE_OFFSET_BITS
-# error _FILE_OFFSET_BITS must be defined to 64
-# endif
-# include <sys/mman.h>
-#else
-# error Unsupported OS
-#endif
-
-#if defined(PIPE_OS_ANDROID)
-# include <errno.h> /* for EINVAL */
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#if defined(PIPE_OS_ANDROID)
-
-extern void *__mmap2(void *, size_t, int, int, int, size_t);
-
-static inline void *os_mmap(void *addr, size_t length, int prot, int flags, int fd, loff_t offset)
-{
- /* offset must be aligned to 4096 (not necessarily the page size) */
- if (unlikely(offset & 4095)) {
- errno = EINVAL;
- return MAP_FAILED;
- }
-
- return __mmap2(addr, length, prot, flags, fd, (size_t) (offset >> 12));
-}
-
-#else
-/* assume large file support exists */
-# define os_mmap(addr, length, prot, flags, fd, offset) mmap(addr, length, prot, flags, fd, offset)
-#endif
-
-#define os_munmap(addr, length) munmap(addr, length)
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _OS_MMAN_H_ */
diff --git a/src/gallium/auxiliary/os/os_thread.h b/src/gallium/auxiliary/os/os_thread.h
deleted file mode 100644
index a84ca640..00000000
--- a/src/gallium/auxiliary/os/os_thread.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/**************************************************************************
- *
- * Copyright 1999-2006 Brian Paul
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-/**
- * @file
- *
- * Thread, mutex, condition variable, barrier, semaphore and
- * thread-specific data functions.
- */
-
-
-#ifndef OS_THREAD_H_
-#define OS_THREAD_H_
-
-
-#include "pipe/p_compiler.h"
-#include "util/u_debug.h" /* for assert */
-
-#include "c11/threads.h"
-
-#ifdef HAVE_PTHREAD
-#include <signal.h>
-#endif
-
-#ifdef PIPE_OS_LINUX
-#include <sys/prctl.h>
-#endif
-
-/* pipe_thread
- */
-typedef thrd_t pipe_thread;
-
-#define PIPE_THREAD_ROUTINE( name, param ) \
- int name( void *param )
-
-static inline pipe_thread pipe_thread_create( PIPE_THREAD_ROUTINE((*routine), ), void *param )
-{
- pipe_thread thread;
-#ifdef HAVE_PTHREAD
- sigset_t saved_set, new_set;
- int ret;
-
- sigfillset(&new_set);
- pthread_sigmask(SIG_SETMASK, &new_set, &saved_set);
- ret = thrd_create( &thread, routine, param );
- pthread_sigmask(SIG_SETMASK, &saved_set, NULL);
-#else
- int ret;
- ret = thrd_create( &thread, routine, param );
-#endif
- if (ret)
- return 0;
-
- return thread;
-}
-
-static inline int pipe_thread_wait( pipe_thread thread )
-{
- return thrd_join( thread, NULL );
-}
-
-static inline int pipe_thread_destroy( pipe_thread thread )
-{
- return thrd_detach( thread );
-}
-
-static inline void pipe_thread_setname( const char *name )
-{
-#ifdef PIPE_OS_LINUX
- prctl(PR_SET_NAME, name, 0, 0, 0);
-#else
- (void)name;
-#endif
-}
-
-
-/* pipe_mutex
- */
-typedef mtx_t pipe_mutex;
-
-#define pipe_static_mutex(mutex) \
- static pipe_mutex mutex = _MTX_INITIALIZER_NP
-
-#define pipe_mutex_init(mutex) \
- (void) mtx_init(&(mutex), mtx_plain)
-
-#define pipe_mutex_destroy(mutex) \
- mtx_destroy(&(mutex))
-
-#define pipe_mutex_lock(mutex) \
- (void) mtx_lock(&(mutex))
-
-#define pipe_mutex_unlock(mutex) \
- (void) mtx_unlock(&(mutex))
-
-
-/* pipe_condvar
- */
-typedef cnd_t pipe_condvar;
-
-#define pipe_condvar_init(cond) \
- cnd_init(&(cond))
-
-#define pipe_condvar_destroy(cond) \
- cnd_destroy(&(cond))
-
-#define pipe_condvar_wait(cond, mutex) \
- cnd_wait(&(cond), &(mutex))
-
-#define pipe_condvar_signal(cond) \
- cnd_signal(&(cond))
-
-#define pipe_condvar_broadcast(cond) \
- cnd_broadcast(&(cond))
-
-
-/*
- * pipe_barrier
- */
-
-#if (defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HURD)) && !defined(PIPE_OS_ANDROID)
-
-typedef pthread_barrier_t pipe_barrier;
-
-static inline void pipe_barrier_init(pipe_barrier *barrier, unsigned count)
-{
- pthread_barrier_init(barrier, NULL, count);
-}
-
-static inline void pipe_barrier_destroy(pipe_barrier *barrier)
-{
- pthread_barrier_destroy(barrier);
-}
-
-static inline void pipe_barrier_wait(pipe_barrier *barrier)
-{
- pthread_barrier_wait(barrier);
-}
-
-
-#else /* If the OS doesn't have its own, implement barriers using a mutex and a condvar */
-
-typedef struct {
- unsigned count;
- unsigned waiters;
- uint64_t sequence;
- pipe_mutex mutex;
- pipe_condvar condvar;
-} pipe_barrier;
-
-static inline void pipe_barrier_init(pipe_barrier *barrier, unsigned count)
-{
- barrier->count = count;
- barrier->waiters = 0;
- barrier->sequence = 0;
- pipe_mutex_init(barrier->mutex);
- pipe_condvar_init(barrier->condvar);
-}
-
-static inline void pipe_barrier_destroy(pipe_barrier *barrier)
-{
- assert(barrier->waiters == 0);
- pipe_mutex_destroy(barrier->mutex);
- pipe_condvar_destroy(barrier->condvar);
-}
-
-static inline void pipe_barrier_wait(pipe_barrier *barrier)
-{
- pipe_mutex_lock(barrier->mutex);
-
- assert(barrier->waiters < barrier->count);
- barrier->waiters++;
-
- if (barrier->waiters < barrier->count) {
- uint64_t sequence = barrier->sequence;
-
- do {
- pipe_condvar_wait(barrier->condvar, barrier->mutex);
- } while (sequence == barrier->sequence);
- } else {
- barrier->waiters = 0;
- barrier->sequence++;
- pipe_condvar_broadcast(barrier->condvar);
- }
-
- pipe_mutex_unlock(barrier->mutex);
-}
-
-
-#endif
-
-
-/*
- * Semaphores
- */
-
-typedef struct
-{
- pipe_mutex mutex;
- pipe_condvar cond;
- int counter;
-} pipe_semaphore;
-
-
-static inline void
-pipe_semaphore_init(pipe_semaphore *sema, int init_val)
-{
- pipe_mutex_init(sema->mutex);
- pipe_condvar_init(sema->cond);
- sema->counter = init_val;
-}
-
-static inline void
-pipe_semaphore_destroy(pipe_semaphore *sema)
-{
- pipe_mutex_destroy(sema->mutex);
- pipe_condvar_destroy(sema->cond);
-}
-
-/** Signal/increment semaphore counter */
-static inline void
-pipe_semaphore_signal(pipe_semaphore *sema)
-{
- pipe_mutex_lock(sema->mutex);
- sema->counter++;
- pipe_condvar_signal(sema->cond);
- pipe_mutex_unlock(sema->mutex);
-}
-
-/** Wait for semaphore counter to be greater than zero */
-static inline void
-pipe_semaphore_wait(pipe_semaphore *sema)
-{
- pipe_mutex_lock(sema->mutex);
- while (sema->counter <= 0) {
- pipe_condvar_wait(sema->cond, sema->mutex);
- }
- sema->counter--;
- pipe_mutex_unlock(sema->mutex);
-}
-
-
-
-/*
- * Thread-specific data.
- */
-
-typedef struct {
- tss_t key;
- int initMagic;
-} pipe_tsd;
-
-
-#define PIPE_TSD_INIT_MAGIC 0xff8adc98
-
-
-static inline void
-pipe_tsd_init(pipe_tsd *tsd)
-{
- if (tss_create(&tsd->key, NULL/*free*/) != 0) {
- exit(-1);
- }
- tsd->initMagic = PIPE_TSD_INIT_MAGIC;
-}
-
-static inline void *
-pipe_tsd_get(pipe_tsd *tsd)
-{
- if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) {
- pipe_tsd_init(tsd);
- }
- return tss_get(tsd->key);
-}
-
-static inline void
-pipe_tsd_set(pipe_tsd *tsd, void *value)
-{
- if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) {
- pipe_tsd_init(tsd);
- }
- if (tss_set(tsd->key, value) != 0) {
- exit(-1);
- }
-}
-
-
-
-#endif /* OS_THREAD_H_ */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_build.c b/src/gallium/auxiliary/tgsi/tgsi_build.c
index b06806d5..e3b41575 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_build.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_build.c
@@ -379,6 +379,8 @@ tgsi_default_full_declaration( void )
{
struct tgsi_full_declaration full_declaration;
+ full_declaration.Dim.Index2D = 0;
+ full_declaration.Dim.Padding = 0;
full_declaration.Declaration = tgsi_default_declaration();
full_declaration.Range = tgsi_default_declaration_range();
full_declaration.Semantic = tgsi_default_declaration_semantic();
diff --git a/src/gallium/auxiliary/tgsi/tgsi_dump.c b/src/gallium/auxiliary/tgsi/tgsi_dump.c
index 64cd397f..c6003315 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_dump.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_dump.c
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#include <inttypes.h>
+
#include "util/u_debug.h"
#include "util/u_string.h"
#include "util/u_math.h"
@@ -87,6 +89,8 @@ dump_enum(
#define CHR(C) ctx->dump_printf( ctx, "%c", C )
#define UIX(I) ctx->dump_printf( ctx, "0x%x", I )
#define UID(I) ctx->dump_printf( ctx, "%u", I )
+#define SI64D(I) ctx->dump_printf( ctx, "%"PRId64, I )
+#define UI64D(I) ctx->dump_printf( ctx, "%"PRIu64, I )
#define INSTID(I) ctx->dump_printf( ctx, "% 3u", I )
#define SID(I) ctx->dump_printf( ctx, "%d", I )
#define FLT(F) ctx->dump_printf( ctx, "%10.4f", F )
@@ -254,6 +258,20 @@ dump_imm_data(struct tgsi_iterate_context *iter,
i++;
break;
}
+ case TGSI_IMM_INT64: {
+ union di d;
+ d.i = data[i].Uint | (uint64_t)data[i+1].Uint << 32;
+ SI64D( d.i );
+ i++;
+ break;
+ }
+ case TGSI_IMM_UINT64: {
+ union di d;
+ d.ui = data[i].Uint | (uint64_t)data[i+1].Uint << 32;
+ UI64D( d.ui );
+ i++;
+ break;
+ }
case TGSI_IMM_FLOAT32:
if (ctx->dump_float_as_hex)
HFLT( data[i].Float );
@@ -734,7 +752,7 @@ str_dump_ctx_printf(struct dump_ctx *ctx, const char *format, ...)
int written;
va_list ap;
va_start(ap, format);
- written = util_vsnprintf(sctx->ptr, sctx->left, format, ap);
+ written = vsnprintf(sctx->ptr, sctx->left, format, ap);
va_end(ap);
/* Some complicated logic needed to handle the return value of
diff --git a/src/gallium/auxiliary/tgsi/tgsi_info.c b/src/gallium/auxiliary/tgsi/tgsi_info.c
index 7cac984d..87b5c347 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_info.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_info.c
@@ -308,7 +308,7 @@ tgsi_get_opcode_info( uint opcode )
if (firsttime) {
unsigned i;
firsttime = 0;
- for (i = 0; i < Elements(opcode_info); i++)
+ for (i = 0; i < ARRAY_SIZE(opcode_info); i++)
assert(opcode_info[i].opcode == i);
}
@@ -385,6 +385,7 @@ tgsi_opcode_infer_type( uint opcode )
case TGSI_OPCODE_BREV:
case TGSI_OPCODE_D2U:
case TGSI_OPCODE_CLOCK:
+ case TGSI_OPCODE_UADD:
return TGSI_TYPE_UNSIGNED;
case TGSI_OPCODE_ARL:
case TGSI_OPCODE_ARR:
@@ -401,7 +402,6 @@ tgsi_opcode_infer_type( uint opcode )
case TGSI_OPCODE_ISGE:
case TGSI_OPCODE_ISHR:
case TGSI_OPCODE_ISLT:
- case TGSI_OPCODE_UADD:
case TGSI_OPCODE_UARL:
case TGSI_OPCODE_IABS:
case TGSI_OPCODE_ISSG:
diff --git a/src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h b/src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h
deleted file mode 100644
index 92aab158..00000000
--- a/src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-#ifndef OP12_TEX
-#define OP12_TEX(a) OP12(a)
-#endif
-
-#ifndef OP14_TEX
-#define OP14_TEX(a) OP14(a)
-#endif
-
-#ifndef OP12_SAMPLE
-#define OP12_SAMPLE(a) OP12(a)
-#endif
-
-#ifndef OP13_SAMPLE
-#define OP13_SAMPLE(a) OP13(a)
-#endif
-
-#ifndef OP14_SAMPLE
-#define OP14_SAMPLE(a) OP14(a)
-#endif
-
-#ifndef OP15_SAMPLE
-#define OP15_SAMPLE(a) OP15(a)
-#endif
-
-#ifndef OP00_LBL
-#define OP00_LBL(a) OP00(a)
-#endif
-
-#ifndef OP01_LBL
-#define OP01_LBL(a) OP01(a)
-#endif
-
-OP11(ARL)
-OP11(MOV)
-OP11(LIT)
-OP11(RCP)
-OP11(RSQ)
-OP11(EXP)
-OP11(LOG)
-OP12(MUL)
-OP12(ADD)
-OP12(DP3)
-OP12(DP4)
-OP12(DST)
-OP12(MIN)
-OP12(MAX)
-OP12(SLT)
-OP12(SGE)
-OP13(MAD)
-OP12(SUB)
-OP13(LRP)
-OP11(SQRT)
-OP11(FRC)
-OP11(FLR)
-OP11(ROUND)
-OP11(EX2)
-OP11(LG2)
-OP12(POW)
-OP12(XPD)
-OP11(ABS)
-OP12(DPH)
-OP11(COS)
-OP11(DDX)
-OP11(DDY)
-OP00(KILL)
-OP11(PK2H)
-OP11(PK2US)
-OP11(PK4B)
-OP11(PK4UB)
-OP12(SEQ)
-OP12(SGT)
-OP11(SIN)
-OP12(SLE)
-OP12(SNE)
-OP12_TEX(TEX)
-OP14_TEX(TXD)
-OP12_TEX(TXP)
-OP11(UP2H)
-OP11(UP2US)
-OP11(UP4B)
-OP11(UP4UB)
-OP11(ARR)
-OP00_LBL(CAL)
-OP00(RET)
-OP11(SSG)
-OP13(CMP)
-OP11(SCS)
-OP12_TEX(TXB)
-OP12(DIV)
-OP12(DP2)
-OP12_TEX(TXL)
-OP00(BRK)
-OP01_LBL(IF)
-OP01_LBL(UIF)
-OP00_LBL(ELSE)
-OP00(ENDIF)
-OP11(CEIL)
-OP11(I2F)
-OP11(NOT)
-OP11(TRUNC)
-OP12(SHL)
-OP12(AND)
-OP12(OR)
-OP12(MOD)
-OP12(XOR)
-OP12_TEX(TXF)
-OP12_TEX(TXQ)
-OP00(CONT)
-OP01(EMIT)
-OP01(ENDPRIM)
-OP00_LBL(BGNLOOP)
-OP00(BGNSUB)
-OP00_LBL(ENDLOOP)
-OP00(ENDSUB)
-OP00(NOP)
-OP01(KILL_IF)
-OP00(END)
-OP11(F2I)
-OP12(FSEQ)
-OP12(FSGE)
-OP12(FSLT)
-OP12(FSNE)
-OP12(IDIV)
-OP12(IMAX)
-OP12(IMIN)
-OP11(INEG)
-OP12(ISGE)
-OP12(ISHR)
-OP12(ISLT)
-OP11(F2U)
-OP11(U2F)
-OP12(UADD)
-OP12(UDIV)
-OP13(UMAD)
-OP12(UMAX)
-OP12(UMIN)
-OP12(UMOD)
-OP12(UMUL)
-OP12(USEQ)
-OP12(USGE)
-OP12(USHR)
-OP12(USLT)
-OP12(USNE)
-OP01(SWITCH)
-OP01(CASE)
-OP00(DEFAULT)
-OP00(ENDSWITCH)
-
-OP13_SAMPLE(SAMPLE)
-OP12_SAMPLE(SAMPLE_I)
-OP13_SAMPLE(SAMPLE_I_MS)
-OP14_SAMPLE(SAMPLE_B)
-OP14_SAMPLE(SAMPLE_C)
-OP14_SAMPLE(SAMPLE_C_LZ)
-OP15_SAMPLE(SAMPLE_D)
-OP14_SAMPLE(SAMPLE_L)
-OP13_SAMPLE(GATHER4)
-OP12(SVIEWINFO)
-OP13(SAMPLE_POS)
-OP12(SAMPLE_INFO)
-OP11(UARL)
-
-OP13(UCMP)
-
-OP12(IMUL_HI)
-OP12(UMUL_HI)
-
-#undef OP00
-#undef OP01
-#undef OP10
-#undef OP11
-#undef OP12
-#undef OP13
-
-#ifdef OP14
-#undef OP14
-#endif
-
-#ifdef OP15
-#undef OP15
-#endif
-
-#undef OP00_LBL
-#undef OP01_LBL
-
-#undef OP12_TEX
-#undef OP14_TEX
-
-#undef OP12_SAMPLE
-#undef OP13_SAMPLE
-#undef OP14_SAMPLE
-#undef OP15_SAMPLE
diff --git a/src/gallium/auxiliary/tgsi/tgsi_parse.c b/src/gallium/auxiliary/tgsi/tgsi_parse.c
index 58f31b3c..52af201f 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_parse.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_parse.c
@@ -117,11 +117,11 @@ tgsi_parse_token(
next_token(ctx, &decl->Dim);
}
- if( decl->Declaration.Interpolate ) {
+ if (decl->Declaration.Interpolate) {
next_token( ctx, &decl->Interp );
}
- if( decl->Declaration.Semantic ) {
+ if (decl->Declaration.Semantic) {
next_token( ctx, &decl->Semantic );
}
@@ -133,7 +133,7 @@ tgsi_parse_token(
next_token(ctx, &decl->SamplerView);
}
- if( decl->Declaration.Array ) {
+ if (decl->Declaration.Array) {
next_token(ctx, &decl->Array);
}
@@ -152,19 +152,21 @@ tgsi_parse_token(
switch (imm->Immediate.DataType) {
case TGSI_IMM_FLOAT32:
+ case TGSI_IMM_FLOAT64:
for (i = 0; i < imm_count; i++) {
next_token(ctx, &imm->u[i].Float);
}
break;
case TGSI_IMM_UINT32:
- case TGSI_IMM_FLOAT64:
+ case TGSI_IMM_UINT64:
for (i = 0; i < imm_count; i++) {
next_token(ctx, &imm->u[i].Uint);
}
break;
case TGSI_IMM_INT32:
+ case TGSI_IMM_INT64:
for (i = 0; i < imm_count; i++) {
next_token(ctx, &imm->u[i].Int);
}
@@ -190,7 +192,7 @@ tgsi_parse_token(
if (inst->Instruction.Texture) {
next_token( ctx, &inst->Texture);
- for( i = 0; i < inst->Texture.NumOffsets; i++ ) {
+ for (i = 0; i < inst->Texture.NumOffsets; i++) {
next_token( ctx, &inst->TexOffsets[i] );
}
}
@@ -201,14 +203,14 @@ tgsi_parse_token(
assert( inst->Instruction.NumDstRegs <= TGSI_FULL_MAX_DST_REGISTERS );
- for( i = 0; i < inst->Instruction.NumDstRegs; i++ ) {
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
next_token( ctx, &inst->Dst[i].Register );
- if( inst->Dst[i].Register.Indirect )
+ if (inst->Dst[i].Register.Indirect)
next_token( ctx, &inst->Dst[i].Indirect );
- if( inst->Dst[i].Register.Dimension ) {
+ if (inst->Dst[i].Register.Dimension) {
next_token( ctx, &inst->Dst[i].Dimension );
/*
@@ -216,21 +218,21 @@ tgsi_parse_token(
*/
assert( !inst->Dst[i].Dimension.Dimension );
- if( inst->Dst[i].Dimension.Indirect )
+ if (inst->Dst[i].Dimension.Indirect)
next_token( ctx, &inst->Dst[i].DimIndirect );
}
}
assert( inst->Instruction.NumSrcRegs <= TGSI_FULL_MAX_SRC_REGISTERS );
- for( i = 0; i < inst->Instruction.NumSrcRegs; i++ ) {
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
next_token( ctx, &inst->Src[i].Register );
- if( inst->Src[i].Register.Indirect )
+ if (inst->Src[i].Register.Indirect)
next_token( ctx, &inst->Src[i].Indirect );
- if( inst->Src[i].Register.Dimension ) {
+ if (inst->Src[i].Register.Dimension) {
next_token( ctx, &inst->Src[i].Dimension );
/*
@@ -238,7 +240,7 @@ tgsi_parse_token(
*/
assert( !inst->Src[i].Dimension.Dimension );
- if( inst->Src[i].Dimension.Indirect )
+ if (inst->Src[i].Dimension.Indirect)
next_token( ctx, &inst->Src[i].DimIndirect );
}
}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_scan.c b/src/gallium/auxiliary/tgsi/tgsi_scan.c
index df7adb15..f88ac7cb 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_scan.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_scan.c
@@ -61,7 +61,7 @@ tgsi_scan_shader(const struct tgsi_token *tokens,
memset(info, 0, sizeof(*info));
for (i = 0; i < TGSI_FILE_COUNT; i++)
info->file_max[i] = -1;
- for (i = 0; i < Elements(info->const_file_max); i++)
+ for (i = 0; i < ARRAY_SIZE(info->const_file_max); i++)
info->const_file_max[i] = -1;
info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = 1;
@@ -209,7 +209,7 @@ tgsi_scan_shader(const struct tgsi_token *tokens,
/* MSAA samplers */
if (src->Register.File == TGSI_FILE_SAMPLER) {
assert(fullinst->Instruction.Texture);
- assert((unsigned)src->Register.Index < Elements(info->is_msaa_sampler));
+ assert((unsigned)src->Register.Index < ARRAY_SIZE(info->is_msaa_sampler));
if (fullinst->Instruction.Texture &&
(fullinst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
@@ -430,7 +430,7 @@ tgsi_scan_shader(const struct tgsi_token *tokens,
unsigned name = fullprop->Property.PropertyName;
unsigned value = fullprop->u[0].Data;
- assert(name < Elements(info->properties));
+ assert(name < ARRAY_SIZE(info->properties));
info->properties[name] = value;
switch (name) {
diff --git a/src/gallium/auxiliary/tgsi/tgsi_strings.c b/src/gallium/auxiliary/tgsi/tgsi_strings.c
index ae6efb73..9eacb248 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_strings.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_strings.c
@@ -154,6 +154,7 @@ const char *tgsi_property_names[TGSI_PROPERTY_COUNT] =
"CS_USER_DATA_COMPONENTS_AMD",
"LAYER_VIEWPORT_RELATIVE",
"FS_BLEND_EQUATION_ADVANCED",
+ "SEPARABLE_PROGRAM",
};
const char *tgsi_return_type_names[TGSI_RETURN_TYPE_COUNT] =
@@ -213,12 +214,14 @@ const char *tgsi_fs_coord_pixel_center_names[2] =
"INTEGER"
};
-const char *tgsi_immediate_type_names[4] =
+const char *tgsi_immediate_type_names[6] =
{
"FLT32",
"UINT32",
"INT32",
- "FLT64"
+ "FLT64",
+ "UINT64",
+ "INT64",
};
const char *tgsi_memory_names[3] =
@@ -231,12 +234,12 @@ const char *tgsi_memory_names[3] =
static inline void UNUSED
tgsi_strings_check(void)
{
- STATIC_ASSERT(Elements(tgsi_semantic_names) == TGSI_SEMANTIC_COUNT);
- STATIC_ASSERT(Elements(tgsi_texture_names) == TGSI_TEXTURE_COUNT);
- STATIC_ASSERT(Elements(tgsi_property_names) == TGSI_PROPERTY_COUNT);
- STATIC_ASSERT(Elements(tgsi_primitive_names) == PIPE_PRIM_MAX);
- STATIC_ASSERT(Elements(tgsi_interpolate_names) == TGSI_INTERPOLATE_COUNT);
- STATIC_ASSERT(Elements(tgsi_return_type_names) == TGSI_RETURN_TYPE_COUNT);
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_semantic_names) == TGSI_SEMANTIC_COUNT);
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_texture_names) == TGSI_TEXTURE_COUNT);
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_property_names) == TGSI_PROPERTY_COUNT);
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_primitive_names) == PIPE_PRIM_MAX);
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_interpolate_names) == TGSI_INTERPOLATE_COUNT);
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_return_type_names) == TGSI_RETURN_TYPE_COUNT);
(void) tgsi_processor_type_names;
(void) tgsi_return_type_names;
(void) tgsi_immediate_type_names;
@@ -248,8 +251,8 @@ tgsi_strings_check(void)
const char *
tgsi_file_name(unsigned file)
{
- STATIC_ASSERT(Elements(tgsi_file_names) == TGSI_FILE_COUNT);
- if (file < Elements(tgsi_file_names))
+ STATIC_ASSERT(ARRAY_SIZE(tgsi_file_names) == TGSI_FILE_COUNT);
+ if (file < ARRAY_SIZE(tgsi_file_names))
return tgsi_file_names[file];
else
return "invalid file";
diff --git a/src/gallium/auxiliary/tgsi/tgsi_strings.h b/src/gallium/auxiliary/tgsi/tgsi_strings.h
index bab883d8..6f3908f3 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_strings.h
+++ b/src/gallium/auxiliary/tgsi/tgsi_strings.h
@@ -60,7 +60,7 @@ extern const char *tgsi_fs_coord_origin_names[2];
extern const char *tgsi_fs_coord_pixel_center_names[2];
-extern const char *tgsi_immediate_type_names[4];
+extern const char *tgsi_immediate_type_names[6];
extern const char *tgsi_memory_names[3];
diff --git a/src/gallium/auxiliary/tgsi/tgsi_text.c b/src/gallium/auxiliary/tgsi/tgsi_text.c
index 0d0a9a37..84443fd7 100644
--- a/src/gallium/auxiliary/tgsi/tgsi_text.c
+++ b/src/gallium/auxiliary/tgsi/tgsi_text.c
@@ -314,6 +314,42 @@ static boolean parse_double( const char **pcur, uint32_t *val0, uint32_t *val1)
return TRUE;
}
+static boolean parse_int64( const char **pcur, uint32_t *val0, uint32_t *val1)
+{
+ const char *cur = *pcur;
+ union {
+ int64_t i64val;
+ uint32_t uval[2];
+ } v;
+
+ v.i64val = strtoll(cur, (char**)pcur, 0);
+ if (*pcur == cur)
+ return FALSE;
+
+ *val0 = v.uval[0];
+ *val1 = v.uval[1];
+
+ return TRUE;
+}
+
+static boolean parse_uint64( const char **pcur, uint32_t *val0, uint32_t *val1)
+{
+ const char *cur = *pcur;
+ union {
+ uint64_t u64val;
+ uint32_t uval[2];
+ } v;
+
+ v.u64val = strtoull(cur, (char**)pcur, 0);
+ if (*pcur == cur)
+ return FALSE;
+
+ *val0 = v.uval[0];
+ *val1 = v.uval[1];
+
+ return TRUE;
+}
+
struct translate_ctx
{
const char *text;
@@ -544,7 +580,7 @@ parse_register_bracket(
struct parsed_bracket *brackets)
{
const char *cur;
- uint uindex;
+ int index;
memset(brackets, 0, sizeof(struct parsed_bracket));
@@ -588,11 +624,11 @@ parse_register_bracket(
brackets->index = 0;
}
else {
- if (!parse_uint( &ctx->cur, &uindex )) {
- report_error( ctx, "Expected literal unsigned integer" );
+ if (!parse_int( &ctx->cur, &index )) {
+ report_error( ctx, "Expected literal integer" );
return FALSE;
}
- brackets->index = (int) uindex;
+ brackets->index = index;
brackets->ind_file = TGSI_FILE_NULL;
brackets->ind_index = 0;
}
@@ -1225,6 +1261,14 @@ static boolean parse_immediate_data(struct translate_ctx *ctx, unsigned type,
ret = parse_double(&ctx->cur, &values[i].Uint, &values[i+1].Uint);
i++;
break;
+ case TGSI_IMM_INT64:
+ ret = parse_int64(&ctx->cur, &values[i].Uint, &values[i+1].Uint);
+ i++;
+ break;
+ case TGSI_IMM_UINT64:
+ ret = parse_uint64(&ctx->cur, &values[i].Uint, &values[i+1].Uint);
+ i++;
+ break;
case TGSI_IMM_FLOAT32:
ret = parse_float(&ctx->cur, &values[i].Float);
break;
@@ -1642,11 +1686,11 @@ static boolean parse_immediate( struct translate_ctx *ctx )
report_error( ctx, "Syntax error" );
return FALSE;
}
- for (type = 0; type < Elements(tgsi_immediate_type_names); ++type) {
+ for (type = 0; type < ARRAY_SIZE(tgsi_immediate_type_names); ++type) {
if (str_match_nocase_whole(&ctx->cur, tgsi_immediate_type_names[type]))
break;
}
- if (type == Elements(tgsi_immediate_type_names)) {
+ if (type == ARRAY_SIZE(tgsi_immediate_type_names)) {
report_error( ctx, "Expected immediate type" );
return FALSE;
}
@@ -1692,7 +1736,7 @@ parse_fs_coord_origin( const char **pcur, uint *fs_coord_origin )
{
uint i;
- for (i = 0; i < Elements(tgsi_fs_coord_origin_names); i++) {
+ for (i = 0; i < ARRAY_SIZE(tgsi_fs_coord_origin_names); i++) {
const char *cur = *pcur;
if (str_match_nocase_whole( &cur, tgsi_fs_coord_origin_names[i])) {
@@ -1709,7 +1753,7 @@ parse_fs_coord_pixel_center( const char **pcur, uint *fs_coord_pixel_center )
{
uint i;
- for (i = 0; i < Elements(tgsi_fs_coord_pixel_center_names); i++) {
+ for (i = 0; i < ARRAY_SIZE(tgsi_fs_coord_pixel_center_names); i++) {
const char *cur = *pcur;
if (str_match_nocase_whole( &cur, tgsi_fs_coord_pixel_center_names[i])) {
diff --git a/src/gallium/auxiliary/tgsi/tgsi_transform.c b/src/gallium/auxiliary/tgsi/tgsi_transform.c
deleted file mode 100644
index b16d2966..00000000
--- a/src/gallium/auxiliary/tgsi/tgsi_transform.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * TGSI program transformation utility.
- *
- * Authors: Brian Paul
- */
-
-#include "util/u_debug.h"
-
-#include "tgsi_transform.h"
-
-
-
-static void
-emit_instruction(struct tgsi_transform_context *ctx,
- const struct tgsi_full_instruction *inst)
-{
- uint ti = ctx->ti;
-
- ti += tgsi_build_full_instruction(inst,
- ctx->tokens_out + ti,
- ctx->header,
- ctx->max_tokens_out - ti);
- ctx->ti = ti;
-}
-
-
-static void
-emit_declaration(struct tgsi_transform_context *ctx,
- const struct tgsi_full_declaration *decl)
-{
- uint ti = ctx->ti;
-
- ti += tgsi_build_full_declaration(decl,
- ctx->tokens_out + ti,
- ctx->header,
- ctx->max_tokens_out - ti);
- ctx->ti = ti;
-}
-
-
-static void
-emit_immediate(struct tgsi_transform_context *ctx,
- const struct tgsi_full_immediate *imm)
-{
- uint ti = ctx->ti;
-
- ti += tgsi_build_full_immediate(imm,
- ctx->tokens_out + ti,
- ctx->header,
- ctx->max_tokens_out - ti);
- ctx->ti = ti;
-}
-
-
-static void
-emit_property(struct tgsi_transform_context *ctx,
- const struct tgsi_full_property *prop)
-{
- uint ti = ctx->ti;
-
- ti += tgsi_build_full_property(prop,
- ctx->tokens_out + ti,
- ctx->header,
- ctx->max_tokens_out - ti);
- ctx->ti = ti;
-}
-
-
-/**
- * Apply user-defined transformations to the input shader to produce
- * the output shader.
- * For example, a register search-and-replace operation could be applied
- * by defining a transform_instruction() callback that examined and changed
- * the instruction src/dest regs.
- *
- * \return number of tokens emitted
- */
-int
-tgsi_transform_shader(const struct tgsi_token *tokens_in,
- struct tgsi_token *tokens_out,
- uint max_tokens_out,
- struct tgsi_transform_context *ctx)
-{
- uint procType;
-
- /* input shader */
- struct tgsi_parse_context parse;
-
- /* output shader */
- struct tgsi_processor *processor;
-
-
- /**
- ** callback context init
- **/
- ctx->emit_instruction = emit_instruction;
- ctx->emit_declaration = emit_declaration;
- ctx->emit_immediate = emit_immediate;
- ctx->emit_property = emit_property;
- ctx->tokens_out = tokens_out;
- ctx->max_tokens_out = max_tokens_out;
-
-
- /**
- ** Setup to begin parsing input shader
- **/
- if (tgsi_parse_init( &parse, tokens_in ) != TGSI_PARSE_OK) {
- debug_printf("tgsi_parse_init() failed in tgsi_transform_shader()!\n");
- return -1;
- }
- procType = parse.FullHeader.Processor.Processor;
- assert(procType == TGSI_PROCESSOR_FRAGMENT ||
- procType == TGSI_PROCESSOR_VERTEX ||
- procType == TGSI_PROCESSOR_GEOMETRY);
-
-
- /**
- ** Setup output shader
- **/
- ctx->header = (struct tgsi_header *)tokens_out;
- *ctx->header = tgsi_build_header();
-
- processor = (struct tgsi_processor *) (tokens_out + 1);
- *processor = tgsi_build_processor( procType, ctx->header );
-
- ctx->ti = 2;
-
-
- /**
- ** Loop over incoming program tokens/instructions
- */
- while( !tgsi_parse_end_of_tokens( &parse ) ) {
-
- tgsi_parse_token( &parse );
-
- switch( parse.FullToken.Token.Type ) {
- case TGSI_TOKEN_TYPE_INSTRUCTION:
- {
- struct tgsi_full_instruction *fullinst
- = &parse.FullToken.FullInstruction;
-
- if (ctx->transform_instruction)
- ctx->transform_instruction(ctx, fullinst);
- else
- ctx->emit_instruction(ctx, fullinst);
- }
- break;
-
- case TGSI_TOKEN_TYPE_DECLARATION:
- {
- struct tgsi_full_declaration *fulldecl
- = &parse.FullToken.FullDeclaration;
-
- if (ctx->transform_declaration)
- ctx->transform_declaration(ctx, fulldecl);
- else
- ctx->emit_declaration(ctx, fulldecl);
- }
- break;
-
- case TGSI_TOKEN_TYPE_IMMEDIATE:
- {
- struct tgsi_full_immediate *fullimm
- = &parse.FullToken.FullImmediate;
-
- if (ctx->transform_immediate)
- ctx->transform_immediate(ctx, fullimm);
- else
- ctx->emit_immediate(ctx, fullimm);
- }
- break;
- case TGSI_TOKEN_TYPE_PROPERTY:
- {
- struct tgsi_full_property *fullprop
- = &parse.FullToken.FullProperty;
-
- if (ctx->transform_property)
- ctx->transform_property(ctx, fullprop);
- else
- ctx->emit_property(ctx, fullprop);
- }
- break;
-
- default:
- assert( 0 );
- }
- }
-
- if (ctx->epilog) {
- ctx->epilog(ctx);
- }
-
- tgsi_parse_free (&parse);
-
- return ctx->ti;
-}
-
-
-#include "tgsi_text.h"
-
-extern int tgsi_transform_foo( struct tgsi_token *tokens_out,
- uint max_tokens_out );
-
-/* This function exists only so that tgsi_text_translate() doesn't get
- * magic-ed out of the libtgsi.a archive by the build system. Don't
- * remove unless you know this has been fixed - check on mingw/scons
- * builds as well.
- */
-int
-tgsi_transform_foo( struct tgsi_token *tokens_out,
- uint max_tokens_out )
-{
- const char *text =
- "FRAG\n"
- "DCL IN[0], COLOR, CONSTANT\n"
- "DCL OUT[0], COLOR\n"
- " 0: MOV OUT[0], IN[0]\n"
- " 1: END";
-
- return tgsi_text_translate( text,
- tokens_out,
- max_tokens_out );
-}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_transform.h b/src/gallium/auxiliary/tgsi/tgsi_transform.h
deleted file mode 100644
index 8b96664a..00000000
--- a/src/gallium/auxiliary/tgsi/tgsi_transform.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TGSI_TRANSFORM_H
-#define TGSI_TRANSFORM_H
-
-
-#include "pipe/p_shader_tokens.h"
-#include "tgsi/tgsi_parse.h"
-#include "tgsi/tgsi_build.h"
-
-
-
-/**
- * Subclass this to add caller-specific data
- */
-struct tgsi_transform_context
-{
-/**** PUBLIC ***/
-
- /**
- * User-defined callbacks invoked per instruction.
- */
- void (*transform_instruction)(struct tgsi_transform_context *ctx,
- struct tgsi_full_instruction *inst);
-
- void (*transform_declaration)(struct tgsi_transform_context *ctx,
- struct tgsi_full_declaration *decl);
-
- void (*transform_immediate)(struct tgsi_transform_context *ctx,
- struct tgsi_full_immediate *imm);
- void (*transform_property)(struct tgsi_transform_context *ctx,
- struct tgsi_full_property *prop);
-
- /**
- * Called at end of input program to allow caller to append extra
- * instructions. Return number of tokens emitted.
- */
- void (*epilog)(struct tgsi_transform_context *ctx);
-
-
-/*** PRIVATE ***/
-
- /**
- * These are setup by tgsi_transform_shader() and cannot be overridden.
- * Meant to be called from in the above user callback functions.
- */
- void (*emit_instruction)(struct tgsi_transform_context *ctx,
- const struct tgsi_full_instruction *inst);
- void (*emit_declaration)(struct tgsi_transform_context *ctx,
- const struct tgsi_full_declaration *decl);
- void (*emit_immediate)(struct tgsi_transform_context *ctx,
- const struct tgsi_full_immediate *imm);
- void (*emit_property)(struct tgsi_transform_context *ctx,
- const struct tgsi_full_property *prop);
-
- struct tgsi_header *header;
- uint max_tokens_out;
- struct tgsi_token *tokens_out;
- uint ti;
-};
-
-
-
-extern int
-tgsi_transform_shader(const struct tgsi_token *tokens_in,
- struct tgsi_token *tokens_out,
- uint max_tokens_out,
- struct tgsi_transform_context *ctx);
-
-
-#endif /* TGSI_TRANSFORM_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.c b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
deleted file mode 100644
index ded78c2b..00000000
--- a/src/gallium/auxiliary/tgsi/tgsi_ureg.c
+++ /dev/null
@@ -1,1736 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009-2010 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE, INC AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "pipe/p_context.h"
-#include "pipe/p_state.h"
-#include "tgsi/tgsi_ureg.h"
-#include "tgsi/tgsi_build.h"
-#include "tgsi/tgsi_info.h"
-#include "tgsi/tgsi_dump.h"
-#include "tgsi/tgsi_sanity.h"
-#include "util/u_debug.h"
-#include "util/u_memory.h"
-#include "util/u_math.h"
-#include "util/u_bitmask.h"
-
-union tgsi_any_token {
- struct tgsi_header header;
- struct tgsi_processor processor;
- struct tgsi_token token;
- struct tgsi_property prop;
- struct tgsi_property_data prop_data;
- struct tgsi_declaration decl;
- struct tgsi_declaration_range decl_range;
- struct tgsi_declaration_dimension decl_dim;
- struct tgsi_declaration_interp decl_interp;
- struct tgsi_declaration_semantic decl_semantic;
- struct tgsi_declaration_sampler_view decl_sampler_view;
- struct tgsi_declaration_array array;
- struct tgsi_immediate imm;
- union tgsi_immediate_data imm_data;
- struct tgsi_instruction insn;
- struct tgsi_instruction_label insn_label;
- struct tgsi_instruction_texture insn_texture;
- struct tgsi_texture_offset insn_texture_offset;
- struct tgsi_src_register src;
- struct tgsi_ind_register ind;
- struct tgsi_dimension dim;
- struct tgsi_dst_register dst;
- unsigned value;
-};
-
-
-struct ureg_tokens {
- union tgsi_any_token *tokens;
- unsigned size;
- unsigned order;
- unsigned count;
-};
-
-#define UREG_MAX_INPUT PIPE_MAX_ATTRIBS
-#define UREG_MAX_SYSTEM_VALUE PIPE_MAX_ATTRIBS
-#define UREG_MAX_OUTPUT PIPE_MAX_SHADER_OUTPUTS
-#define UREG_MAX_CONSTANT_RANGE 32
-#define UREG_MAX_HW_ATOMIC_RANGE 32
-#define UREG_MAX_IMMEDIATE 4096
-#define UREG_MAX_ADDR 3
-#define UREG_MAX_PRED 1
-#define UREG_MAX_ARRAY_TEMPS 256
-
-struct const_decl {
- struct {
- unsigned first;
- unsigned last;
- } constant_range[UREG_MAX_CONSTANT_RANGE];
- unsigned nr_constant_ranges;
-};
-
-struct hw_atomic_decl {
- struct {
- unsigned first;
- unsigned last;
- unsigned array_id;
- } hw_atomic_range[UREG_MAX_HW_ATOMIC_RANGE];
- unsigned nr_hw_atomic_ranges;
-};
-
-#define DOMAIN_DECL 0
-#define DOMAIN_INSN 1
-
-struct ureg_program
-{
- unsigned processor;
- struct pipe_context *pipe;
-
- struct {
- unsigned semantic_name;
- unsigned semantic_index;
- unsigned interp;
- unsigned char cylindrical_wrap;
- unsigned interp_location;
- } fs_input[UREG_MAX_INPUT];
- unsigned nr_fs_inputs;
-
- unsigned vs_inputs[UREG_MAX_INPUT/32];
-
- struct {
- unsigned index;
- unsigned semantic_name;
- unsigned semantic_index;
- } gs_input[UREG_MAX_INPUT];
- unsigned nr_gs_inputs;
-
- struct {
- unsigned index;
- unsigned semantic_name;
- unsigned semantic_index;
- } system_value[UREG_MAX_SYSTEM_VALUE];
- unsigned nr_system_values;
-
- struct {
- unsigned semantic_name;
- unsigned semantic_index;
- unsigned usage_mask; /* = TGSI_WRITEMASK_* */
- } output[UREG_MAX_OUTPUT];
- unsigned nr_outputs;
-
- struct {
- union {
- float f[4];
- unsigned u[4];
- int i[4];
- } value;
- unsigned nr;
- unsigned type;
- } immediate[UREG_MAX_IMMEDIATE];
- unsigned nr_immediates;
-
- struct ureg_src sampler[PIPE_MAX_SAMPLERS];
- unsigned nr_samplers;
-
- struct {
- unsigned index;
- unsigned target;
- unsigned return_type_x;
- unsigned return_type_y;
- unsigned return_type_z;
- unsigned return_type_w;
- } sampler_view[PIPE_MAX_SHADER_SAMPLER_VIEWS];
- unsigned nr_sampler_views;
-
- struct util_bitmask *free_temps;
- struct util_bitmask *local_temps;
- struct util_bitmask *decl_temps;
- unsigned nr_temps;
-
- unsigned array_temps[UREG_MAX_ARRAY_TEMPS];
- unsigned nr_array_temps;
-
- struct const_decl const_decls;
- struct const_decl const_decls2D[PIPE_MAX_CONSTANT_BUFFERS];
-
- struct hw_atomic_decl hw_atomic_decls[PIPE_MAX_HW_ATOMIC_BUFFERS];
-
- unsigned properties[TGSI_PROPERTY_COUNT];
-
- unsigned nr_addrs;
- unsigned nr_preds;
- unsigned nr_instructions;
-
- struct ureg_tokens domain[2];
-};
-
-static union tgsi_any_token error_tokens[32];
-
-static void tokens_error( struct ureg_tokens *tokens )
-{
- if (tokens->tokens && tokens->tokens != error_tokens)
- FREE(tokens->tokens);
-
- tokens->tokens = error_tokens;
- tokens->size = ARRAY_SIZE(error_tokens);
- tokens->count = 0;
-}
-
-
-static void tokens_expand( struct ureg_tokens *tokens,
- unsigned count )
-{
- unsigned old_size = tokens->size * sizeof(unsigned);
-
- if (tokens->tokens == error_tokens) {
- return;
- }
-
- while (tokens->count + count > tokens->size) {
- tokens->size = (1 << ++tokens->order);
- }
-
- tokens->tokens = REALLOC(tokens->tokens,
- old_size,
- tokens->size * sizeof(unsigned));
- if (tokens->tokens == NULL) {
- tokens_error(tokens);
- }
-}
-
-static void set_bad( struct ureg_program *ureg )
-{
- tokens_error(&ureg->domain[0]);
-}
-
-
-
-static union tgsi_any_token *get_tokens( struct ureg_program *ureg,
- unsigned domain,
- unsigned count )
-{
- struct ureg_tokens *tokens = &ureg->domain[domain];
- union tgsi_any_token *result;
-
- if (tokens->count + count > tokens->size)
- tokens_expand(tokens, count);
-
- result = &tokens->tokens[tokens->count];
- tokens->count += count;
- return result;
-}
-
-
-static union tgsi_any_token *retrieve_token( struct ureg_program *ureg,
- unsigned domain,
- unsigned nr )
-{
- if (ureg->domain[domain].tokens == error_tokens)
- return &error_tokens[0];
-
- return &ureg->domain[domain].tokens[nr];
-}
-
-
-
-static inline struct ureg_dst
-ureg_dst_register( unsigned file,
- unsigned index )
-{
- struct ureg_dst dst;
-
- dst.File = file;
- dst.WriteMask = TGSI_WRITEMASK_XYZW;
- dst.Indirect = 0;
- dst.IndirectFile = TGSI_FILE_NULL;
- dst.IndirectIndex = 0;
- dst.IndirectSwizzle = 0;
- dst.Saturate = 0;
- dst.PredNegate = 0;
- dst.PredSwizzleX = TGSI_SWIZZLE_X;
- dst.PredSwizzleY = TGSI_SWIZZLE_Y;
- dst.PredSwizzleZ = TGSI_SWIZZLE_Z;
- dst.PredSwizzleW = TGSI_SWIZZLE_W;
- dst.Index = index;
- dst.ArrayID = 0;
-
- return dst;
-}
-
-
-void
-ureg_property(struct ureg_program *ureg, unsigned name, unsigned value)
-{
- assert(name < ARRAY_SIZE(ureg->properties));
- ureg->properties[name] = value;
-}
-
-struct ureg_src
-ureg_DECL_fs_input_cyl_centroid(struct ureg_program *ureg,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned interp_mode,
- unsigned cylindrical_wrap,
- unsigned interp_location)
-{
- unsigned i;
-
- for (i = 0; i < ureg->nr_fs_inputs; i++) {
- if (ureg->fs_input[i].semantic_name == semantic_name &&
- ureg->fs_input[i].semantic_index == semantic_index) {
- goto out;
- }
- }
-
- if (ureg->nr_fs_inputs < UREG_MAX_INPUT) {
- ureg->fs_input[i].semantic_name = semantic_name;
- ureg->fs_input[i].semantic_index = semantic_index;
- ureg->fs_input[i].interp = interp_mode;
- ureg->fs_input[i].cylindrical_wrap = cylindrical_wrap;
- ureg->fs_input[i].interp_location = interp_location;
- ureg->nr_fs_inputs++;
- } else {
- set_bad(ureg);
- }
-
-out:
- return ureg_src_register(TGSI_FILE_INPUT, i);
-}
-
-
-struct ureg_src
-ureg_DECL_vs_input( struct ureg_program *ureg,
- unsigned index )
-{
- assert(ureg->processor == TGSI_PROCESSOR_VERTEX);
-
- ureg->vs_inputs[index/32] |= 1 << (index % 32);
- return ureg_src_register( TGSI_FILE_INPUT, index );
-}
-
-
-struct ureg_src
-ureg_DECL_gs_input(struct ureg_program *ureg,
- unsigned index,
- unsigned semantic_name,
- unsigned semantic_index)
-{
- if (ureg->nr_gs_inputs < UREG_MAX_INPUT) {
- ureg->gs_input[ureg->nr_gs_inputs].index = index;
- ureg->gs_input[ureg->nr_gs_inputs].semantic_name = semantic_name;
- ureg->gs_input[ureg->nr_gs_inputs].semantic_index = semantic_index;
- ureg->nr_gs_inputs++;
- } else {
- set_bad(ureg);
- }
-
- /* XXX: Add suport for true 2D input registers. */
- return ureg_src_register(TGSI_FILE_INPUT, index);
-}
-
-
-struct ureg_src
-ureg_DECL_system_value(struct ureg_program *ureg,
- unsigned index,
- unsigned semantic_name,
- unsigned semantic_index)
-{
- if (ureg->nr_system_values < UREG_MAX_SYSTEM_VALUE) {
- ureg->system_value[ureg->nr_system_values].index = index;
- ureg->system_value[ureg->nr_system_values].semantic_name = semantic_name;
- ureg->system_value[ureg->nr_system_values].semantic_index = semantic_index;
- ureg->nr_system_values++;
- } else {
- set_bad(ureg);
- }
-
- return ureg_src_register(TGSI_FILE_SYSTEM_VALUE, index);
-}
-
-
-struct ureg_dst
-ureg_DECL_output_masked( struct ureg_program *ureg,
- unsigned name,
- unsigned index,
- unsigned usage_mask )
-{
- unsigned i;
-
- assert(usage_mask != 0);
-
- for (i = 0; i < ureg->nr_outputs; i++) {
- if (ureg->output[i].semantic_name == name &&
- ureg->output[i].semantic_index == index) {
- ureg->output[i].usage_mask |= usage_mask;
- goto out;
- }
- }
-
- if (ureg->nr_outputs < UREG_MAX_OUTPUT) {
- ureg->output[i].semantic_name = name;
- ureg->output[i].semantic_index = index;
- ureg->output[i].usage_mask = usage_mask;
- ureg->nr_outputs++;
- }
- else {
- set_bad( ureg );
- }
-
-out:
- return ureg_dst_register( TGSI_FILE_OUTPUT, i );
-}
-
-
-struct ureg_dst
-ureg_DECL_output( struct ureg_program *ureg,
- unsigned name,
- unsigned index )
-{
- return ureg_DECL_output_masked(ureg, name, index, TGSI_WRITEMASK_XYZW);
-}
-
-
-/* Returns a new constant register. Keep track of which have been
- * referred to so that we can emit decls later.
- *
- * Constant operands declared with this function must be addressed
- * with a two-dimensional index.
- *
- * There is nothing in this code to bind this constant to any tracked
- * value or manage any constant_buffer contents -- that's the
- * resposibility of the calling code.
- */
-void
-ureg_DECL_constant2D(struct ureg_program *ureg,
- unsigned first,
- unsigned last,
- unsigned index2D)
-{
- struct const_decl *decl = &ureg->const_decls2D[index2D];
-
- assert(index2D < PIPE_MAX_CONSTANT_BUFFERS);
-
- if (decl->nr_constant_ranges < UREG_MAX_CONSTANT_RANGE) {
- uint i = decl->nr_constant_ranges++;
-
- decl->constant_range[i].first = first;
- decl->constant_range[i].last = last;
- }
-}
-
-
-/* A one-dimensional, depricated version of ureg_DECL_constant2D().
- *
- * Constant operands declared with this function must be addressed
- * with a one-dimensional index.
- */
-struct ureg_src
-ureg_DECL_constant(struct ureg_program *ureg,
- unsigned index)
-{
- struct const_decl *decl = &ureg->const_decls;
- unsigned minconst = index, maxconst = index;
- unsigned i;
-
- /* Inside existing range?
- */
- for (i = 0; i < decl->nr_constant_ranges; i++) {
- if (decl->constant_range[i].first <= index &&
- decl->constant_range[i].last >= index) {
- goto out;
- }
- }
-
- /* Extend existing range?
- */
- for (i = 0; i < decl->nr_constant_ranges; i++) {
- if (decl->constant_range[i].last == index - 1) {
- decl->constant_range[i].last = index;
- goto out;
- }
-
- if (decl->constant_range[i].first == index + 1) {
- decl->constant_range[i].first = index;
- goto out;
- }
-
- minconst = MIN2(minconst, decl->constant_range[i].first);
- maxconst = MAX2(maxconst, decl->constant_range[i].last);
- }
-
- /* Create new range?
- */
- if (decl->nr_constant_ranges < UREG_MAX_CONSTANT_RANGE) {
- i = decl->nr_constant_ranges++;
- decl->constant_range[i].first = index;
- decl->constant_range[i].last = index;
- goto out;
- }
-
- /* Collapse all ranges down to one:
- */
- i = 0;
- decl->constant_range[0].first = minconst;
- decl->constant_range[0].last = maxconst;
- decl->nr_constant_ranges = 1;
-
-out:
- assert(i < decl->nr_constant_ranges);
- assert(decl->constant_range[i].first <= index);
- assert(decl->constant_range[i].last >= index);
- return ureg_src_register(TGSI_FILE_CONSTANT, index);
-}
-
-
-/* Returns a new hw atomic register. Keep track of which have been
- * referred to so that we can emit decls later.
- */
-void
-ureg_DECL_hw_atomic(struct ureg_program *ureg,
- unsigned first,
- unsigned last,
- unsigned buffer_id,
- unsigned array_id)
-{
- struct hw_atomic_decl *decl = &ureg->hw_atomic_decls[buffer_id];
-
- if (decl->nr_hw_atomic_ranges < UREG_MAX_HW_ATOMIC_RANGE) {
- uint i = decl->nr_hw_atomic_ranges++;
-
- decl->hw_atomic_range[i].first = first;
- decl->hw_atomic_range[i].last = last;
- decl->hw_atomic_range[i].array_id = array_id;
- } else {
- set_bad(ureg);
- }
-}
-
-static struct ureg_dst alloc_temporary( struct ureg_program *ureg,
- boolean local )
-{
- unsigned i;
-
- /* Look for a released temporary.
- */
- for (i = util_bitmask_get_first_index(ureg->free_temps);
- i != UTIL_BITMASK_INVALID_INDEX;
- i = util_bitmask_get_next_index(ureg->free_temps, i + 1)) {
- if (util_bitmask_get(ureg->local_temps, i) == local)
- break;
- }
-
- /* Or allocate a new one.
- */
- if (i == UTIL_BITMASK_INVALID_INDEX) {
- i = ureg->nr_temps++;
-
- if (local)
- util_bitmask_set(ureg->local_temps, i);
-
- /* Start a new declaration when the local flag changes */
- if (!i || util_bitmask_get(ureg->local_temps, i - 1) != local)
- util_bitmask_set(ureg->decl_temps, i);
- }
-
- util_bitmask_clear(ureg->free_temps, i);
-
- return ureg_dst_register( TGSI_FILE_TEMPORARY, i );
-}
-
-struct ureg_dst ureg_DECL_temporary( struct ureg_program *ureg )
-{
- return alloc_temporary(ureg, FALSE);
-}
-
-struct ureg_dst ureg_DECL_local_temporary( struct ureg_program *ureg )
-{
- return alloc_temporary(ureg, TRUE);
-}
-
-struct ureg_dst ureg_DECL_array_temporary( struct ureg_program *ureg,
- unsigned size,
- boolean local )
-{
- unsigned i = ureg->nr_temps;
- struct ureg_dst dst = ureg_dst_register( TGSI_FILE_TEMPORARY, i );
-
- if (local)
- util_bitmask_set(ureg->local_temps, i);
-
- /* Always start a new declaration at the start */
- util_bitmask_set(ureg->decl_temps, i);
-
- ureg->nr_temps += size;
-
- /* and also at the end of the array */
- util_bitmask_set(ureg->decl_temps, ureg->nr_temps);
-
- if (ureg->nr_array_temps < UREG_MAX_ARRAY_TEMPS) {
- ureg->array_temps[ureg->nr_array_temps++] = i;
- dst.ArrayID = ureg->nr_array_temps;
- }
-
- return dst;
-}
-
-void ureg_release_temporary( struct ureg_program *ureg,
- struct ureg_dst tmp )
-{
- if(tmp.File == TGSI_FILE_TEMPORARY)
- util_bitmask_set(ureg->free_temps, tmp.Index);
-}
-
-
-/* Allocate a new address register.
- */
-struct ureg_dst ureg_DECL_address( struct ureg_program *ureg )
-{
- if (ureg->nr_addrs < UREG_MAX_ADDR)
- return ureg_dst_register( TGSI_FILE_ADDRESS, ureg->nr_addrs++ );
-
- assert( 0 );
- return ureg_dst_register( TGSI_FILE_ADDRESS, 0 );
-}
-
-/* Allocate a new predicate register.
- */
-struct ureg_dst
-ureg_DECL_predicate(struct ureg_program *ureg)
-{
- if (ureg->nr_preds < UREG_MAX_PRED) {
- return ureg_dst_register(TGSI_FILE_PREDICATE, ureg->nr_preds++);
- }
-
- assert(0);
- return ureg_dst_register(TGSI_FILE_PREDICATE, 0);
-}
-
-/* Allocate a new sampler.
- */
-struct ureg_src ureg_DECL_sampler( struct ureg_program *ureg,
- int nr )
-{
- unsigned i;
-
- for (i = 0; i < ureg->nr_samplers; i++)
- if (ureg->sampler[i].Index == nr)
- return ureg->sampler[i];
-
- if (i < PIPE_MAX_SAMPLERS) {
- ureg->sampler[i] = ureg_src_register( TGSI_FILE_SAMPLER, nr );
- ureg->nr_samplers++;
- return ureg->sampler[i];
- }
-
- assert( 0 );
- return ureg->sampler[0];
-}
-
-/*
- * Allocate a new shader sampler view.
- */
-struct ureg_src
-ureg_DECL_sampler_view(struct ureg_program *ureg,
- unsigned index,
- unsigned target,
- unsigned return_type_x,
- unsigned return_type_y,
- unsigned return_type_z,
- unsigned return_type_w)
-{
- struct ureg_src reg = ureg_src_register(TGSI_FILE_SAMPLER_VIEW, index);
- uint i;
-
- for (i = 0; i < ureg->nr_sampler_views; i++) {
- if (ureg->sampler_view[i].index == index) {
- return reg;
- }
- }
-
- if (i < PIPE_MAX_SHADER_SAMPLER_VIEWS) {
- ureg->sampler_view[i].index = index;
- ureg->sampler_view[i].target = target;
- ureg->sampler_view[i].return_type_x = return_type_x;
- ureg->sampler_view[i].return_type_y = return_type_y;
- ureg->sampler_view[i].return_type_z = return_type_z;
- ureg->sampler_view[i].return_type_w = return_type_w;
- ureg->nr_sampler_views++;
- return reg;
- }
-
- assert(0);
- return reg;
-}
-
-static int
-match_or_expand_immediate( const unsigned *v,
- unsigned nr,
- unsigned *v2,
- unsigned *pnr2,
- unsigned *swizzle )
-{
- unsigned nr2 = *pnr2;
- unsigned i, j;
-
- *swizzle = 0;
-
- for (i = 0; i < nr; i++) {
- boolean found = FALSE;
-
- for (j = 0; j < nr2 && !found; j++) {
- if (v[i] == v2[j]) {
- *swizzle |= j << (i * 2);
- found = TRUE;
- }
- }
-
- if (!found) {
- if (nr2 >= 4) {
- return FALSE;
- }
-
- v2[nr2] = v[i];
- *swizzle |= nr2 << (i * 2);
- nr2++;
- }
- }
-
- /* Actually expand immediate only when fully succeeded.
- */
- *pnr2 = nr2;
- return TRUE;
-}
-
-
-static struct ureg_src
-decl_immediate( struct ureg_program *ureg,
- const unsigned *v,
- unsigned nr,
- unsigned type )
-{
- unsigned i, j;
- unsigned swizzle = 0;
-
- /* Could do a first pass where we examine all existing immediates
- * without expanding.
- */
-
- for (i = 0; i < ureg->nr_immediates; i++) {
- if (ureg->immediate[i].type != type) {
- continue;
- }
- if (match_or_expand_immediate(v,
- nr,
- ureg->immediate[i].value.u,
- &ureg->immediate[i].nr,
- &swizzle)) {
- goto out;
- }
- }
-
- if (ureg->nr_immediates < UREG_MAX_IMMEDIATE) {
- i = ureg->nr_immediates++;
- ureg->immediate[i].type = type;
- if (match_or_expand_immediate(v,
- nr,
- ureg->immediate[i].value.u,
- &ureg->immediate[i].nr,
- &swizzle)) {
- goto out;
- }
- }
-
- set_bad(ureg);
-
-out:
- /* Make sure that all referenced elements are from this immediate.
- * Has the effect of making size-one immediates into scalars.
- */
- for (j = nr; j < 4; j++) {
- swizzle |= (swizzle & 0x3) << (j * 2);
- }
-
- return ureg_swizzle(ureg_src_register(TGSI_FILE_IMMEDIATE, i),
- (swizzle >> 0) & 0x3,
- (swizzle >> 2) & 0x3,
- (swizzle >> 4) & 0x3,
- (swizzle >> 6) & 0x3);
-}
-
-
-struct ureg_src
-ureg_DECL_immediate( struct ureg_program *ureg,
- const float *v,
- unsigned nr )
-{
- union {
- float f[4];
- unsigned u[4];
- } fu;
- unsigned int i;
-
- for (i = 0; i < nr; i++) {
- fu.f[i] = v[i];
- }
-
- return decl_immediate(ureg, fu.u, nr, TGSI_IMM_FLOAT32);
-}
-
-
-struct ureg_src
-ureg_DECL_immediate_uint( struct ureg_program *ureg,
- const unsigned *v,
- unsigned nr )
-{
- return decl_immediate(ureg, v, nr, TGSI_IMM_UINT32);
-}
-
-
-struct ureg_src
-ureg_DECL_immediate_block_uint( struct ureg_program *ureg,
- const unsigned *v,
- unsigned nr )
-{
- uint index;
- uint i;
-
- if (ureg->nr_immediates + (nr + 3) / 4 > UREG_MAX_IMMEDIATE) {
- set_bad(ureg);
- return ureg_src_register(TGSI_FILE_IMMEDIATE, 0);
- }
-
- index = ureg->nr_immediates;
- ureg->nr_immediates += (nr + 3) / 4;
-
- for (i = index; i < ureg->nr_immediates; i++) {
- ureg->immediate[i].type = TGSI_IMM_UINT32;
- ureg->immediate[i].nr = nr > 4 ? 4 : nr;
- memcpy(ureg->immediate[i].value.u,
- &v[(i - index) * 4],
- ureg->immediate[i].nr * sizeof(uint));
- nr -= 4;
- }
-
- return ureg_src_register(TGSI_FILE_IMMEDIATE, index);
-}
-
-
-struct ureg_src
-ureg_DECL_immediate_int( struct ureg_program *ureg,
- const int *v,
- unsigned nr )
-{
- return decl_immediate(ureg, (const unsigned *)v, nr, TGSI_IMM_INT32);
-}
-
-
-void
-ureg_emit_src( struct ureg_program *ureg,
- struct ureg_src src )
-{
- unsigned size = 1 + (src.Indirect ? 1 : 0) +
- (src.Dimension ? (src.DimIndirect ? 2 : 1) : 0);
-
- union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
- unsigned n = 0;
-
- assert(src.File != TGSI_FILE_NULL);
- assert(src.File < TGSI_FILE_COUNT);
-
- out[n].value = 0;
- out[n].src.File = src.File;
- out[n].src.SwizzleX = src.SwizzleX;
- out[n].src.SwizzleY = src.SwizzleY;
- out[n].src.SwizzleZ = src.SwizzleZ;
- out[n].src.SwizzleW = src.SwizzleW;
- out[n].src.Index = src.Index;
- out[n].src.Negate = src.Negate;
- out[0].src.Absolute = src.Absolute;
- n++;
-
- if (src.Indirect) {
- out[0].src.Indirect = 1;
- out[n].value = 0;
- out[n].ind.File = src.IndirectFile;
- out[n].ind.Swizzle = src.IndirectSwizzle;
- out[n].ind.Index = src.IndirectIndex;
- out[n].ind.ArrayID = src.ArrayID;
- n++;
- }
-
- if (src.Dimension) {
- out[0].src.Dimension = 1;
- out[n].dim.Dimension = 0;
- out[n].dim.Padding = 0;
- if (src.DimIndirect) {
- out[n].dim.Indirect = 1;
- out[n].dim.Index = src.DimensionIndex;
- n++;
- out[n].value = 0;
- out[n].ind.File = src.DimIndFile;
- out[n].ind.Swizzle = src.DimIndSwizzle;
- out[n].ind.Index = src.DimIndIndex;
- out[n].ind.ArrayID = src.ArrayID;
- } else {
- out[n].dim.Indirect = 0;
- out[n].dim.Index = src.DimensionIndex;
- }
- n++;
- }
-
- assert(n == size);
-}
-
-
-void
-ureg_emit_dst( struct ureg_program *ureg,
- struct ureg_dst dst )
-{
- unsigned size = (1 +
- (dst.Indirect ? 1 : 0));
-
- union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
- unsigned n = 0;
-
- assert(dst.File != TGSI_FILE_NULL);
- assert(dst.File != TGSI_FILE_CONSTANT);
- assert(dst.File != TGSI_FILE_INPUT);
- assert(dst.File != TGSI_FILE_SAMPLER);
- assert(dst.File != TGSI_FILE_SAMPLER_VIEW);
- assert(dst.File != TGSI_FILE_IMMEDIATE);
- assert(dst.File < TGSI_FILE_COUNT);
-
- out[n].value = 0;
- out[n].dst.File = dst.File;
- out[n].dst.WriteMask = dst.WriteMask;
- out[n].dst.Indirect = dst.Indirect;
- out[n].dst.Index = dst.Index;
- n++;
-
- if (dst.Indirect) {
- out[n].value = 0;
- out[n].ind.File = dst.IndirectFile;
- out[n].ind.Swizzle = dst.IndirectSwizzle;
- out[n].ind.Index = dst.IndirectIndex;
- out[n].ind.ArrayID = dst.ArrayID;
- n++;
- }
-
- assert(n == size);
-}
-
-
-static void validate( unsigned opcode,
- unsigned nr_dst,
- unsigned nr_src )
-{
-#ifdef DEBUG
- const struct tgsi_opcode_info *info = tgsi_get_opcode_info( opcode );
- assert(info);
- if(info) {
- assert(nr_dst == info->num_dst);
- assert(nr_src == info->num_src);
- }
-#else
- (void)opcode;
- (void)nr_dst;
- (void)nr_src;
-#endif
-}
-
-struct ureg_emit_insn_result
-ureg_emit_insn(struct ureg_program *ureg,
- unsigned opcode,
- boolean saturate,
- unsigned precise,
- unsigned num_dst,
- unsigned num_src )
-{
- union tgsi_any_token *out;
- uint count = 1;
- struct ureg_emit_insn_result result;
-
- validate( opcode, num_dst, num_src );
-
- out = get_tokens( ureg, DOMAIN_INSN, count );
- out[0].insn = tgsi_default_instruction();
- out[0].insn.Opcode = opcode;
- out[0].insn.Saturate = saturate;
- out[0].insn.Precise = precise;
- out[0].insn.NumDstRegs = num_dst;
- out[0].insn.NumSrcRegs = num_src;
-
- result.insn_token = ureg->domain[DOMAIN_INSN].count - count;
- result.extended_token = result.insn_token;
-
- ureg->nr_instructions++;
-
- return result;
-}
-
-
-void
-ureg_emit_label(struct ureg_program *ureg,
- unsigned extended_token,
- unsigned *label_token )
-{
- union tgsi_any_token *out, *insn;
-
- if(!label_token)
- return;
-
- out = get_tokens( ureg, DOMAIN_INSN, 1 );
- out[0].value = 0;
-
- insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );
- insn->insn.Label = 1;
-
- *label_token = ureg->domain[DOMAIN_INSN].count - 1;
-}
-
-/* Will return a number which can be used in a label to point to the
- * next instruction to be emitted.
- */
-unsigned
-ureg_get_instruction_number( struct ureg_program *ureg )
-{
- return ureg->nr_instructions;
-}
-
-/* Patch a given label (expressed as a token number) to point to a
- * given instruction (expressed as an instruction number).
- */
-void
-ureg_fixup_label(struct ureg_program *ureg,
- unsigned label_token,
- unsigned instruction_number )
-{
- union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, label_token );
-
- out->insn_label.Label = instruction_number;
-}
-
-
-void
-ureg_emit_texture(struct ureg_program *ureg,
- unsigned extended_token,
- unsigned target, unsigned num_offsets)
-{
- union tgsi_any_token *out, *insn;
-
- out = get_tokens( ureg, DOMAIN_INSN, 1 );
- insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );
-
- insn->insn.Texture = 1;
-
- out[0].value = 0;
- out[0].insn_texture.Texture = target;
- out[0].insn_texture.NumOffsets = num_offsets;
-}
-
-void
-ureg_emit_texture_offset(struct ureg_program *ureg,
- const struct tgsi_texture_offset *offset)
-{
- union tgsi_any_token *out;
-
- out = get_tokens( ureg, DOMAIN_INSN, 1);
-
- out[0].value = 0;
- out[0].insn_texture_offset = *offset;
-
-}
-
-
-void
-ureg_fixup_insn_size(struct ureg_program *ureg,
- unsigned insn )
-{
- union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, insn );
-
- assert(out->insn.Type == TGSI_TOKEN_TYPE_INSTRUCTION);
- out->insn.NrTokens = ureg->domain[DOMAIN_INSN].count - insn - 1;
-}
-
-
-void
-ureg_insn(struct ureg_program *ureg,
- unsigned opcode,
- const struct ureg_dst *dst,
- unsigned nr_dst,
- const struct ureg_src *src,
- unsigned nr_src,
- unsigned precise )
-{
- struct ureg_emit_insn_result insn;
- unsigned i;
- boolean saturate;
-
- if (nr_dst && ureg_dst_is_empty(dst[0])) {
- return;
- }
-
- saturate = nr_dst ? dst[0].Saturate : FALSE;
-
- insn = ureg_emit_insn(ureg,
- opcode,
- saturate,
- precise,
- nr_dst,
- nr_src);
-
- for (i = 0; i < nr_dst; i++)
- ureg_emit_dst( ureg, dst[i] );
-
- for (i = 0; i < nr_src; i++)
- ureg_emit_src( ureg, src[i] );
-
- ureg_fixup_insn_size( ureg, insn.insn_token );
-}
-
-void
-ureg_tex_insn(struct ureg_program *ureg,
- unsigned opcode,
- const struct ureg_dst *dst,
- unsigned nr_dst,
- unsigned target,
- const struct tgsi_texture_offset *texoffsets,
- unsigned nr_offset,
- const struct ureg_src *src,
- unsigned nr_src )
-{
- struct ureg_emit_insn_result insn;
- unsigned i;
- boolean saturate;
-
- if (nr_dst && ureg_dst_is_empty(dst[0])) {
- return;
- }
-
- saturate = nr_dst ? dst[0].Saturate : FALSE;
-
- insn = ureg_emit_insn(ureg,
- opcode,
- saturate,
- 0,
- nr_dst,
- nr_src);
-
- ureg_emit_texture( ureg, insn.extended_token, target, nr_offset );
-
- for (i = 0; i < nr_offset; i++)
- ureg_emit_texture_offset( ureg, &texoffsets[i]);
-
- for (i = 0; i < nr_dst; i++)
- ureg_emit_dst( ureg, dst[i] );
-
- for (i = 0; i < nr_src; i++)
- ureg_emit_src( ureg, src[i] );
-
- ureg_fixup_insn_size( ureg, insn.insn_token );
-}
-
-
-void
-ureg_label_insn(struct ureg_program *ureg,
- unsigned opcode,
- const struct ureg_src *src,
- unsigned nr_src,
- unsigned *label_token )
-{
- struct ureg_emit_insn_result insn;
- unsigned i;
-
- insn = ureg_emit_insn(ureg,
- opcode,
- FALSE,
- 0,
- 0,
- nr_src);
-
- ureg_emit_label( ureg, insn.extended_token, label_token );
-
- for (i = 0; i < nr_src; i++)
- ureg_emit_src( ureg, src[i] );
-
- ureg_fixup_insn_size( ureg, insn.insn_token );
-}
-
-
-static void
-emit_decl_semantic(struct ureg_program *ureg,
- unsigned file,
- unsigned index,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned usage_mask)
-{
- union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 3;
- out[0].decl.File = file;
- out[0].decl.UsageMask = usage_mask;
- out[0].decl.Semantic = 1;
-
- out[1].value = 0;
- out[1].decl_range.First = index;
- out[1].decl_range.Last = index;
-
- out[2].value = 0;
- out[2].decl_semantic.Name = semantic_name;
- out[2].decl_semantic.Index = semantic_index;
-}
-
-
-static void
-emit_decl_fs(struct ureg_program *ureg,
- unsigned file,
- unsigned index,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned interpolate,
- unsigned cylindrical_wrap,
- unsigned interpolate_location)
-{
- union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 4);
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 4;
- out[0].decl.File = file;
- out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW; /* FIXME! */
- out[0].decl.Interpolate = 1;
- out[0].decl.Semantic = 1;
-
- out[1].value = 0;
- out[1].decl_range.First = index;
- out[1].decl_range.Last = index;
-
- out[2].value = 0;
- out[2].decl_interp.Interpolate = interpolate;
- out[2].decl_interp.CylindricalWrap = cylindrical_wrap;
- out[2].decl_interp.Location = interpolate_location;
-
- out[3].value = 0;
- out[3].decl_semantic.Name = semantic_name;
- out[3].decl_semantic.Index = semantic_index;
-}
-
-static void
-emit_decl_temps( struct ureg_program *ureg,
- unsigned first, unsigned last,
- boolean local,
- unsigned arrayid )
-{
- union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL,
- arrayid ? 3 : 2 );
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 2;
- out[0].decl.File = TGSI_FILE_TEMPORARY;
- out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
- out[0].decl.Local = local;
-
- out[1].value = 0;
- out[1].decl_range.First = first;
- out[1].decl_range.Last = last;
-
- if (arrayid) {
- out[0].decl.Array = 1;
- out[2].value = 0;
- out[2].array.ArrayID = arrayid;
- }
-}
-
-static void emit_decl_range( struct ureg_program *ureg,
- unsigned file,
- unsigned first,
- unsigned count )
-{
- union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 2;
- out[0].decl.File = file;
- out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
- out[0].decl.Semantic = 0;
-
- out[1].value = 0;
- out[1].decl_range.First = first;
- out[1].decl_range.Last = first + count - 1;
-}
-
-static void
-emit_decl_range2D(struct ureg_program *ureg,
- unsigned file,
- unsigned first,
- unsigned last,
- unsigned index2D)
-{
- union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 3;
- out[0].decl.File = file;
- out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
- out[0].decl.Dimension = 1;
-
- out[1].value = 0;
- out[1].decl_range.First = first;
- out[1].decl_range.Last = last;
-
- out[2].value = 0;
- out[2].decl_dim.Index2D = index2D;
-}
-
-static void
-emit_decl_sampler_view(struct ureg_program *ureg,
- unsigned index,
- unsigned target,
- unsigned return_type_x,
- unsigned return_type_y,
- unsigned return_type_z,
- unsigned return_type_w )
-{
- union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 3;
- out[0].decl.File = TGSI_FILE_SAMPLER_VIEW;
- out[0].decl.UsageMask = 0xf;
-
- out[1].value = 0;
- out[1].decl_range.First = index;
- out[1].decl_range.Last = index;
-
- out[2].value = 0;
- out[2].decl_sampler_view.Resource = target;
- out[2].decl_sampler_view.ReturnTypeX = return_type_x;
- out[2].decl_sampler_view.ReturnTypeY = return_type_y;
- out[2].decl_sampler_view.ReturnTypeZ = return_type_z;
- out[2].decl_sampler_view.ReturnTypeW = return_type_w;
-}
-
-static void
-emit_immediate( struct ureg_program *ureg,
- const unsigned *v,
- unsigned type )
-{
- union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 5 );
-
- out[0].value = 0;
- out[0].imm.Type = TGSI_TOKEN_TYPE_IMMEDIATE;
- out[0].imm.NrTokens = 5;
- out[0].imm.DataType = type;
- out[0].imm.Padding = 0;
-
- out[1].imm_data.Uint = v[0];
- out[2].imm_data.Uint = v[1];
- out[3].imm_data.Uint = v[2];
- out[4].imm_data.Uint = v[3];
-}
-
-static void
-emit_property(struct ureg_program *ureg,
- unsigned name,
- unsigned data)
-{
- union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);
-
- out[0].value = 0;
- out[0].prop.Type = TGSI_TOKEN_TYPE_PROPERTY;
- out[0].prop.NrTokens = 2;
- out[0].prop.PropertyName = name;
-
- out[1].prop_data.Data = data;
-}
-
-static void
-emit_decl_atomic_2d(struct ureg_program *ureg,
- unsigned first,
- unsigned last,
- unsigned index2D,
- unsigned array_id)
-{
- union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, array_id ? 4 : 3);
-
- out[0].value = 0;
- out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
- out[0].decl.NrTokens = 3;
- out[0].decl.File = TGSI_FILE_HW_ATOMIC;
- out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
- out[0].decl.Dimension = 1;
- out[0].decl.Array = array_id != 0;
-
- out[1].value = 0;
- out[1].decl_range.First = first;
- out[1].decl_range.Last = last;
-
- out[2].value = 0;
- out[2].decl_dim.Index2D = index2D;
-
- if (array_id) {
- out[3].value = 0;
- out[3].array.ArrayID = array_id;
- }
-}
-
-static void emit_decls( struct ureg_program *ureg )
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
- if (ureg->properties[i] != ~0u)
- emit_property(ureg, i, ureg->properties[i]);
-
- if (ureg->processor == TGSI_PROCESSOR_VERTEX) {
- for (i = 0; i < UREG_MAX_INPUT; i++) {
- if (ureg->vs_inputs[i/32] & (1 << (i%32))) {
- emit_decl_range( ureg, TGSI_FILE_INPUT, i, 1 );
- }
- }
- } else if (ureg->processor == TGSI_PROCESSOR_FRAGMENT) {
- for (i = 0; i < ureg->nr_fs_inputs; i++) {
- emit_decl_fs(ureg,
- TGSI_FILE_INPUT,
- i,
- ureg->fs_input[i].semantic_name,
- ureg->fs_input[i].semantic_index,
- ureg->fs_input[i].interp,
- ureg->fs_input[i].cylindrical_wrap,
- ureg->fs_input[i].interp_location);
- }
- } else {
- for (i = 0; i < ureg->nr_gs_inputs; i++) {
- emit_decl_semantic(ureg,
- TGSI_FILE_INPUT,
- ureg->gs_input[i].index,
- ureg->gs_input[i].semantic_name,
- ureg->gs_input[i].semantic_index,
- TGSI_WRITEMASK_XYZW);
- }
- }
-
- for (i = 0; i < ureg->nr_system_values; i++) {
- emit_decl_semantic(ureg,
- TGSI_FILE_SYSTEM_VALUE,
- ureg->system_value[i].index,
- ureg->system_value[i].semantic_name,
- ureg->system_value[i].semantic_index,
- TGSI_WRITEMASK_XYZW);
- }
-
- for (i = 0; i < ureg->nr_outputs; i++) {
- emit_decl_semantic(ureg,
- TGSI_FILE_OUTPUT,
- i,
- ureg->output[i].semantic_name,
- ureg->output[i].semantic_index,
- ureg->output[i].usage_mask);
- }
-
- for (i = 0; i < ureg->nr_samplers; i++) {
- emit_decl_range( ureg,
- TGSI_FILE_SAMPLER,
- ureg->sampler[i].Index, 1 );
- }
-
- for (i = 0; i < ureg->nr_sampler_views; i++) {
- emit_decl_sampler_view(ureg,
- ureg->sampler_view[i].index,
- ureg->sampler_view[i].target,
- ureg->sampler_view[i].return_type_x,
- ureg->sampler_view[i].return_type_y,
- ureg->sampler_view[i].return_type_z,
- ureg->sampler_view[i].return_type_w);
- }
-
- if (ureg->const_decls.nr_constant_ranges) {
- for (i = 0; i < ureg->const_decls.nr_constant_ranges; i++) {
- emit_decl_range(ureg,
- TGSI_FILE_CONSTANT,
- ureg->const_decls.constant_range[i].first,
- ureg->const_decls.constant_range[i].last - ureg->const_decls.constant_range[i].first + 1);
- }
- }
-
- for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
- struct const_decl *decl = &ureg->const_decls2D[i];
-
- if (decl->nr_constant_ranges) {
- uint j;
-
- for (j = 0; j < decl->nr_constant_ranges; j++) {
- emit_decl_range2D(ureg,
- TGSI_FILE_CONSTANT,
- decl->constant_range[j].first,
- decl->constant_range[j].last,
- i);
- }
- }
- }
-
- for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
- struct hw_atomic_decl *decl = &ureg->hw_atomic_decls[i];
-
- if (decl->nr_hw_atomic_ranges) {
- uint j;
-
- for (j = 0; j < decl->nr_hw_atomic_ranges; j++) {
- emit_decl_atomic_2d(ureg,
- decl->hw_atomic_range[j].first,
- decl->hw_atomic_range[j].last,
- i,
- decl->hw_atomic_range[j].array_id);
- }
- }
- }
-
- if (ureg->nr_temps) {
- unsigned array = 0;
- for (i = 0; i < ureg->nr_temps;) {
- boolean local = util_bitmask_get(ureg->local_temps, i);
- unsigned first = i;
- i = util_bitmask_get_next_index(ureg->decl_temps, i + 1);
- if (i == UTIL_BITMASK_INVALID_INDEX)
- i = ureg->nr_temps;
-
- if (array < ureg->nr_array_temps && ureg->array_temps[array] == first)
- emit_decl_temps( ureg, first, i - 1, local, ++array );
- else
- emit_decl_temps( ureg, first, i - 1, local, 0 );
- }
- }
-
- if (ureg->nr_addrs) {
- emit_decl_range( ureg,
- TGSI_FILE_ADDRESS,
- 0, ureg->nr_addrs );
- }
-
- if (ureg->nr_preds) {
- emit_decl_range(ureg,
- TGSI_FILE_PREDICATE,
- 0,
- ureg->nr_preds);
- }
-
- for (i = 0; i < ureg->nr_immediates; i++) {
- emit_immediate( ureg,
- ureg->immediate[i].value.u,
- ureg->immediate[i].type );
- }
-}
-
-/* Append the instruction tokens onto the declarations to build a
- * contiguous stream suitable to send to the driver.
- */
-static void copy_instructions( struct ureg_program *ureg )
-{
- unsigned nr_tokens = ureg->domain[DOMAIN_INSN].count;
- union tgsi_any_token *out = get_tokens( ureg,
- DOMAIN_DECL,
- nr_tokens );
-
- memcpy(out,
- ureg->domain[DOMAIN_INSN].tokens,
- nr_tokens * sizeof out[0] );
-}
-
-static void
-fixup_header_size(struct ureg_program *ureg)
-{
- union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_DECL, 0 );
-
- out->header.BodySize = ureg->domain[DOMAIN_DECL].count - 2;
-}
-
-
-static void
-emit_header( struct ureg_program *ureg )
-{
- union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );
-
- out[0].header.HeaderSize = 2;
- out[0].header.BodySize = 0;
-
- out[1].processor.Processor = ureg->processor;
- out[1].processor.Padding = 0;
-}
-
-
-const struct tgsi_token *ureg_finalize( struct ureg_program *ureg )
-{
- const struct tgsi_token *tokens;
-
- emit_header( ureg );
- emit_decls( ureg );
- copy_instructions( ureg );
- fixup_header_size( ureg );
-
- if (ureg->domain[0].tokens == error_tokens ||
- ureg->domain[1].tokens == error_tokens) {
- debug_printf("%s: error in generated shader\n", __FUNCTION__);
- assert(0);
- return NULL;
- }
-
- tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
-
- if (0) {
- debug_printf("%s: emitted shader %d tokens:\n", __FUNCTION__,
- ureg->domain[DOMAIN_DECL].count);
- tgsi_dump( tokens, 0 );
- }
-
-#if DEBUG
- if (tokens && !tgsi_sanity_check(tokens)) {
- debug_printf("tgsi_ureg.c, sanity check failed on generated tokens:\n");
- tgsi_dump(tokens, 0);
- assert(0);
- }
-#endif
-
-
- return tokens;
-}
-
-
-void *ureg_create_shader( struct ureg_program *ureg,
- struct pipe_context *pipe,
- const struct pipe_stream_output_info *so )
-{
- struct pipe_shader_state state;
-
- state.tokens = ureg_finalize(ureg);
- if(!state.tokens)
- return NULL;
-
- if (so)
- state.stream_output = *so;
- else
- memset(&state.stream_output, 0, sizeof(state.stream_output));
-
- if (ureg->processor == TGSI_PROCESSOR_VERTEX)
- return pipe->create_vs_state( pipe, &state );
- else
- return pipe->create_fs_state( pipe, &state );
-}
-
-
-const struct tgsi_token *ureg_get_tokens( struct ureg_program *ureg,
- unsigned *nr_tokens )
-{
- const struct tgsi_token *tokens;
-
- ureg_finalize(ureg);
-
- tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
-
- if (nr_tokens)
- *nr_tokens = ureg->domain[DOMAIN_DECL].size;
-
- ureg->domain[DOMAIN_DECL].tokens = 0;
- ureg->domain[DOMAIN_DECL].size = 0;
- ureg->domain[DOMAIN_DECL].order = 0;
- ureg->domain[DOMAIN_DECL].count = 0;
-
- return tokens;
-}
-
-
-void ureg_free_tokens( const struct tgsi_token *tokens )
-{
- FREE((struct tgsi_token *)tokens);
-}
-
-
-struct ureg_program *ureg_create( unsigned processor )
-{
- unsigned i;
- struct ureg_program *ureg = CALLOC_STRUCT( ureg_program );
- if (ureg == NULL)
- goto no_ureg;
-
- ureg->processor = processor;
-
- for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
- ureg->properties[i] = ~0;
-
- ureg->free_temps = util_bitmask_create();
- if (ureg->free_temps == NULL)
- goto no_free_temps;
-
- ureg->local_temps = util_bitmask_create();
- if (ureg->local_temps == NULL)
- goto no_local_temps;
-
- ureg->decl_temps = util_bitmask_create();
- if (ureg->decl_temps == NULL)
- goto no_decl_temps;
-
- return ureg;
-
-no_decl_temps:
- util_bitmask_destroy(ureg->local_temps);
-no_local_temps:
- util_bitmask_destroy(ureg->free_temps);
-no_free_temps:
- FREE(ureg);
-no_ureg:
- return NULL;
-}
-
-
-unsigned
-ureg_get_nr_outputs( const struct ureg_program *ureg )
-{
- if (!ureg)
- return 0;
- return ureg->nr_outputs;
-}
-
-
-void ureg_destroy( struct ureg_program *ureg )
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(ureg->domain); i++) {
- if (ureg->domain[i].tokens &&
- ureg->domain[i].tokens != error_tokens)
- FREE(ureg->domain[i].tokens);
- }
-
- util_bitmask_destroy(ureg->free_temps);
- util_bitmask_destroy(ureg->local_temps);
- util_bitmask_destroy(ureg->decl_temps);
-
- FREE(ureg);
-}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.h b/src/gallium/auxiliary/tgsi/tgsi_ureg.h
deleted file mode 100644
index a68af19b..00000000
--- a/src/gallium/auxiliary/tgsi/tgsi_ureg.h
+++ /dev/null
@@ -1,1206 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE, INC AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TGSI_UREG_H
-#define TGSI_UREG_H
-
-#include "pipe/p_compiler.h"
-#include "pipe/p_shader_tokens.h"
-#include "util/u_debug.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct ureg_program;
-struct pipe_stream_output_info;
-
-/* Almost a tgsi_src_register, but we need to pull in the Absolute
- * flag from the _ext token. Indirect flag always implies ADDR[0].
- */
-struct ureg_src
-{
- unsigned File : 4; /* TGSI_FILE_ */
- unsigned SwizzleX : 2; /* TGSI_SWIZZLE_ */
- unsigned SwizzleY : 2; /* TGSI_SWIZZLE_ */
- unsigned SwizzleZ : 2; /* TGSI_SWIZZLE_ */
- unsigned SwizzleW : 2; /* TGSI_SWIZZLE_ */
- unsigned Indirect : 1; /* BOOL */
- unsigned DimIndirect : 1; /* BOOL */
- unsigned Dimension : 1; /* BOOL */
- unsigned Absolute : 1; /* BOOL */
- unsigned Negate : 1; /* BOOL */
- unsigned IndirectFile : 4; /* TGSI_FILE_ */
- unsigned IndirectSwizzle : 2; /* TGSI_SWIZZLE_ */
- unsigned DimIndFile : 4; /* TGSI_FILE_ */
- unsigned DimIndSwizzle : 2; /* TGSI_SWIZZLE_ */
- int Index : 16; /* SINT */
- int IndirectIndex : 16; /* SINT */
- int DimensionIndex : 16; /* SINT */
- int DimIndIndex : 16; /* SINT */
- unsigned ArrayID : 10; /* UINT */
-};
-
-/* Very similar to a tgsi_dst_register, removing unsupported fields
- * and adding a Saturate flag. It's easier to push saturate into the
- * destination register than to try and create a _SAT variant of each
- * instruction function.
- */
-struct ureg_dst
-{
- unsigned File : 4; /* TGSI_FILE_ */
- unsigned WriteMask : 4; /* TGSI_WRITEMASK_ */
- unsigned Indirect : 1; /* BOOL */
- unsigned Saturate : 1; /* BOOL */
- unsigned Predicate : 1;
- unsigned PredNegate : 1; /* BOOL */
- unsigned PredSwizzleX : 2; /* TGSI_SWIZZLE_ */
- unsigned PredSwizzleY : 2; /* TGSI_SWIZZLE_ */
- unsigned PredSwizzleZ : 2; /* TGSI_SWIZZLE_ */
- unsigned PredSwizzleW : 2; /* TGSI_SWIZZLE_ */
- int Index : 16; /* SINT */
- int IndirectIndex : 16; /* SINT */
- unsigned IndirectFile : 4; /* TGSI_FILE_ */
- int IndirectSwizzle : 2; /* TGSI_SWIZZLE_ */
- unsigned ArrayID : 10; /* UINT */
-};
-
-struct pipe_context;
-
-struct ureg_program *
-ureg_create( unsigned processor );
-
-const struct tgsi_token *
-ureg_finalize( struct ureg_program * );
-
-/* Create and return a shader:
- */
-void *
-ureg_create_shader( struct ureg_program *,
- struct pipe_context *pipe,
- const struct pipe_stream_output_info *so );
-
-
-/* Alternately, return the built token stream and hand ownership of
- * that memory to the caller:
- */
-const struct tgsi_token *
-ureg_get_tokens( struct ureg_program *ureg,
- unsigned *nr_tokens );
-
-/*
- * Returns the number of currently declared outputs.
- */
-unsigned
-ureg_get_nr_outputs( const struct ureg_program *ureg );
-
-
-/* Free the tokens created by ureg_get_tokens() */
-void ureg_free_tokens( const struct tgsi_token *tokens );
-
-
-void
-ureg_destroy( struct ureg_program * );
-
-
-/***********************************************************************
- * Convenience routine:
- */
-static inline void *
-ureg_create_shader_with_so_and_destroy( struct ureg_program *p,
- struct pipe_context *pipe,
- const struct pipe_stream_output_info *so )
-{
- void *result = ureg_create_shader( p, pipe, so );
- ureg_destroy( p );
- return result;
-}
-
-static inline void *
-ureg_create_shader_and_destroy( struct ureg_program *p,
- struct pipe_context *pipe )
-{
- return ureg_create_shader_with_so_and_destroy(p, pipe, NULL);
-}
-
-
-/***********************************************************************
- * Build shader properties:
- */
-
-void
-ureg_property(struct ureg_program *ureg, unsigned name, unsigned value);
-
-
-/***********************************************************************
- * Build shader declarations:
- */
-
-struct ureg_src
-ureg_DECL_fs_input_cyl_centroid(struct ureg_program *,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned interp_mode,
- unsigned cylindrical_wrap,
- unsigned interp_location);
-
-static inline struct ureg_src
-ureg_DECL_fs_input_cyl(struct ureg_program *ureg,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned interp_mode,
- unsigned cylindrical_wrap)
-{
- return ureg_DECL_fs_input_cyl_centroid(ureg,
- semantic_name,
- semantic_index,
- interp_mode,
- cylindrical_wrap,
- 0);
-}
-
-static inline struct ureg_src
-ureg_DECL_fs_input(struct ureg_program *ureg,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned interp_mode)
-{
- return ureg_DECL_fs_input_cyl_centroid(ureg,
- semantic_name,
- semantic_index,
- interp_mode,
- 0, 0);
-}
-
-struct ureg_src
-ureg_DECL_vs_input( struct ureg_program *,
- unsigned index );
-
-struct ureg_src
-ureg_DECL_gs_input(struct ureg_program *,
- unsigned index,
- unsigned semantic_name,
- unsigned semantic_index);
-
-struct ureg_src
-ureg_DECL_system_value(struct ureg_program *,
- unsigned index,
- unsigned semantic_name,
- unsigned semantic_index);
-
-struct ureg_dst
-ureg_DECL_output_masked( struct ureg_program *,
- unsigned semantic_name,
- unsigned semantic_index,
- unsigned usage_mask );
-
-struct ureg_dst
-ureg_DECL_output( struct ureg_program *,
- unsigned semantic_name,
- unsigned semantic_index );
-
-struct ureg_src
-ureg_DECL_immediate( struct ureg_program *,
- const float *v,
- unsigned nr );
-
-struct ureg_src
-ureg_DECL_immediate_uint( struct ureg_program *,
- const unsigned *v,
- unsigned nr );
-
-struct ureg_src
-ureg_DECL_immediate_block_uint( struct ureg_program *,
- const unsigned *v,
- unsigned nr );
-
-struct ureg_src
-ureg_DECL_immediate_int( struct ureg_program *,
- const int *v,
- unsigned nr );
-
-void
-ureg_DECL_constant2D(struct ureg_program *ureg,
- unsigned first,
- unsigned last,
- unsigned index2D);
-
-struct ureg_src
-ureg_DECL_constant( struct ureg_program *,
- unsigned index );
-
-void
-ureg_DECL_hw_atomic(struct ureg_program *ureg,
- unsigned first,
- unsigned last,
- unsigned buffer_id,
- unsigned array_id);
-
-struct ureg_dst
-ureg_DECL_temporary( struct ureg_program * );
-
-/**
- * Emit a temporary with the LOCAL declaration flag set. For use when
- * the register value is not required to be preserved across
- * subroutine boundaries.
- */
-struct ureg_dst
-ureg_DECL_local_temporary( struct ureg_program * );
-
-/**
- * Declare "size" continuous temporary registers.
- */
-struct ureg_dst
-ureg_DECL_array_temporary( struct ureg_program *,
- unsigned size,
- boolean local );
-
-void
-ureg_release_temporary( struct ureg_program *ureg,
- struct ureg_dst tmp );
-
-struct ureg_dst
-ureg_DECL_address( struct ureg_program * );
-
-struct ureg_dst
-ureg_DECL_predicate(struct ureg_program *);
-
-/* Supply an index to the sampler declaration as this is the hook to
- * the external pipe_sampler state. Users of this function probably
- * don't want just any sampler, but a specific one which they've set
- * up state for in the context.
- */
-struct ureg_src
-ureg_DECL_sampler(struct ureg_program *,
- int index );
-
-struct ureg_src
-ureg_DECL_sampler_view(struct ureg_program *,
- unsigned index,
- unsigned target,
- unsigned return_type_x,
- unsigned return_type_y,
- unsigned return_type_z,
- unsigned return_type_w );
-
-
-static inline struct ureg_src
-ureg_imm4f( struct ureg_program *ureg,
- float a, float b,
- float c, float d)
-{
- float v[4];
- v[0] = a;
- v[1] = b;
- v[2] = c;
- v[3] = d;
- return ureg_DECL_immediate( ureg, v, 4 );
-}
-
-static inline struct ureg_src
-ureg_imm3f( struct ureg_program *ureg,
- float a, float b,
- float c)
-{
- float v[3];
- v[0] = a;
- v[1] = b;
- v[2] = c;
- return ureg_DECL_immediate( ureg, v, 3 );
-}
-
-static inline struct ureg_src
-ureg_imm2f( struct ureg_program *ureg,
- float a, float b)
-{
- float v[2];
- v[0] = a;
- v[1] = b;
- return ureg_DECL_immediate( ureg, v, 2 );
-}
-
-static inline struct ureg_src
-ureg_imm1f( struct ureg_program *ureg,
- float a)
-{
- float v[1];
- v[0] = a;
- return ureg_DECL_immediate( ureg, v, 1 );
-}
-
-static inline struct ureg_src
-ureg_imm4u( struct ureg_program *ureg,
- unsigned a, unsigned b,
- unsigned c, unsigned d)
-{
- unsigned v[4];
- v[0] = a;
- v[1] = b;
- v[2] = c;
- v[3] = d;
- return ureg_DECL_immediate_uint( ureg, v, 4 );
-}
-
-static inline struct ureg_src
-ureg_imm3u( struct ureg_program *ureg,
- unsigned a, unsigned b,
- unsigned c)
-{
- unsigned v[3];
- v[0] = a;
- v[1] = b;
- v[2] = c;
- return ureg_DECL_immediate_uint( ureg, v, 3 );
-}
-
-static inline struct ureg_src
-ureg_imm2u( struct ureg_program *ureg,
- unsigned a, unsigned b)
-{
- unsigned v[2];
- v[0] = a;
- v[1] = b;
- return ureg_DECL_immediate_uint( ureg, v, 2 );
-}
-
-static inline struct ureg_src
-ureg_imm1u( struct ureg_program *ureg,
- unsigned a)
-{
- return ureg_DECL_immediate_uint( ureg, &a, 1 );
-}
-
-static inline struct ureg_src
-ureg_imm4i( struct ureg_program *ureg,
- int a, int b,
- int c, int d)
-{
- int v[4];
- v[0] = a;
- v[1] = b;
- v[2] = c;
- v[3] = d;
- return ureg_DECL_immediate_int( ureg, v, 4 );
-}
-
-static inline struct ureg_src
-ureg_imm3i( struct ureg_program *ureg,
- int a, int b,
- int c)
-{
- int v[3];
- v[0] = a;
- v[1] = b;
- v[2] = c;
- return ureg_DECL_immediate_int( ureg, v, 3 );
-}
-
-static inline struct ureg_src
-ureg_imm2i( struct ureg_program *ureg,
- int a, int b)
-{
- int v[2];
- v[0] = a;
- v[1] = b;
- return ureg_DECL_immediate_int( ureg, v, 2 );
-}
-
-static inline struct ureg_src
-ureg_imm1i( struct ureg_program *ureg,
- int a)
-{
- return ureg_DECL_immediate_int( ureg, &a, 1 );
-}
-
-/* Where the destination register has a valid file, but an empty
- * writemask.
- */
-static inline boolean
-ureg_dst_is_empty( struct ureg_dst dst )
-{
- return dst.File != TGSI_FILE_NULL &&
- dst.WriteMask == 0;
-}
-
-/***********************************************************************
- * Functions for patching up labels
- */
-
-
-/* Will return a number which can be used in a label to point to the
- * next instruction to be emitted.
- */
-unsigned
-ureg_get_instruction_number( struct ureg_program *ureg );
-
-
-/* Patch a given label (expressed as a token number) to point to a
- * given instruction (expressed as an instruction number).
- *
- * Labels are obtained from instruction emitters, eg ureg_CAL().
- * Instruction numbers are obtained from ureg_get_instruction_number(),
- * above.
- */
-void
-ureg_fixup_label(struct ureg_program *ureg,
- unsigned label_token,
- unsigned instruction_number );
-
-
-/* Generic instruction emitter. Use if you need to pass the opcode as
- * a parameter, rather than using the emit_OP() variants below.
- */
-void
-ureg_insn(struct ureg_program *ureg,
- unsigned opcode,
- const struct ureg_dst *dst,
- unsigned nr_dst,
- const struct ureg_src *src,
- unsigned nr_src,
- unsigned precise );
-
-
-void
-ureg_tex_insn(struct ureg_program *ureg,
- unsigned opcode,
- const struct ureg_dst *dst,
- unsigned nr_dst,
- unsigned target,
- const struct tgsi_texture_offset *texoffsets,
- unsigned nr_offset,
- const struct ureg_src *src,
- unsigned nr_src );
-
-
-void
-ureg_label_insn(struct ureg_program *ureg,
- unsigned opcode,
- const struct ureg_src *src,
- unsigned nr_src,
- unsigned *label);
-
-
-/***********************************************************************
- * Internal instruction helpers, don't call these directly:
- */
-
-struct ureg_emit_insn_result {
- unsigned insn_token; /*< Used to fixup insn size. */
- unsigned extended_token; /*< Used to set the Extended bit, usually the same as insn_token. */
-};
-
-struct ureg_emit_insn_result
-ureg_emit_insn(struct ureg_program *ureg,
- unsigned opcode,
- boolean saturate,
- unsigned precise,
- unsigned num_dst,
- unsigned num_src );
-
-void
-ureg_emit_label(struct ureg_program *ureg,
- unsigned insn_token,
- unsigned *label_token );
-
-void
-ureg_emit_texture(struct ureg_program *ureg,
- unsigned insn_token,
- unsigned target, unsigned num_offsets);
-
-void
-ureg_emit_texture_offset(struct ureg_program *ureg,
- const struct tgsi_texture_offset *offset);
-
-void
-ureg_emit_dst( struct ureg_program *ureg,
- struct ureg_dst dst );
-
-void
-ureg_emit_src( struct ureg_program *ureg,
- struct ureg_src src );
-
-void
-ureg_fixup_insn_size(struct ureg_program *ureg,
- unsigned insn );
-
-
-#define OP00( op ) \
-static inline void ureg_##op( struct ureg_program *ureg ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- FALSE, \
- 0, \
- 0, \
- 0); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP01( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_src src ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- FALSE, \
- 0, \
- 0, \
- 1); \
- ureg_emit_src( ureg, src ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP00_LBL( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- unsigned *label_token ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- FALSE, \
- 0, \
- 0, \
- 0); \
- ureg_emit_label( ureg, insn.extended_token, label_token ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP01_LBL( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_src src, \
- unsigned *label_token ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- FALSE, \
- 0, \
- 0, \
- 1); \
- ureg_emit_label( ureg, insn.extended_token, label_token ); \
- ureg_emit_src( ureg, src ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP10( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 0); \
- ureg_emit_dst( ureg, dst ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-
-#define OP11( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 1); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP12( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 2); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP12_TEX( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- unsigned target, \
- struct ureg_src src0, \
- struct ureg_src src1 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 2); \
- ureg_emit_texture( ureg, insn.extended_token, target, 0 ); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP12_SAMPLE( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- unsigned target = TGSI_TEXTURE_UNKNOWN; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 2); \
- ureg_emit_texture( ureg, insn.extended_token, target, 0 ); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP13( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 3); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP13_SAMPLE( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- unsigned target = TGSI_TEXTURE_UNKNOWN; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 3); \
- ureg_emit_texture( ureg, insn.extended_token, target, 0 ); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP14_TEX( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- unsigned target, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2, \
- struct ureg_src src3 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 4); \
- ureg_emit_texture( ureg, insn.extended_token, target, 0 ); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_emit_src( ureg, src3 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP14_SAMPLE( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2, \
- struct ureg_src src3 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- unsigned target = TGSI_TEXTURE_UNKNOWN; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 4); \
- ureg_emit_texture( ureg, insn.extended_token, target, 0 ); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_emit_src( ureg, src3 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-
-#define OP14( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2, \
- struct ureg_src src3 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 4); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_emit_src( ureg, src3 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-
-#define OP15( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2, \
- struct ureg_src src3, \
- struct ureg_src src4 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 5); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_emit_src( ureg, src3 ); \
- ureg_emit_src( ureg, src4 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-#define OP15_SAMPLE( op ) \
-static inline void ureg_##op( struct ureg_program *ureg, \
- struct ureg_dst dst, \
- struct ureg_src src0, \
- struct ureg_src src1, \
- struct ureg_src src2, \
- struct ureg_src src3, \
- struct ureg_src src4 ) \
-{ \
- unsigned opcode = TGSI_OPCODE_##op; \
- unsigned target = TGSI_TEXTURE_UNKNOWN; \
- struct ureg_emit_insn_result insn; \
- if (ureg_dst_is_empty(dst)) \
- return; \
- insn = ureg_emit_insn(ureg, \
- opcode, \
- dst.Saturate, \
- 0, \
- 1, \
- 5); \
- ureg_emit_texture( ureg, insn.extended_token, target, 0 ); \
- ureg_emit_dst( ureg, dst ); \
- ureg_emit_src( ureg, src0 ); \
- ureg_emit_src( ureg, src1 ); \
- ureg_emit_src( ureg, src2 ); \
- ureg_emit_src( ureg, src3 ); \
- ureg_emit_src( ureg, src4 ); \
- ureg_fixup_insn_size( ureg, insn.insn_token ); \
-}
-
-/* Use a template include to generate a correctly-typed ureg_OP()
- * function for each TGSI opcode:
- */
-#include "tgsi_opcode_tmp.h"
-
-
-/***********************************************************************
- * Inline helpers for manipulating register structs:
- */
-static inline struct ureg_src
-ureg_negate( struct ureg_src reg )
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.Negate ^= 1;
- return reg;
-}
-
-static inline struct ureg_src
-ureg_abs( struct ureg_src reg )
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.Absolute = 1;
- reg.Negate = 0;
- return reg;
-}
-
-static inline struct ureg_src
-ureg_swizzle( struct ureg_src reg,
- int x, int y, int z, int w )
-{
- unsigned swz = ( (reg.SwizzleX << 0) |
- (reg.SwizzleY << 2) |
- (reg.SwizzleZ << 4) |
- (reg.SwizzleW << 6));
-
- assert(reg.File != TGSI_FILE_NULL);
- assert(x < 4);
- assert(y < 4);
- assert(z < 4);
- assert(w < 4);
-
- reg.SwizzleX = (swz >> (x*2)) & 0x3;
- reg.SwizzleY = (swz >> (y*2)) & 0x3;
- reg.SwizzleZ = (swz >> (z*2)) & 0x3;
- reg.SwizzleW = (swz >> (w*2)) & 0x3;
- return reg;
-}
-
-static inline struct ureg_src
-ureg_scalar( struct ureg_src reg, int x )
-{
- return ureg_swizzle(reg, x, x, x, x);
-}
-
-static inline struct ureg_dst
-ureg_writemask( struct ureg_dst reg,
- unsigned writemask )
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.WriteMask &= writemask;
- return reg;
-}
-
-static inline struct ureg_dst
-ureg_saturate( struct ureg_dst reg )
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.Saturate = 1;
- return reg;
-}
-
-static inline struct ureg_dst
-ureg_predicate(struct ureg_dst reg,
- boolean negate,
- unsigned swizzle_x,
- unsigned swizzle_y,
- unsigned swizzle_z,
- unsigned swizzle_w)
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.Predicate = 1;
- reg.PredNegate = negate;
- reg.PredSwizzleX = swizzle_x;
- reg.PredSwizzleY = swizzle_y;
- reg.PredSwizzleZ = swizzle_z;
- reg.PredSwizzleW = swizzle_w;
- return reg;
-}
-
-static inline struct ureg_dst
-ureg_dst_indirect( struct ureg_dst reg, struct ureg_src addr )
-{
- assert(reg.File != TGSI_FILE_NULL);
- assert(addr.File == TGSI_FILE_ADDRESS || addr.File == TGSI_FILE_TEMPORARY);
- reg.Indirect = 1;
- reg.IndirectFile = addr.File;
- reg.IndirectIndex = addr.Index;
- reg.IndirectSwizzle = addr.SwizzleX;
- return reg;
-}
-
-static inline struct ureg_src
-ureg_src_indirect( struct ureg_src reg, struct ureg_src addr )
-{
- assert(reg.File != TGSI_FILE_NULL);
- assert(addr.File == TGSI_FILE_ADDRESS || addr.File == TGSI_FILE_TEMPORARY);
- reg.Indirect = 1;
- reg.IndirectFile = addr.File;
- reg.IndirectIndex = addr.Index;
- reg.IndirectSwizzle = addr.SwizzleX;
- return reg;
-}
-
-static inline struct ureg_src
-ureg_src_dimension( struct ureg_src reg, int index )
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.Dimension = 1;
- reg.DimIndirect = 0;
- reg.DimensionIndex = index;
- return reg;
-}
-
-
-static inline struct ureg_src
-ureg_src_dimension_indirect( struct ureg_src reg, struct ureg_src addr,
- int index )
-{
- assert(reg.File != TGSI_FILE_NULL);
- reg.Dimension = 1;
- reg.DimIndirect = 1;
- reg.DimensionIndex = index;
- reg.DimIndFile = addr.File;
- reg.DimIndIndex = addr.Index;
- reg.DimIndSwizzle = addr.SwizzleX;
- return reg;
-}
-
-static inline struct ureg_dst
-ureg_dst_array_offset( struct ureg_dst reg, int offset )
-{
- assert(reg.File == TGSI_FILE_TEMPORARY);
- reg.Index += offset;
- return reg;
-}
-
-static inline struct ureg_dst
-ureg_dst( struct ureg_src src )
-{
- struct ureg_dst dst;
-
- assert(!src.Indirect ||
- (src.IndirectFile == TGSI_FILE_ADDRESS ||
- src.IndirectFile == TGSI_FILE_TEMPORARY));
-
- dst.File = src.File;
- dst.WriteMask = TGSI_WRITEMASK_XYZW;
- dst.IndirectFile = src.IndirectFile;
- dst.Indirect = src.Indirect;
- dst.IndirectIndex = src.IndirectIndex;
- dst.IndirectSwizzle = src.IndirectSwizzle;
- dst.Saturate = 0;
- dst.Index = src.Index;
- dst.ArrayID = src.ArrayID;
-
- return dst;
-}
-
-static inline struct ureg_src
-ureg_src_register(unsigned file,
- unsigned index)
-{
- struct ureg_src src;
-
- src.File = file;
- src.SwizzleX = TGSI_SWIZZLE_X;
- src.SwizzleY = TGSI_SWIZZLE_Y;
- src.SwizzleZ = TGSI_SWIZZLE_Z;
- src.SwizzleW = TGSI_SWIZZLE_W;
- src.Indirect = 0;
- src.IndirectFile = TGSI_FILE_NULL;
- src.IndirectIndex = 0;
- src.IndirectSwizzle = 0;
- src.Absolute = 0;
- src.Index = index;
- src.Negate = 0;
- src.Dimension = 0;
- src.DimensionIndex = 0;
- src.DimIndirect = 0;
- src.DimIndFile = TGSI_FILE_NULL;
- src.DimIndIndex = 0;
- src.DimIndSwizzle = 0;
- src.ArrayID = 0;
-
- return src;
-}
-
-static inline struct ureg_src
-ureg_src( struct ureg_dst dst )
-{
- struct ureg_src src;
-
- src.File = dst.File;
- src.SwizzleX = TGSI_SWIZZLE_X;
- src.SwizzleY = TGSI_SWIZZLE_Y;
- src.SwizzleZ = TGSI_SWIZZLE_Z;
- src.SwizzleW = TGSI_SWIZZLE_W;
- src.Indirect = dst.Indirect;
- src.IndirectFile = dst.IndirectFile;
- src.IndirectIndex = dst.IndirectIndex;
- src.IndirectSwizzle = dst.IndirectSwizzle;
- src.Absolute = 0;
- src.Index = dst.Index;
- src.Negate = 0;
- src.Dimension = 0;
- src.DimensionIndex = 0;
- src.DimIndirect = 0;
- src.DimIndFile = TGSI_FILE_NULL;
- src.DimIndIndex = 0;
- src.DimIndSwizzle = 0;
- src.ArrayID = dst.ArrayID;
-
- return src;
-}
-
-
-
-static inline struct ureg_dst
-ureg_dst_undef( void )
-{
- struct ureg_dst dst;
-
- dst.File = TGSI_FILE_NULL;
- dst.WriteMask = 0;
- dst.Indirect = 0;
- dst.IndirectFile = TGSI_FILE_NULL;
- dst.IndirectIndex = 0;
- dst.IndirectSwizzle = 0;
- dst.Saturate = 0;
- dst.Index = 0;
- dst.ArrayID = 0;
-
- return dst;
-}
-
-static inline struct ureg_src
-ureg_src_undef( void )
-{
- struct ureg_src src;
-
- src.File = TGSI_FILE_NULL;
- src.SwizzleX = 0;
- src.SwizzleY = 0;
- src.SwizzleZ = 0;
- src.SwizzleW = 0;
- src.Indirect = 0;
- src.IndirectFile = TGSI_FILE_NULL;
- src.IndirectIndex = 0;
- src.IndirectSwizzle = 0;
- src.Absolute = 0;
- src.Index = 0;
- src.Negate = 0;
- src.Dimension = 0;
- src.DimensionIndex = 0;
- src.DimIndirect = 0;
- src.DimIndFile = TGSI_FILE_NULL;
- src.DimIndIndex = 0;
- src.DimIndSwizzle = 0;
- src.ArrayID = 0;
-
- return src;
-}
-
-static inline boolean
-ureg_src_is_undef( struct ureg_src src )
-{
- return src.File == TGSI_FILE_NULL;
-}
-
-static inline boolean
-ureg_dst_is_undef( struct ureg_dst dst )
-{
- return dst.File == TGSI_FILE_NULL;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/src/gallium/auxiliary/util/rgtc.c b/src/gallium/auxiliary/util/rgtc.c
deleted file mode 100644
index 6886ac07..00000000
--- a/src/gallium/auxiliary/util/rgtc.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2011 Red Hat Inc.
- *
- * block compression parts are:
- * Copyright (C) 2004 Roland Scheidegger All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author:
- * Dave Airlie
- */
-
-#include <inttypes.h>
-#include "macros.h"
-
-#include "rgtc.h"
-
-#define RGTC_DEBUG 0
-
-#define TAG(x) util_format_unsigned_##x
-
-#define TYPE unsigned char
-#define T_MIN 0
-#define T_MAX 0xff
-
-#include "texcompress_rgtc_tmp.h"
-
-#undef TAG
-#undef TYPE
-#undef T_MIN
-#undef T_MAX
-
-#define TAG(x) util_format_signed_##x
-#define TYPE signed char
-#define T_MIN (signed char)-128
-#define T_MAX (signed char)127
-
-#include "texcompress_rgtc_tmp.h"
-
-#undef TAG
-#undef TYPE
-#undef T_MIN
-#undef T_MAX
-
diff --git a/src/gallium/auxiliary/util/rgtc.h b/src/gallium/auxiliary/util/rgtc.h
deleted file mode 100644
index 6bba9f2e..00000000
--- a/src/gallium/auxiliary/util/rgtc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright © 2014 Red Hat
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _RGTC_H
-#define _RGTC_H
-
-void util_format_unsigned_fetch_texel_rgtc(unsigned srcRowStride, const unsigned char *pixdata,
- unsigned i, unsigned j, unsigned char *value, unsigned comps);
-
-void util_format_signed_fetch_texel_rgtc(unsigned srcRowStride, const signed char *pixdata,
- unsigned i, unsigned j, signed char *value, unsigned comps);
-
-void util_format_unsigned_encode_rgtc_ubyte(unsigned char *blkaddr, unsigned char srccolors[4][4],
- int numxpixels, int numypixels);
-
-void util_format_signed_encode_rgtc_ubyte(signed char *blkaddr, signed char srccolors[4][4],
- int numxpixels, int numypixels);
-#endif /* _RGTC_H */
diff --git a/src/gallium/auxiliary/util/u_atomic.h b/src/gallium/auxiliary/util/u_atomic.h
deleted file mode 100644
index 13484564..00000000
--- a/src/gallium/auxiliary/util/u_atomic.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/**
- * Many similar implementations exist. See for example libwsbm
- * or the linux kernel include/atomic.h
- *
- * No copyright claimed on this file.
- *
- */
-
-#ifndef U_ATOMIC_H
-#define U_ATOMIC_H
-
-#include "pipe/p_compiler.h"
-#include "pipe/p_defines.h"
-
-/* Favor OS-provided implementations.
- *
- * Where no OS-provided implementation is available, fall back to
- * locally coded assembly, compiler intrinsic or ultimately a
- * mutex-based implementation.
- */
-#if defined(PIPE_OS_SOLARIS)
-#define PIPE_ATOMIC_OS_SOLARIS
-#elif defined(PIPE_CC_MSVC)
-#define PIPE_ATOMIC_MSVC_INTRINSIC
-#elif (defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86))
-#define PIPE_ATOMIC_ASM_MSVC_X86
-#elif (defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86))
-#define PIPE_ATOMIC_ASM_GCC_X86
-#elif (defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64))
-#define PIPE_ATOMIC_ASM_GCC_X86_64
-#elif defined(PIPE_CC_GCC) && (PIPE_CC_GCC_VERSION >= 401)
-#define PIPE_ATOMIC_GCC_INTRINSIC
-#else
-#error "Unsupported platform"
-#endif
-
-
-#if defined(PIPE_ATOMIC_ASM_GCC_X86_64)
-#define PIPE_ATOMIC "GCC x86_64 assembly"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-
-static inline boolean
-p_atomic_dec_zero(int32_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__("lock; decl %0; sete %1":"+m"(*v), "=qm"(c)
- ::"memory");
-
- return c != 0;
-}
-
-static inline void
-p_atomic_inc(int32_t *v)
-{
- __asm__ __volatile__("lock; incl %0":"+m"(*v));
-}
-
-static inline void
-p_atomic_dec(int32_t *v)
-{
- __asm__ __volatile__("lock; decl %0":"+m"(*v));
-}
-
-static inline int32_t
-p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
-{
- return __sync_val_compare_and_swap(v, old, _new);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* PIPE_ATOMIC_ASM_GCC_X86_64 */
-
-
-#if defined(PIPE_ATOMIC_ASM_GCC_X86)
-
-#define PIPE_ATOMIC "GCC x86 assembly"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-
-static inline boolean
-p_atomic_dec_zero(int32_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__("lock; decl %0; sete %1":"+m"(*v), "=qm"(c)
- ::"memory");
-
- return c != 0;
-}
-
-static inline void
-p_atomic_inc(int32_t *v)
-{
- __asm__ __volatile__("lock; incl %0":"+m"(*v));
-}
-
-static inline void
-p_atomic_dec(int32_t *v)
-{
- __asm__ __volatile__("lock; decl %0":"+m"(*v));
-}
-
-static inline int32_t
-p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
-{
- return __sync_val_compare_and_swap(v, old, _new);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
-
-/* Implementation using GCC-provided synchronization intrinsics
- */
-#if defined(PIPE_ATOMIC_GCC_INTRINSIC)
-
-#define PIPE_ATOMIC "GCC Sync Intrinsics"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-
-static inline boolean
-p_atomic_dec_zero(int32_t *v)
-{
- return (__sync_sub_and_fetch(v, 1) == 0);
-}
-
-static inline void
-p_atomic_inc(int32_t *v)
-{
- (void) __sync_add_and_fetch(v, 1);
-}
-
-static inline void
-p_atomic_dec(int32_t *v)
-{
- (void) __sync_sub_and_fetch(v, 1);
-}
-
-static inline int32_t
-p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
-{
- return __sync_val_compare_and_swap(v, old, _new);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
-
-/* Unlocked version for single threaded environments, such as some
- * windows kernel modules.
- */
-#if defined(PIPE_ATOMIC_OS_UNLOCKED)
-
-#define PIPE_ATOMIC "Unlocked"
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-#define p_atomic_dec_zero(_v) ((boolean) --(*(_v)))
-#define p_atomic_inc(_v) ((void) (*(_v))++)
-#define p_atomic_dec(_v) ((void) (*(_v))--)
-#define p_atomic_cmpxchg(_v, old, _new) (*(_v) == old ? *(_v) = (_new) : *(_v))
-
-#endif
-
-
-/* Locally coded assembly for MSVC on x86:
- */
-#if defined(PIPE_ATOMIC_ASM_MSVC_X86)
-
-#define PIPE_ATOMIC "MSVC x86 assembly"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-
-static inline boolean
-p_atomic_dec_zero(int32_t *v)
-{
- unsigned char c;
-
- __asm {
- mov eax, [v]
- lock dec dword ptr [eax]
- sete byte ptr [c]
- }
-
- return c != 0;
-}
-
-static inline void
-p_atomic_inc(int32_t *v)
-{
- __asm {
- mov eax, [v]
- lock inc dword ptr [eax]
- }
-}
-
-static inline void
-p_atomic_dec(int32_t *v)
-{
- __asm {
- mov eax, [v]
- lock dec dword ptr [eax]
- }
-}
-
-static inline int32_t
-p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
-{
- int32_t orig;
-
- __asm {
- mov ecx, [v]
- mov eax, [old]
- mov edx, [_new]
- lock cmpxchg [ecx], edx
- mov [orig], eax
- }
-
- return orig;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
-#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
-
-#define PIPE_ATOMIC "MSVC Intrinsics"
-
-#include <intrin.h>
-
-#pragma intrinsic(_InterlockedIncrement)
-#pragma intrinsic(_InterlockedDecrement)
-#pragma intrinsic(_InterlockedCompareExchange)
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-
-static inline boolean
-p_atomic_dec_zero(int32_t *v)
-{
- return _InterlockedDecrement((long *)v) == 0;
-}
-
-static inline void
-p_atomic_inc(int32_t *v)
-{
- _InterlockedIncrement((long *)v);
-}
-
-static inline void
-p_atomic_dec(int32_t *v)
-{
- _InterlockedDecrement((long *)v);
-}
-
-static inline int32_t
-p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
-{
- return _InterlockedCompareExchange((long *)v, _new, old);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-#if defined(PIPE_ATOMIC_OS_SOLARIS)
-
-#define PIPE_ATOMIC "Solaris OS atomic functions"
-
-#include <atomic.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define p_atomic_set(_v, _i) (*(_v) = (_i))
-#define p_atomic_read(_v) (*(_v))
-
-static inline boolean
-p_atomic_dec_zero(int32_t *v)
-{
- uint32_t n = atomic_dec_32_nv((uint32_t *) v);
-
- return n != 0;
-}
-
-#define p_atomic_inc(_v) atomic_inc_32((uint32_t *) _v)
-#define p_atomic_dec(_v) atomic_dec_32((uint32_t *) _v)
-
-#define p_atomic_cmpxchg(_v, _old, _new) \
- atomic_cas_32( (uint32_t *) _v, (uint32_t) _old, (uint32_t) _new)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
-#ifndef PIPE_ATOMIC
-#error "No pipe_atomic implementation selected"
-#endif
-
-
-
-#endif /* U_ATOMIC_H */
diff --git a/src/gallium/auxiliary/util/u_bitmask.c b/src/gallium/auxiliary/util/u_bitmask.c
deleted file mode 100644
index b19be29a..00000000
--- a/src/gallium/auxiliary/util/u_bitmask.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * Generic bitmask implementation.
- *
- * @author Jose Fonseca <jfonseca@vmware.com>
- */
-
-
-#include "pipe/p_compiler.h"
-#include "util/u_debug.h"
-
-#include "util/u_memory.h"
-#include "util/u_bitmask.h"
-
-
-typedef uint32_t util_bitmask_word;
-
-
-#define UTIL_BITMASK_INITIAL_WORDS 16
-#define UTIL_BITMASK_BITS_PER_BYTE 8
-#define UTIL_BITMASK_BITS_PER_WORD (sizeof(util_bitmask_word) * UTIL_BITMASK_BITS_PER_BYTE)
-
-
-struct util_bitmask
-{
- util_bitmask_word *words;
-
- /** Number of bits we can currently hold */
- unsigned size;
-
- /** Number of consecutive bits set at the start of the bitmask */
- unsigned filled;
-};
-
-
-struct util_bitmask *
-util_bitmask_create(void)
-{
- struct util_bitmask *bm;
-
- bm = MALLOC_STRUCT(util_bitmask);
- if(!bm)
- return NULL;
-
- bm->words = (util_bitmask_word *)CALLOC(UTIL_BITMASK_INITIAL_WORDS, sizeof(util_bitmask_word));
- if(!bm->words) {
- FREE(bm);
- return NULL;
- }
-
- bm->size = UTIL_BITMASK_INITIAL_WORDS * UTIL_BITMASK_BITS_PER_WORD;
- bm->filled = 0;
-
- return bm;
-}
-
-
-/**
- * Resize the bitmask if necessary
- */
-static inline boolean
-util_bitmask_resize(struct util_bitmask *bm,
- unsigned minimum_index)
-{
- unsigned minimum_size = minimum_index + 1;
- unsigned new_size;
- util_bitmask_word *new_words;
-
- /* Check integer overflow */
- if(!minimum_size)
- return FALSE;
-
- if(bm->size >= minimum_size)
- return TRUE;
-
- assert(bm->size % UTIL_BITMASK_BITS_PER_WORD == 0);
- new_size = bm->size;
- while(new_size < minimum_size) {
- new_size *= 2;
- /* Check integer overflow */
- if(new_size < bm->size)
- return FALSE;
- }
- assert(new_size);
- assert(new_size % UTIL_BITMASK_BITS_PER_WORD == 0);
-
- new_words = (util_bitmask_word *)REALLOC((void *)bm->words,
- bm->size / UTIL_BITMASK_BITS_PER_BYTE,
- new_size / UTIL_BITMASK_BITS_PER_BYTE);
- if(!new_words)
- return FALSE;
-
- memset(new_words + bm->size/UTIL_BITMASK_BITS_PER_WORD,
- 0,
- (new_size - bm->size)/UTIL_BITMASK_BITS_PER_BYTE);
-
- bm->size = new_size;
- bm->words = new_words;
-
- return TRUE;
-}
-
-
-/**
- * Lazily update the filled.
- */
-static inline void
-util_bitmask_filled_set(struct util_bitmask *bm,
- unsigned index)
-{
- assert(bm->filled <= bm->size);
- assert(index < bm->size);
-
- if(index == bm->filled) {
- ++bm->filled;
- assert(bm->filled <= bm->size);
- }
-}
-
-static inline void
-util_bitmask_filled_unset(struct util_bitmask *bm,
- unsigned index)
-{
- assert(bm->filled <= bm->size);
- assert(index < bm->size);
-
- if(index < bm->filled)
- bm->filled = index;
-}
-
-
-unsigned
-util_bitmask_add(struct util_bitmask *bm)
-{
- unsigned word;
- unsigned bit;
- util_bitmask_word mask;
-
- assert(bm);
-
- /* linear search for an empty index */
- word = bm->filled / UTIL_BITMASK_BITS_PER_WORD;
- bit = bm->filled % UTIL_BITMASK_BITS_PER_WORD;
- mask = 1 << bit;
- while(word < bm->size / UTIL_BITMASK_BITS_PER_WORD) {
- while(bit < UTIL_BITMASK_BITS_PER_WORD) {
- if(!(bm->words[word] & mask))
- goto found;
- ++bm->filled;
- ++bit;
- mask <<= 1;
- }
- ++word;
- bit = 0;
- mask = 1;
- }
-found:
-
- /* grow the bitmask if necessary */
- if(!util_bitmask_resize(bm, bm->filled))
- return UTIL_BITMASK_INVALID_INDEX;
-
- assert(!(bm->words[word] & mask));
- bm->words[word] |= mask;
-
- return bm->filled++;
-}
-
-
-unsigned
-util_bitmask_set(struct util_bitmask *bm,
- unsigned index)
-{
- unsigned word;
- unsigned bit;
- util_bitmask_word mask;
-
- assert(bm);
-
- /* grow the bitmask if necessary */
- if(!util_bitmask_resize(bm, index))
- return UTIL_BITMASK_INVALID_INDEX;
-
- word = index / UTIL_BITMASK_BITS_PER_WORD;
- bit = index % UTIL_BITMASK_BITS_PER_WORD;
- mask = 1 << bit;
-
- bm->words[word] |= mask;
-
- util_bitmask_filled_set(bm, index);
-
- return index;
-}
-
-
-void
-util_bitmask_clear(struct util_bitmask *bm,
- unsigned index)
-{
- unsigned word;
- unsigned bit;
- util_bitmask_word mask;
-
- assert(bm);
-
- if(index >= bm->size)
- return;
-
- word = index / UTIL_BITMASK_BITS_PER_WORD;
- bit = index % UTIL_BITMASK_BITS_PER_WORD;
- mask = 1 << bit;
-
- bm->words[word] &= ~mask;
-
- util_bitmask_filled_unset(bm, index);
-}
-
-
-boolean
-util_bitmask_get(struct util_bitmask *bm,
- unsigned index)
-{
- unsigned word = index / UTIL_BITMASK_BITS_PER_WORD;
- unsigned bit = index % UTIL_BITMASK_BITS_PER_WORD;
- util_bitmask_word mask = 1 << bit;
-
- assert(bm);
-
- if(index < bm->filled) {
- assert(bm->words[word] & mask);
- return TRUE;
- }
-
- if(index >= bm->size)
- return FALSE;
-
- if(bm->words[word] & mask) {
- util_bitmask_filled_set(bm, index);
- return TRUE;
- }
- else
- return FALSE;
-}
-
-
-unsigned
-util_bitmask_get_next_index(struct util_bitmask *bm,
- unsigned index)
-{
- unsigned word = index / UTIL_BITMASK_BITS_PER_WORD;
- unsigned bit = index % UTIL_BITMASK_BITS_PER_WORD;
- util_bitmask_word mask = 1 << bit;
-
- if(index < bm->filled) {
- assert(bm->words[word] & mask);
- return index;
- }
-
- if(index >= bm->size) {
- return UTIL_BITMASK_INVALID_INDEX;
- }
-
- /* Do a linear search */
- while(word < bm->size / UTIL_BITMASK_BITS_PER_WORD) {
- while(bit < UTIL_BITMASK_BITS_PER_WORD) {
- if(bm->words[word] & mask) {
- if(index == bm->filled) {
- ++bm->filled;
- assert(bm->filled <= bm->size);
- }
- return index;
- }
- ++index;
- ++bit;
- mask <<= 1;
- }
- ++word;
- bit = 0;
- mask = 1;
- }
-
- return UTIL_BITMASK_INVALID_INDEX;
-}
-
-
-unsigned
-util_bitmask_get_first_index(struct util_bitmask *bm)
-{
- return util_bitmask_get_next_index(bm, 0);
-}
-
-
-void
-util_bitmask_destroy(struct util_bitmask *bm)
-{
- assert(bm);
-
- FREE(bm->words);
- FREE(bm);
-}
-
diff --git a/src/gallium/auxiliary/util/u_bitmask.h b/src/gallium/auxiliary/util/u_bitmask.h
deleted file mode 100644
index 98b85dde..00000000
--- a/src/gallium/auxiliary/util/u_bitmask.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * Generic bitmask.
- *
- * @author Jose Fonseca <jfonseca@vmware.com>
- */
-
-#ifndef U_HANDLE_BITMASK_H_
-#define U_HANDLE_BITMASK_H_
-
-
-#include "pipe/p_compiler.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#define UTIL_BITMASK_INVALID_INDEX (~0U)
-
-
-/**
- * Abstract data type to represent arbitrary set of bits.
- */
-struct util_bitmask;
-
-
-struct util_bitmask *
-util_bitmask_create(void);
-
-
-/**
- * Search a cleared bit and set it.
- *
- * It searches for the first cleared bit.
- *
- * Returns the bit index on success, or UTIL_BITMASK_INVALID_INDEX on out of
- * memory growing the bitmask.
- */
-unsigned
-util_bitmask_add(struct util_bitmask *bm);
-
-/**
- * Set a bit.
- *
- * Returns the input index on success, or UTIL_BITMASK_INVALID_INDEX on out of
- * memory growing the bitmask.
- */
-unsigned
-util_bitmask_set(struct util_bitmask *bm,
- unsigned index);
-
-void
-util_bitmask_clear(struct util_bitmask *bm,
- unsigned index);
-
-boolean
-util_bitmask_get(struct util_bitmask *bm,
- unsigned index);
-
-
-void
-util_bitmask_destroy(struct util_bitmask *bm);
-
-
-/**
- * Search for the first set bit.
- *
- * Returns UTIL_BITMASK_INVALID_INDEX if a set bit cannot be found.
- */
-unsigned
-util_bitmask_get_first_index(struct util_bitmask *bm);
-
-
-/**
- * Search for the first set bit, starting from the giving index.
- *
- * Returns UTIL_BITMASK_INVALID_INDEX if a set bit cannot be found.
- */
-unsigned
-util_bitmask_get_next_index(struct util_bitmask *bm,
- unsigned index);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* U_HANDLE_BITMASK_H_ */
diff --git a/src/gallium/auxiliary/util/u_box.h b/src/gallium/auxiliary/util/u_box.h
deleted file mode 100644
index 2fa6017d..00000000
--- a/src/gallium/auxiliary/util/u_box.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef UTIL_BOX_INLINES_H
-#define UTIL_BOX_INLINES_H
-
-#include "pipe/p_state.h"
-
-static inline
-void u_box_1d( unsigned x,
- unsigned w,
- struct pipe_box *box )
-{
- box->x = x;
- box->y = 0;
- box->z = 0;
- box->width = w;
- box->height = 1;
- box->depth = 1;
-}
-
-static inline
-void u_box_2d( unsigned x,
- unsigned y,
- unsigned w,
- unsigned h,
- struct pipe_box *box )
-{
- box->x = x;
- box->y = y;
- box->z = 0;
- box->width = w;
- box->height = h;
- box->depth = 1;
-}
-
-static inline
-void u_box_origin_2d( unsigned w,
- unsigned h,
- struct pipe_box *box )
-{
- box->x = 0;
- box->y = 0;
- box->z = 0;
- box->width = w;
- box->height = h;
- box->depth = 1;
-}
-
-static inline
-void u_box_2d_zslice( unsigned x,
- unsigned y,
- unsigned z,
- unsigned w,
- unsigned h,
- struct pipe_box *box )
-{
- box->x = x;
- box->y = y;
- box->z = z;
- box->width = w;
- box->height = h;
- box->depth = 1;
-}
-
-static inline
-void u_box_3d( unsigned x,
- unsigned y,
- unsigned z,
- unsigned w,
- unsigned h,
- unsigned d,
- struct pipe_box *box )
-{
- box->x = x;
- box->y = y;
- box->z = z;
- box->width = w;
- box->height = h;
- box->depth = d;
-}
-
-#endif
diff --git a/src/gallium/auxiliary/util/u_cpu_detect.c b/src/gallium/auxiliary/util/u_cpu_detect.c
deleted file mode 100644
index 0b4b83aa..00000000
--- a/src/gallium/auxiliary/util/u_cpu_detect.c
+++ /dev/null
@@ -1,458 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 Dennis Smit
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * on the rights to use, copy, modify, merge, publish, distribute, sub
- * license, and/or sell copies of the Software, and to permit persons to whom
- * the Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * CPU feature detection.
- *
- * @author Dennis Smit
- * @author Based on the work of Eric Anholt <anholt@FreeBSD.org>
- */
-
-#include "pipe/p_config.h"
-
-#include "u_debug.h"
-#include "u_cpu_detect.h"
-
-#if defined(PIPE_ARCH_PPC)
-#if defined(PIPE_OS_APPLE)
-#include <sys/sysctl.h>
-#else
-#include <signal.h>
-#include <setjmp.h>
-#endif
-#endif
-
-#if defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD)
-#include <sys/param.h>
-#include <sys/sysctl.h>
-#include <machine/cpu.h>
-#endif
-
-#if defined(PIPE_OS_FREEBSD)
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#endif
-
-#if defined(PIPE_OS_LINUX)
-#include <signal.h>
-#endif
-
-#ifdef PIPE_OS_UNIX
-#include <unistd.h>
-#endif
-
-#if defined(PIPE_OS_WINDOWS)
-#include <windows.h>
-#if defined(PIPE_CC_MSVC)
-#include <intrin.h>
-#endif
-#endif
-
-
-#ifdef DEBUG
-DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu, "GALLIUM_DUMP_CPU", FALSE)
-#endif
-
-
-struct util_cpu_caps util_cpu_caps;
-
-#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
-static int has_cpuid(void);
-#endif
-
-
-#if defined(PIPE_ARCH_PPC) && !defined(PIPE_OS_APPLE)
-static jmp_buf __lv_powerpc_jmpbuf;
-static volatile sig_atomic_t __lv_powerpc_canjump = 0;
-
-static void
-sigill_handler(int sig)
-{
- if (!__lv_powerpc_canjump) {
- signal (sig, SIG_DFL);
- raise (sig);
- }
-
- __lv_powerpc_canjump = 0;
- longjmp(__lv_powerpc_jmpbuf, 1);
-}
-#endif
-
-#if defined(PIPE_ARCH_PPC)
-static void
-check_os_altivec_support(void)
-{
-#if defined(PIPE_OS_APPLE)
- int sels[2] = {CTL_HW, HW_VECTORUNIT};
- int has_vu = 0;
- int len = sizeof (has_vu);
- int err;
-
- err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
-
- if (err == 0) {
- if (has_vu != 0) {
- util_cpu_caps.has_altivec = 1;
- }
- }
-#else /* !PIPE_OS_APPLE */
- /* not on Apple/Darwin, do it the brute-force way */
- /* this is borrowed from the libmpeg2 library */
- signal(SIGILL, sigill_handler);
- if (setjmp(__lv_powerpc_jmpbuf)) {
- signal(SIGILL, SIG_DFL);
- } else {
- __lv_powerpc_canjump = 1;
-
- __asm __volatile
- ("mtspr 256, %0\n\t"
- "vand %%v0, %%v0, %%v0"
- :
- : "r" (-1));
-
- signal(SIGILL, SIG_DFL);
- util_cpu_caps.has_altivec = 1;
- }
-#endif /* !PIPE_OS_APPLE */
-}
-#endif /* PIPE_ARCH_PPC */
-
-
-#if defined(PIPE_ARCH_X86) || defined (PIPE_ARCH_X86_64)
-static int has_cpuid(void)
-{
-#if defined(PIPE_ARCH_X86)
-#if defined(PIPE_OS_GCC)
- int a, c;
-
- __asm __volatile
- ("pushf\n"
- "popl %0\n"
- "movl %0, %1\n"
- "xorl $0x200000, %0\n"
- "push %0\n"
- "popf\n"
- "pushf\n"
- "popl %0\n"
- : "=a" (a), "=c" (c)
- :
- : "cc");
-
- return a != c;
-#else
- /* FIXME */
- return 1;
-#endif
-#elif defined(PIPE_ARCH_X86_64)
- return 1;
-#else
- return 0;
-#endif
-}
-
-
-/**
- * @sa cpuid.h included in gcc-4.3 onwards.
- * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
- */
-static inline void
-cpuid(uint32_t ax, uint32_t *p)
-{
-#if (defined(PIPE_CC_GCC) || defined(PIPE_CC_SUNPRO)) && defined(PIPE_ARCH_X86)
- __asm __volatile (
- "xchgl %%ebx, %1\n\t"
- "cpuid\n\t"
- "xchgl %%ebx, %1"
- : "=a" (p[0]),
- "=S" (p[1]),
- "=c" (p[2]),
- "=d" (p[3])
- : "0" (ax)
- );
-#elif (defined(PIPE_CC_GCC) || defined(PIPE_CC_SUNPRO)) && defined(PIPE_ARCH_X86_64)
- __asm __volatile (
- "cpuid\n\t"
- : "=a" (p[0]),
- "=b" (p[1]),
- "=c" (p[2]),
- "=d" (p[3])
- : "0" (ax)
- );
-#elif defined(PIPE_CC_MSVC)
- __cpuid(p, ax);
-#else
- p[0] = 0;
- p[1] = 0;
- p[2] = 0;
- p[3] = 0;
-#endif
-}
-
-/**
- * @sa cpuid.h included in gcc-4.4 onwards.
- * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
- */
-static inline void
-cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
-{
-#if (defined(PIPE_CC_GCC) || defined(PIPE_CC_SUNPRO)) && defined(PIPE_ARCH_X86)
- __asm __volatile (
- "xchgl %%ebx, %1\n\t"
- "cpuid\n\t"
- "xchgl %%ebx, %1"
- : "=a" (p[0]),
- "=S" (p[1]),
- "=c" (p[2]),
- "=d" (p[3])
- : "0" (ax), "2" (cx)
- );
-#elif (defined(PIPE_CC_GCC) || defined(PIPE_CC_SUNPRO)) && defined(PIPE_ARCH_X86_64)
- __asm __volatile (
- "cpuid\n\t"
- : "=a" (p[0]),
- "=b" (p[1]),
- "=c" (p[2]),
- "=d" (p[3])
- : "0" (ax), "2" (cx)
- );
-#elif defined(PIPE_CC_MSVC)
- __cpuidex(p, ax, cx);
-#else
- p[0] = 0;
- p[1] = 0;
- p[2] = 0;
- p[3] = 0;
-#endif
-}
-
-
-static inline uint64_t xgetbv(void)
-{
-#if defined(PIPE_CC_GCC)
- uint32_t eax, edx;
-
- __asm __volatile (
- ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
- : "=a"(eax),
- "=d"(edx)
- : "c"(0)
- );
-
- return ((uint64_t)edx << 32) | eax;
-#elif defined(PIPE_CC_MSVC) && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
- return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
-#else
- return 0;
-#endif
-}
-
-
-#if defined(PIPE_ARCH_X86)
-static inline boolean sse2_has_daz(void)
-{
- struct {
- uint32_t pad1[7];
- uint32_t mxcsr_mask;
- uint32_t pad2[128-8];
- } PIPE_ALIGN_VAR(16) fxarea;
-
- fxarea.mxcsr_mask = 0;
-#if (defined(PIPE_CC_GCC) || defined(PIPE_CC_SUNPRO))
- __asm __volatile ("fxsave %0" : "+m" (fxarea));
-#elif (defined(PIPE_CC_MSVC) && _MSC_VER >= 1700) || defined(PIPE_CC_ICL)
- /* 1700 = Visual Studio 2012 */
- _fxsave(&fxarea);
-#else
- fxarea.mxcsr_mask = 0;
-#endif
- return !!(fxarea.mxcsr_mask & (1 << 6));
-}
-#endif
-
-#endif /* X86 or X86_64 */
-
-void
-util_cpu_detect(void)
-{
- static boolean util_cpu_detect_initialized = FALSE;
-
- if(util_cpu_detect_initialized)
- return;
-
- memset(&util_cpu_caps, 0, sizeof util_cpu_caps);
-
- /* Count the number of CPUs in system */
-#if defined(PIPE_OS_WINDOWS)
- {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- util_cpu_caps.nr_cpus = system_info.dwNumberOfProcessors;
- }
-#elif defined(PIPE_OS_UNIX) && defined(_SC_NPROCESSORS_ONLN)
- util_cpu_caps.nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- if (util_cpu_caps.nr_cpus == -1)
- util_cpu_caps.nr_cpus = 1;
-#elif defined(PIPE_OS_BSD)
- {
- int mib[2], ncpu;
- int len;
-
- mib[0] = CTL_HW;
- mib[1] = HW_NCPU;
-
- len = sizeof (ncpu);
- sysctl(mib, 2, &ncpu, &len, NULL, 0);
- util_cpu_caps.nr_cpus = ncpu;
- }
-#else
- util_cpu_caps.nr_cpus = 1;
-#endif
-
- /* Make the fallback cacheline size nonzero so that it can be
- * safely passed to align().
- */
- util_cpu_caps.cacheline = sizeof(void *);
-
-#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
- if (has_cpuid()) {
- uint32_t regs[4];
- uint32_t regs2[4];
-
- util_cpu_caps.cacheline = 32;
-
- /* Get max cpuid level */
- cpuid(0x00000000, regs);
-
- if (regs[0] >= 0x00000001) {
- unsigned int cacheline;
-
- cpuid (0x00000001, regs2);
-
- util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf;
- if (util_cpu_caps.x86_cpu_type == 0xf)
- util_cpu_caps.x86_cpu_type = 8 + ((regs2[0] >> 20) & 255); /* use extended family (P4, IA64) */
-
- /* general feature flags */
- util_cpu_caps.has_tsc = (regs2[3] >> 4) & 1; /* 0x0000010 */
- util_cpu_caps.has_mmx = (regs2[3] >> 23) & 1; /* 0x0800000 */
- util_cpu_caps.has_sse = (regs2[3] >> 25) & 1; /* 0x2000000 */
- util_cpu_caps.has_sse2 = (regs2[3] >> 26) & 1; /* 0x4000000 */
- util_cpu_caps.has_sse3 = (regs2[2] >> 0) & 1; /* 0x0000001 */
- util_cpu_caps.has_ssse3 = (regs2[2] >> 9) & 1; /* 0x0000020 */
- util_cpu_caps.has_sse4_1 = (regs2[2] >> 19) & 1;
- util_cpu_caps.has_sse4_2 = (regs2[2] >> 20) & 1;
- util_cpu_caps.has_popcnt = (regs2[2] >> 23) & 1;
- util_cpu_caps.has_avx = ((regs2[2] >> 28) & 1) && // AVX
- ((regs2[2] >> 27) & 1) && // OSXSAVE
- ((xgetbv() & 6) == 6); // XMM & YMM
- util_cpu_caps.has_f16c = (regs2[2] >> 29) & 1;
- util_cpu_caps.has_mmx2 = util_cpu_caps.has_sse; /* SSE cpus supports mmxext too */
-#if defined(PIPE_ARCH_X86_64)
- util_cpu_caps.has_daz = 1;
-#else
- util_cpu_caps.has_daz = util_cpu_caps.has_sse3 ||
- (util_cpu_caps.has_sse2 && sse2_has_daz());
-#endif
-
- cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
- if (cacheline > 0)
- util_cpu_caps.cacheline = cacheline;
- }
- if (util_cpu_caps.has_avx && regs[0] >= 0x00000007) {
- uint32_t regs7[4];
- cpuid_count(0x00000007, 0x00000000, regs7);
- util_cpu_caps.has_avx2 = (regs7[1] >> 5) & 1;
- }
-
- if (regs[1] == 0x756e6547 && regs[2] == 0x6c65746e && regs[3] == 0x49656e69) {
- /* GenuineIntel */
- util_cpu_caps.has_intel = 1;
- }
-
- cpuid(0x80000000, regs);
-
- if (regs[0] >= 0x80000001) {
-
- cpuid(0x80000001, regs2);
-
- util_cpu_caps.has_mmx |= (regs2[3] >> 23) & 1;
- util_cpu_caps.has_mmx2 |= (regs2[3] >> 22) & 1;
- util_cpu_caps.has_3dnow = (regs2[3] >> 31) & 1;
- util_cpu_caps.has_3dnow_ext = (regs2[3] >> 30) & 1;
-
- util_cpu_caps.has_xop = util_cpu_caps.has_avx &&
- ((regs2[2] >> 11) & 1);
- }
-
- if (regs[0] >= 0x80000006) {
- cpuid(0x80000006, regs2);
- util_cpu_caps.cacheline = regs2[2] & 0xFF;
- }
-
- if (!util_cpu_caps.has_sse) {
- util_cpu_caps.has_sse2 = 0;
- util_cpu_caps.has_sse3 = 0;
- util_cpu_caps.has_ssse3 = 0;
- util_cpu_caps.has_sse4_1 = 0;
- }
- }
-#endif /* PIPE_ARCH_X86 || PIPE_ARCH_X86_64 */
-
-#if defined(PIPE_ARCH_PPC)
- check_os_altivec_support();
-#endif /* PIPE_ARCH_PPC */
-
-#ifdef DEBUG
- if (debug_get_option_dump_cpu()) {
- debug_printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus);
-
- debug_printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type);
- debug_printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline);
-
- debug_printf("util_cpu_caps.has_tsc = %u\n", util_cpu_caps.has_tsc);
- debug_printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx);
- debug_printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2);
- debug_printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse);
- debug_printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2);
- debug_printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3);
- debug_printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3);
- debug_printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1);
- debug_printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps.has_sse4_2);
- debug_printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps.has_avx);
- debug_printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps.has_avx2);
- debug_printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps.has_f16c);
- debug_printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps.has_popcnt);
- debug_printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow);
- debug_printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext);
- debug_printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps.has_xop);
- debug_printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec);
- debug_printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps.has_daz);
- }
-#endif
-
- util_cpu_detect_initialized = TRUE;
-}
diff --git a/src/gallium/auxiliary/util/u_debug_describe.c b/src/gallium/auxiliary/util/u_debug_describe.c
index 499c3603..e9618781 100644
--- a/src/gallium/auxiliary/util/u_debug_describe.c
+++ b/src/gallium/auxiliary/util/u_debug_describe.c
@@ -41,25 +41,25 @@ debug_describe_resource(char* buf, const struct pipe_resource *ptr)
switch(ptr->target)
{
case PIPE_BUFFER:
- util_sprintf(buf, "pipe_buffer<%u>", (unsigned)util_format_get_stride(ptr->format, ptr->width0));
+ sprintf(buf, "pipe_buffer<%u>", (unsigned)util_format_get_stride(ptr->format, ptr->width0));
break;
case PIPE_TEXTURE_1D:
- util_sprintf(buf, "pipe_texture1d<%u,%s,%u>", ptr->width0, util_format_short_name(ptr->format), ptr->last_level);
+ sprintf(buf, "pipe_texture1d<%u,%s,%u>", ptr->width0, util_format_short_name(ptr->format), ptr->last_level);
break;
case PIPE_TEXTURE_2D:
- util_sprintf(buf, "pipe_texture2d<%u,%u,%s,%u>", ptr->width0, ptr->height0, util_format_short_name(ptr->format), ptr->last_level);
+ sprintf(buf, "pipe_texture2d<%u,%u,%s,%u>", ptr->width0, ptr->height0, util_format_short_name(ptr->format), ptr->last_level);
break;
case PIPE_TEXTURE_RECT:
- util_sprintf(buf, "pipe_texture_rect<%u,%u,%s>", ptr->width0, ptr->height0, util_format_short_name(ptr->format));
+ sprintf(buf, "pipe_texture_rect<%u,%u,%s>", ptr->width0, ptr->height0, util_format_short_name(ptr->format));
break;
case PIPE_TEXTURE_CUBE:
- util_sprintf(buf, "pipe_texture_cube<%u,%u,%s,%u>", ptr->width0, ptr->height0, util_format_short_name(ptr->format), ptr->last_level);
+ sprintf(buf, "pipe_texture_cube<%u,%u,%s,%u>", ptr->width0, ptr->height0, util_format_short_name(ptr->format), ptr->last_level);
break;
case PIPE_TEXTURE_3D:
- util_sprintf(buf, "pipe_texture3d<%u,%u,%u,%s,%u>", ptr->width0, ptr->height0, ptr->depth0, util_format_short_name(ptr->format), ptr->last_level);
+ sprintf(buf, "pipe_texture3d<%u,%u,%u,%s,%u>", ptr->width0, ptr->height0, ptr->depth0, util_format_short_name(ptr->format), ptr->last_level);
break;
default:
- util_sprintf(buf, "pipe_martian_resource<%u>", ptr->target);
+ sprintf(buf, "pipe_martian_resource<%u>", ptr->target);
break;
}
}
@@ -69,7 +69,7 @@ debug_describe_surface(char* buf, const struct pipe_surface *ptr)
{
char res[128];
debug_describe_resource(res, ptr->texture);
- util_sprintf(buf, "pipe_surface<%s,%u,%u,%u>", res, ptr->u.tex.level, ptr->u.tex.first_layer, ptr->u.tex.last_layer);
+ sprintf(buf, "pipe_surface<%s,%u,%u,%u>", res, ptr->u.tex.level, ptr->u.tex.first_layer, ptr->u.tex.last_layer);
}
void
@@ -77,7 +77,7 @@ debug_describe_sampler_view(char* buf, const struct pipe_sampler_view *ptr)
{
char res[128];
debug_describe_resource(res, ptr->texture);
- util_sprintf(buf, "pipe_sampler_view<%s,%s>", res, util_format_short_name(ptr->format));
+ sprintf(buf, "pipe_sampler_view<%s,%s>", res, util_format_short_name(ptr->format));
}
void
@@ -86,6 +86,6 @@ debug_describe_so_target(char* buf,
{
char res[128];
debug_describe_resource(res, ptr->buffer);
- util_sprintf(buf, "pipe_stream_output_target<%s,%u,%u>", res,
+ sprintf(buf, "pipe_stream_output_target<%s,%u,%u>", res,
ptr->buffer_offset, ptr->buffer_size);
}
diff --git a/src/gallium/auxiliary/util/u_double_list.h b/src/gallium/auxiliary/util/u_double_list.h
index 01b87e3a..c170f285 100644
--- a/src/gallium/auxiliary/util/u_double_list.h
+++ b/src/gallium/auxiliary/util/u_double_list.h
@@ -25,123 +25,11 @@
*
**************************************************************************/
-/**
- * \file
- * List macros heavily inspired by the Linux kernel
- * list handling. No list looping yet.
- *
- * Is not threadsafe, so common operations need to
- * be protected using an external mutex.
- */
-
#ifndef _U_DOUBLE_LIST_H_
#define _U_DOUBLE_LIST_H_
+#include "util/list.h"
-#include <stddef.h>
-#include "pipe/p_compiler.h"
-
-
-struct list_head
-{
- struct list_head *prev;
- struct list_head *next;
-};
-
-static inline void list_inithead(struct list_head *item)
-{
- item->prev = item;
- item->next = item;
-}
-
-static inline void list_add(struct list_head *item, struct list_head *list)
-{
- item->prev = list;
- item->next = list->next;
- list->next->prev = item;
- list->next = item;
-}
-
-static inline void list_addtail(struct list_head *item, struct list_head *list)
-{
- item->next = list;
- item->prev = list->prev;
- list->prev->next = item;
- list->prev = item;
-}
-
-static inline void list_replace(struct list_head *from, struct list_head *to)
-{
- to->prev = from->prev;
- to->next = from->next;
- from->next->prev = to;
- from->prev->next = to;
-}
-
-static inline void list_del(struct list_head *item)
-{
- item->prev->next = item->next;
- item->next->prev = item->prev;
- item->prev = item->next = NULL;
-}
-
-static inline void list_delinit(struct list_head *item)
-{
- item->prev->next = item->next;
- item->next->prev = item->prev;
- item->next = item;
- item->prev = item;
-}
-
-#define LIST_INITHEAD(__item) list_inithead(__item)
-#define LIST_ADD(__item, __list) list_add(__item, __list)
-#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
-#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
-#define LIST_DEL(__item) list_del(__item)
-#define LIST_DELINIT(__item) list_delinit(__item)
-
-#define LIST_ENTRY(__type, __item, __field) \
- ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
-
-#define LIST_IS_EMPTY(__list) \
- ((__list)->next == (__list))
-
-/**
- * Cast from a pointer to a member of a struct back to the containing struct.
- *
- * 'sample' MUST be initialized, or else the result is undefined!
- */
-#ifndef container_of
-#define container_of(ptr, sample, member) \
- (void *)((char *)(ptr) \
- - ((char *)&(sample)->member - (char *)(sample)))
-#endif
-
-#define LIST_FOR_EACH_ENTRY(pos, head, member) \
- for (pos = NULL, pos = container_of((head)->next, pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.next, pos, member))
-
-#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
- for (pos = NULL, pos = container_of((head)->next, pos, member), \
- storage = container_of(pos->member.next, pos, member); \
- &pos->member != (head); \
- pos = storage, storage = container_of(storage->member.next, storage, member))
-
-#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
- for (pos = NULL, pos = container_of((head)->prev, pos, member), \
- storage = container_of(pos->member.prev, pos, member); \
- &pos->member != (head); \
- pos = storage, storage = container_of(storage->member.prev, storage, member))
-
-#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
- for (pos = NULL, pos = container_of((start), pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.next, pos, member))
-
-#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
- for (pos = NULL, pos = container_of((start), pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.prev, pos, member))
+#define LIST_IS_EMPTY(__list) list_is_empty(__list)
#endif /*_U_DOUBLE_LIST_H_*/
diff --git a/src/gallium/auxiliary/util/u_format.c b/src/gallium/auxiliary/util/u_format.c
index 16d4cbfd..b7adfdab 100644
--- a/src/gallium/auxiliary/util/u_format.c
+++ b/src/gallium/auxiliary/util/u_format.c
@@ -36,7 +36,6 @@
#include "u_memory.h"
#include "u_format.h"
#include "u_format_s3tc.h"
-#include "u_surface.h"
#include "pipe/p_defines.h"
diff --git a/src/gallium/auxiliary/util/u_format.csv b/src/gallium/auxiliary/util/u_format.csv
index 1d743a69..f6e528a2 100644
--- a/src/gallium/auxiliary/util/u_format.csv
+++ b/src/gallium/auxiliary/util/u_format.csv
@@ -114,6 +114,7 @@ PIPE_FORMAT_I32_FLOAT , plain, 1, 1, f32 , , , , xxxx, r
PIPE_FORMAT_L8_SRGB , plain, 1, 1, un8 , , , , xxx1, srgb
PIPE_FORMAT_R8_SRGB , plain, 1, 1, un8 , , , , x001, srgb
PIPE_FORMAT_L8A8_SRGB , plain, 1, 1, un8 , un8 , , , xxxy, srgb
+PIPE_FORMAT_R8G8_SRGB , plain, 1, 1, un8 , un8 , , , xy01, srgb
PIPE_FORMAT_R8G8B8_SRGB , plain, 1, 1, un8 , un8 , un8 , , xyz1, srgb
PIPE_FORMAT_R8G8B8A8_SRGB , plain, 1, 1, un8 , un8 , un8 , un8 , xyzw, srgb
PIPE_FORMAT_A8B8G8R8_SRGB , plain, 1, 1, un8 , un8 , un8 , un8 , wzyx, srgb
diff --git a/src/gallium/auxiliary/util/u_format_table.py b/src/gallium/auxiliary/util/u_format_table.py
index dec2883c..05d64a20 100755
--- a/src/gallium/auxiliary/util/u_format_table.py
+++ b/src/gallium/auxiliary/util/u_format_table.py
@@ -124,7 +124,7 @@ def write_format_table(formats):
if format.nr_channels() <= 1:
func(format.le_channels, format.le_swizzles)
else:
- print('#ifdef PIPE_ARCH_BIG_ENDIAN')
+ print('#if UTIL_ARCH_BIG_ENDIAN')
func(format.be_channels, format.be_swizzles)
print('#else')
func(format.le_channels, format.le_swizzles)
diff --git a/src/gallium/auxiliary/util/u_hash_table.c b/src/gallium/auxiliary/util/u_hash_table.c
index 6281003b..2b7a67df 100644
--- a/src/gallium/auxiliary/util/u_hash_table.c
+++ b/src/gallium/auxiliary/util/u_hash_table.c
@@ -44,139 +44,68 @@
#include "cso_cache/cso_hash.h"
#include "util/u_memory.h"
+#include "util/u_pointer.h"
#include "util/u_hash_table.h"
+#include "util/hash_table.h"
+#include "ralloc.h"
-#define XXH_INLINE_ALL
-#include "xxhash.h"
struct util_hash_table
{
- struct cso_hash *cso;
-
- /** Hash function */
- unsigned (*hash)(void *key);
-
- /** Compare two keys */
- int (*compare)(void *key1, void *key2);
+ struct hash_table table;
/** free value */
void (*destroy)(void *value);
};
-
-struct util_hash_table_item
-{
- void *key;
- void *value;
-};
-
-
-static inline struct util_hash_table_item *
-util_hash_table_item(struct cso_hash_iter iter)
-{
- return (struct util_hash_table_item *)cso_hash_iter_data(iter);
-}
-
-
struct util_hash_table *
-util_hash_table_create(unsigned (*hash)(void *key),
- int (*compare)(void *key1, void *key2),
+util_hash_table_create(uint32_t (*hash)(const void *key),
+ bool (*equal)(const void *key1, const void *key2),
void (*destroy)(void *value))
{
struct util_hash_table *ht;
- ht = MALLOC_STRUCT(util_hash_table);
+ ht = ralloc(NULL, struct util_hash_table);
if(!ht)
return NULL;
- ht->cso = cso_hash_create();
- if(!ht->cso) {
- FREE(ht);
+ if (!_mesa_hash_table_init(&ht->table, ht, hash, equal)) {
+ ralloc_free(ht);
return NULL;
}
- ht->hash = hash;
- ht->compare = compare;
ht->destroy = destroy;
return ht;
}
-
-static inline struct cso_hash_iter
-util_hash_table_find_iter(struct util_hash_table *ht,
- void *key,
- unsigned key_hash)
-{
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
-
- iter = cso_hash_find(ht->cso, key_hash);
- while (!cso_hash_iter_is_null(iter)) {
- item = (struct util_hash_table_item *)cso_hash_iter_data(iter);
- if (!ht->compare(item->key, key))
- break;
- iter = cso_hash_iter_next(iter);
- }
-
- return iter;
-}
-
-
-static inline struct util_hash_table_item *
-util_hash_table_find_item(struct util_hash_table *ht,
- void *key,
- unsigned key_hash)
-{
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
-
- iter = cso_hash_find(ht->cso, key_hash);
- while (!cso_hash_iter_is_null(iter)) {
- item = (struct util_hash_table_item *)cso_hash_iter_data(iter);
- if (!ht->compare(item->key, key))
- return item;
- iter = cso_hash_iter_next(iter);
- }
-
- return NULL;
-}
-
-
enum pipe_error
util_hash_table_set(struct util_hash_table *ht,
void *key,
void *value)
{
- unsigned key_hash;
- struct util_hash_table_item *item;
- struct cso_hash_iter iter;
+ uint32_t key_hash;
+ struct hash_entry *item;
assert(ht);
if (!ht)
return PIPE_ERROR_BAD_INPUT;
- key_hash = ht->hash(key);
+ if (!key)
+ return PIPE_ERROR_BAD_INPUT;
+
+ key_hash = ht->table.key_hash_function(key);
- item = util_hash_table_find_item(ht, key, key_hash);
+ item = _mesa_hash_table_search_pre_hashed(&ht->table, key_hash, key);
if(item) {
- ht->destroy(item->value);
- item->value = value;
+ ht->destroy(item->data);
+ item->data = value;
return PIPE_OK;
}
-
- item = MALLOC_STRUCT(util_hash_table_item);
+
+ item = _mesa_hash_table_insert_pre_hashed(&ht->table, key_hash, key, value);
if(!item)
return PIPE_ERROR_OUT_OF_MEMORY;
-
- item->key = key;
- item->value = value;
-
- iter = cso_hash_insert(ht->cso, key_hash, item);
- if(cso_hash_iter_is_null(iter)) {
- FREE(item);
- return PIPE_ERROR_OUT_OF_MEMORY;
- }
return PIPE_OK;
}
@@ -186,20 +115,20 @@ void *
util_hash_table_get(struct util_hash_table *ht,
void *key)
{
- unsigned key_hash;
- struct util_hash_table_item *item;
+ struct hash_entry *item;
assert(ht);
if (!ht)
return NULL;
- key_hash = ht->hash(key);
+ if (!key)
+ return NULL;
- item = util_hash_table_find_item(ht, key, key_hash);
+ item = _mesa_hash_table_search(&ht->table, key);
if(!item)
return NULL;
-
- return item->value;
+
+ return item->data;
}
@@ -207,46 +136,36 @@ void
util_hash_table_remove(struct util_hash_table *ht,
void *key)
{
- unsigned key_hash;
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
+ struct hash_entry *item;
assert(ht);
if (!ht)
return;
- key_hash = ht->hash(key);
+ if (!key)
+ return;
- iter = util_hash_table_find_iter(ht, key, key_hash);
- if(cso_hash_iter_is_null(iter))
+ item = _mesa_hash_table_search(&ht->table, key);
+ if (!item)
return;
-
- item = util_hash_table_item(iter);
- assert(item);
- ht->destroy(item->value);
- FREE(item);
-
- cso_hash_erase(ht->cso, iter);
+
+ ht->destroy(item->data);
+ _mesa_hash_table_remove(&ht->table, item);
}
void
util_hash_table_clear(struct util_hash_table *ht)
{
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
-
assert(ht);
if (!ht)
return;
- iter = cso_hash_first_node(ht->cso);
- while (!cso_hash_iter_is_null(iter)) {
- item = (struct util_hash_table_item *)cso_hash_take(ht->cso, cso_hash_iter_key(iter));
- ht->destroy(item->value);
- FREE(item);
- iter = cso_hash_first_node(ht->cso);
+ hash_table_foreach(&ht->table, item) {
+ ht->destroy(item->data);
}
+
+ _mesa_hash_table_clear(&ht->table, NULL);
}
@@ -256,21 +175,14 @@ util_hash_table_foreach(struct util_hash_table *ht,
(void *key, void *value, void *data),
void *data)
{
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
- enum pipe_error result;
-
assert(ht);
if (!ht)
return PIPE_ERROR_BAD_INPUT;
- iter = cso_hash_first_node(ht->cso);
- while (!cso_hash_iter_is_null(iter)) {
- item = (struct util_hash_table_item *)cso_hash_iter_data(iter);
- result = callback(item->key, item->value, data);
- if(result != PIPE_OK)
- return result;
- iter = cso_hash_iter_next(iter);
+ hash_table_foreach(&ht->table, item) {
+ enum pipe_error result = callback((void *)item->key, item->data, data);
+ if (result != PIPE_OK)
+ return result;
}
return PIPE_OK;
@@ -280,154 +192,13 @@ util_hash_table_foreach(struct util_hash_table *ht,
void
util_hash_table_destroy(struct util_hash_table *ht)
{
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
-
assert(ht);
if (!ht)
return;
- iter = cso_hash_first_node(ht->cso);
- while (!cso_hash_iter_is_null(iter)) {
- item = (struct util_hash_table_item *)cso_hash_iter_data(iter);
- ht->destroy(item->value);
- FREE(item);
- iter = cso_hash_iter_next(iter);
- }
-
- cso_hash_delete(ht->cso);
-
- FREE(ht);
-}
-
-static unsigned hash_func_pointer(void *key)
-{
- return XXH32(&key, sizeof(key), 0);
-}
-
-static int compare_func_pointer(void *key1, void *key2)
-{
- return key1 != key2;
-}
-
-static unsigned hash_func_u64(void *key)
-{
- return XXH32(key, sizeof(uint64_t), 0);
-}
-
-static int compare_func_u64(void *key1, void *key2)
-{
- return *(const uint64_t *)key1 != *(const uint64_t*)key2;
-}
-
-static bool util_hash_table_u64_uses_pointer(void)
-{
- /* return true if we can store a uint64_t in a pointer */
- return sizeof(void *) >= sizeof(uint64_t);
-}
-
-struct util_hash_table_u64 *
-util_hash_table_create_u64(void (*destroy)(void *value))
-{
- if (util_hash_table_u64_uses_pointer()) {
- return (struct util_hash_table_u64 *)
- util_hash_table_create(hash_func_pointer,
- compare_func_pointer,
- destroy);
- }
-
- return (struct util_hash_table_u64 *)
- util_hash_table_create(hash_func_u64,
- compare_func_u64,
- destroy);
-}
-
-enum pipe_error
-util_hash_table_set_u64(struct util_hash_table_u64 *ht_u64,
- uint64_t key,
- void *value)
-{
- struct util_hash_table *ht = (struct util_hash_table *)ht_u64;
- uint64_t *real_key;
- enum pipe_error err;
-
- if (util_hash_table_u64_uses_pointer())
- return util_hash_table_set(ht, uintptr_to_pointer(key), value);
-
- real_key = MALLOC(sizeof(*real_key));
- if (!real_key)
- return PIPE_ERROR_OUT_OF_MEMORY;
- *real_key = key;
-
- err = util_hash_table_set(ht, real_key, value);
- if (err != PIPE_OK)
- FREE(real_key);
-
- return err;
-}
-
-void *
-util_hash_table_get_u64(struct util_hash_table_u64 *ht_u64,
- uint64_t key)
-{
- struct util_hash_table *ht = (struct util_hash_table *)ht_u64;
-
- if (util_hash_table_u64_uses_pointer())
- return util_hash_table_get(ht, uintptr_to_pointer(key));
-
- return util_hash_table_get(ht, &key);
-}
-
-void
-util_hash_table_remove_u64(struct util_hash_table_u64 *ht_u64,
- uint64_t key)
-{
- struct util_hash_table *ht = (struct util_hash_table *)ht_u64;
- unsigned key_hash;
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
-
- if (util_hash_table_u64_uses_pointer()) {
- util_hash_table_remove(ht, uintptr_to_pointer(key));
- return;
- }
-
- key_hash = ht->hash(&key);
- iter = util_hash_table_find_iter(ht, &key, key_hash);
-
- if (cso_hash_iter_is_null(iter))
- return;
-
- item = util_hash_table_item(iter);
- ht->destroy(item->value);
- FREE(item->key);
- FREE(item);
-
- cso_hash_erase(ht->cso, iter);
-}
-
-void
-util_hash_table_destroy_u64(struct util_hash_table_u64 *ht_u64)
-{
- struct util_hash_table *ht = (struct util_hash_table *)ht_u64;
- struct cso_hash_iter iter;
- struct util_hash_table_item *item;
-
- if (util_hash_table_u64_uses_pointer()) {
- util_hash_table_destroy(ht);
- return;
+ hash_table_foreach(&ht->table, item) {
+ ht->destroy(item->data);
}
- iter = cso_hash_first_node(ht->cso);
- while (!cso_hash_iter_is_null(iter)) {
- item = util_hash_table_item(iter);
- ht->destroy(item->value);
- FREE(item->key);
- FREE(item);
- iter = cso_hash_iter_next(iter);
- }
-
- cso_hash_delete(ht->cso);
-
- FREE(ht);
+ ralloc_free(ht);
}
diff --git a/src/gallium/auxiliary/util/u_hash_table.h b/src/gallium/auxiliary/util/u_hash_table.h
index c7244208..08481570 100644
--- a/src/gallium/auxiliary/util/u_hash_table.h
+++ b/src/gallium/auxiliary/util/u_hash_table.h
@@ -47,7 +47,6 @@ extern "C" {
* Generic purpose hash table.
*/
struct util_hash_table;
-struct util_hash_table_u64;
/**
@@ -57,8 +56,8 @@ struct util_hash_table_u64;
* @param compare should return 0 for two equal keys.
*/
struct util_hash_table *
-util_hash_table_create(unsigned (*hash)(void *key),
- int (*compare)(void *key1, void *key2),
+util_hash_table_create(uint32_t (*hash)(const void *key),
+ bool (*equal)(const void *key1, const void *key2),
void (*destroy)(void *value));
@@ -91,26 +90,6 @@ void
util_hash_table_destroy(struct util_hash_table *ht);
-struct util_hash_table_u64 *
-util_hash_table_create_u64(void (*destroy)(void *value));
-
-enum pipe_error
-util_hash_table_set_u64(struct util_hash_table_u64 *ht,
- uint64_t key,
- void *value);
-
-void *
-util_hash_table_get_u64(struct util_hash_table_u64 *ht,
- uint64_t key);
-
-
-void
-util_hash_table_remove_u64(struct util_hash_table_u64 *ht,
- uint64_t key);
-
-void
-util_hash_table_destroy_u64(struct util_hash_table_u64 *ht);
-
#ifdef __cplusplus
}
#endif
diff --git a/src/gallium/auxiliary/util/u_inlines.h b/src/gallium/auxiliary/util/u_inlines.h
index 5a27e280..8ee3319e 100644
--- a/src/gallium/auxiliary/util/u_inlines.h
+++ b/src/gallium/auxiliary/util/u_inlines.h
@@ -28,16 +28,13 @@
#ifndef U_INLINES_H
#define U_INLINES_H
-#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
-#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
-#include "util/u_box.h"
#include "util/u_math.h"
@@ -103,111 +100,6 @@ pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
(debug_reference_descriptor)debug_describe_reference);
}
-static inline void
-pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
-{
- struct pipe_surface *old_surf = *ptr;
-
- if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
- (debug_reference_descriptor)debug_describe_surface))
- old_surf->context->surface_destroy(old_surf->context, old_surf);
- *ptr = surf;
-}
-
-/**
- * Similar to pipe_surface_reference() but always set the pointer to NULL
- * and pass in an explicit context. The explicit context avoids the problem
- * of using a deleted context's surface_destroy() method when freeing a surface
- * that's shared by multiple contexts.
- */
-static inline void
-pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
-{
- if (pipe_reference_described(&(*ptr)->reference, NULL,
- (debug_reference_descriptor)debug_describe_surface))
- pipe->surface_destroy(pipe, *ptr);
- *ptr = NULL;
-}
-
-
-static inline void
-pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
-{
- struct pipe_resource *old_tex = *ptr;
-
- if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
- (debug_reference_descriptor)debug_describe_resource))
- old_tex->screen->resource_destroy(old_tex->screen, old_tex);
- *ptr = tex;
-}
-
-static inline void
-pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
-{
- struct pipe_sampler_view *old_view = *ptr;
-
- if (pipe_reference_described(&(*ptr)->reference, &view->reference,
- (debug_reference_descriptor)debug_describe_sampler_view))
- old_view->context->sampler_view_destroy(old_view->context, old_view);
- *ptr = view;
-}
-
-/**
- * Similar to pipe_sampler_view_reference() but always set the pointer to
- * NULL and pass in an explicit context. Passing an explicit context is a
- * work-around for fixing a dangling context pointer problem when textures
- * are shared by multiple contexts. XXX fix this someday.
- */
-static inline void
-pipe_sampler_view_release(struct pipe_context *ctx,
- struct pipe_sampler_view **ptr)
-{
- struct pipe_sampler_view *old_view = *ptr;
- if (*ptr && (*ptr)->context != ctx) {
- debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
- }
- if (pipe_reference_described(&(*ptr)->reference, NULL,
- (debug_reference_descriptor)debug_describe_sampler_view)) {
- ctx->sampler_view_destroy(ctx, old_view);
- }
- *ptr = NULL;
-}
-
-
-static inline void
-pipe_so_target_reference(struct pipe_stream_output_target **ptr,
- struct pipe_stream_output_target *target)
-{
- struct pipe_stream_output_target *old = *ptr;
-
- if (pipe_reference_described(&(*ptr)->reference, &target->reference,
- (debug_reference_descriptor)debug_describe_so_target))
- old->context->stream_output_target_destroy(old->context, old);
- *ptr = target;
-}
-
-static inline void
-pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
- struct pipe_resource *pt, unsigned level, unsigned layer)
-{
- pipe_resource_reference(&ps->texture, pt);
- ps->format = pt->format;
- ps->width = u_minify(pt->width0, level);
- ps->height = u_minify(pt->height0, level);
- ps->u.tex.level = level;
- ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
- ps->context = ctx;
-}
-
-static inline void
-pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
- struct pipe_resource *pt, unsigned level, unsigned layer)
-{
- ps->texture = 0;
- pipe_reference_init(&ps->reference, 1);
- pipe_surface_reset(ctx, ps, pt, level, layer);
-}
-
/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
@@ -223,281 +115,6 @@ pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
s1->u.tex.last_layer == s2->u.tex.last_layer));
}
-/*
- * Convenience wrappers for screen buffer functions.
- */
-
-
-/**
- * Create a new resource.
- * \param bind bitmask of PIPE_BIND_x flags
- * \param usage bitmask of PIPE_USAGE_x flags
- */
-static inline struct pipe_resource *
-pipe_buffer_create( struct pipe_screen *screen,
- unsigned bind,
- unsigned usage,
- unsigned size )
-{
- struct pipe_resource buffer;
- memset(&buffer, 0, sizeof buffer);
- buffer.target = PIPE_BUFFER;
- buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
- buffer.bind = bind;
- buffer.usage = usage;
- buffer.flags = 0;
- buffer.width0 = size;
- buffer.height0 = 1;
- buffer.depth0 = 1;
- buffer.array_size = 1;
- return screen->resource_create(screen, &buffer);
-}
-
-
-/**
- * Map a range of a resource.
- * \param offset start of region, in bytes
- * \param length size of region, in bytes
- * \param access bitmask of PIPE_TRANSFER_x flags
- * \param transfer returns a transfer object
- */
-static inline void *
-pipe_buffer_map_range(struct pipe_context *pipe,
- struct pipe_resource *buffer,
- unsigned offset,
- unsigned length,
- unsigned access,
- struct pipe_transfer **transfer)
-{
- struct pipe_box box;
- void *map;
-
- assert(offset < buffer->width0);
- assert(offset + length <= buffer->width0);
- assert(length);
-
- u_box_1d(offset, length, &box);
-
- map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
- if (map == NULL) {
- return NULL;
- }
-
- return map;
-}
-
-
-/**
- * Map whole resource.
- * \param access bitmask of PIPE_TRANSFER_x flags
- * \param transfer returns a transfer object
- */
-static inline void *
-pipe_buffer_map(struct pipe_context *pipe,
- struct pipe_resource *buffer,
- unsigned access,
- struct pipe_transfer **transfer)
-{
- return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, access, transfer);
-}
-
-
-static inline void
-pipe_buffer_unmap(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
-{
- pipe->transfer_unmap(pipe, transfer);
-}
-
-static inline void
-pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
- struct pipe_transfer *transfer,
- unsigned offset,
- unsigned length)
-{
- struct pipe_box box;
- int transfer_offset;
-
- assert(length);
- assert(transfer->box.x <= (int) offset);
- assert((int) (offset + length) <= transfer->box.x + transfer->box.width);
-
- /* Match old screen->buffer_flush_mapped_range() behaviour, where
- * offset parameter is relative to the start of the buffer, not the
- * mapped range.
- */
- transfer_offset = offset - transfer->box.x;
-
- u_box_1d(transfer_offset, length, &box);
-
- pipe->transfer_flush_region(pipe, transfer, &box);
-}
-
-static inline void
-pipe_buffer_write(struct pipe_context *pipe,
- struct pipe_resource *buf,
- unsigned offset,
- unsigned size,
- const void *data)
-{
- struct pipe_box box;
- unsigned access = PIPE_TRANSFER_WRITE;
-
- if (offset == 0 && size == buf->width0) {
- access |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
- } else {
- access |= PIPE_TRANSFER_DISCARD_RANGE;
- }
-
- u_box_1d(offset, size, &box);
-
- pipe->transfer_inline_write( pipe,
- buf,
- 0,
- access,
- &box,
- data,
- size,
- 0);
-}
-
-/**
- * Special case for writing non-overlapping ranges.
- *
- * We can avoid GPU/CPU synchronization when writing range that has never
- * been written before.
- */
-static inline void
-pipe_buffer_write_nooverlap(struct pipe_context *pipe,
- struct pipe_resource *buf,
- unsigned offset, unsigned size,
- const void *data)
-{
- struct pipe_box box;
-
- u_box_1d(offset, size, &box);
-
- pipe->transfer_inline_write(pipe,
- buf,
- 0,
- (PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED),
- &box,
- data,
- 0, 0);
-}
-
-
-/**
- * Create a new resource and immediately put data into it
- * \param bind bitmask of PIPE_BIND_x flags
- * \param usage bitmask of PIPE_USAGE_x flags
- */
-static inline struct pipe_resource *
-pipe_buffer_create_with_data(struct pipe_context *pipe,
- unsigned bind,
- unsigned usage,
- unsigned size,
- const void *ptr)
-{
- struct pipe_resource *res = pipe_buffer_create(pipe->screen,
- bind, usage, size);
- pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
- return res;
-}
-
-static inline void
-pipe_buffer_read(struct pipe_context *pipe,
- struct pipe_resource *buf,
- unsigned offset,
- unsigned size,
- void *data)
-{
- struct pipe_transfer *src_transfer;
- ubyte *map;
-
- map = (ubyte *) pipe_buffer_map_range(pipe,
- buf,
- offset, size,
- PIPE_TRANSFER_READ,
- &src_transfer);
- if (!map)
- return;
-
- memcpy(data, map, size);
- pipe_buffer_unmap(pipe, src_transfer);
-}
-
-
-/**
- * Map a resource for reading/writing.
- * \param access bitmask of PIPE_TRANSFER_x flags
- */
-static inline void *
-pipe_transfer_map(struct pipe_context *context,
- struct pipe_resource *resource,
- unsigned level, unsigned layer,
- unsigned access,
- unsigned x, unsigned y,
- unsigned w, unsigned h,
- struct pipe_transfer **transfer)
-{
- struct pipe_box box;
- u_box_2d_zslice(x, y, layer, w, h, &box);
- return context->transfer_map(context,
- resource,
- level,
- access,
- &box, transfer);
-}
-
-
-/**
- * Map a 3D (texture) resource for reading/writing.
- * \param access bitmask of PIPE_TRANSFER_x flags
- */
-static inline void *
-pipe_transfer_map_3d(struct pipe_context *context,
- struct pipe_resource *resource,
- unsigned level,
- unsigned access,
- unsigned x, unsigned y, unsigned z,
- unsigned w, unsigned h, unsigned d,
- struct pipe_transfer **transfer)
-{
- struct pipe_box box;
- u_box_3d(x, y, z, w, h, d, &box);
- return context->transfer_map(context,
- resource,
- level,
- access,
- &box, transfer);
-}
-
-static inline void
-pipe_transfer_unmap( struct pipe_context *context,
- struct pipe_transfer *transfer )
-{
- context->transfer_unmap( context, transfer );
-}
-
-static inline void
-pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
- struct pipe_resource *buf)
-{
- if (buf) {
- struct pipe_constant_buffer cb;
- cb.buffer = buf;
- cb.buffer_offset = 0;
- cb.buffer_size = buf->width0;
- cb.user_buffer = NULL;
- pipe->set_constant_buffer(pipe, shader, index, &cb);
- } else {
- pipe->set_constant_buffer(pipe, shader, index, NULL);
- }
-}
-
-
/**
* Get the polygon offset enable/disable flag for the given polygon fill mode.
* \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
@@ -602,24 +219,6 @@ util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
}
-static inline void
-util_copy_constant_buffer(struct pipe_constant_buffer *dst,
- const struct pipe_constant_buffer *src)
-{
- if (src) {
- pipe_resource_reference(&dst->buffer, src->buffer);
- dst->buffer_offset = src->buffer_offset;
- dst->buffer_size = src->buffer_size;
- dst->user_buffer = src->user_buffer;
- }
- else {
- pipe_resource_reference(&dst->buffer, NULL);
- dst->buffer_offset = 0;
- dst->buffer_size = 0;
- dst->user_buffer = NULL;
- }
-}
-
static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
diff --git a/src/gallium/auxiliary/util/u_math.c b/src/gallium/auxiliary/util/u_math.c
deleted file mode 100644
index 79c31e1a..00000000
--- a/src/gallium/auxiliary/util/u_math.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-
-#include "pipe/p_config.h"
-#include "util/u_math.h"
-#include "util/u_cpu_detect.h"
-
-#if defined(PIPE_ARCH_SSE)
-#include <xmmintrin.h>
-/* This is defined in pmmintrin.h, but it can only be included when -msse3 is
- * used, so just define it here to avoid further. */
-#ifndef _MM_DENORMALS_ZERO_MASK
-#define _MM_DENORMALS_ZERO_MASK 0x0040
-#endif
-#endif
-
-#if 0
-/** 2^x, for x in [-1.0, 1.0) */
-float pow2_table[POW2_TABLE_SIZE];
-
-
-static void
-init_pow2_table(void)
-{
- int i;
- for (i = 0; i < POW2_TABLE_SIZE; i++)
- pow2_table[i] = (float) pow(2.0, (i - POW2_TABLE_OFFSET) / POW2_TABLE_SCALE);
-}
-
-
-/** log2(x), for x in [1.0, 2.0) */
-float log2_table[LOG2_TABLE_SIZE];
-
-
-static void
-init_log2_table(void)
-{
- unsigned i;
- for (i = 0; i < LOG2_TABLE_SIZE; i++)
- log2_table[i] = (float) log2(1.0 + i * (1.0 / LOG2_TABLE_SCALE));
-}
-#endif
-
-/**
- * One time init for math utilities.
- */
-void
-util_init_math(void)
-{
- static boolean initialized = FALSE;
- if (!initialized) {
- // init_pow2_table();
- /* init_log2_table();*/
- initialized = TRUE;
- }
-}
-
-/**
- * Fetches the contents of the fpstate (mxcsr on x86) register.
- *
- * On platforms without support for it just returns 0.
- */
-unsigned
-util_fpstate_get(void)
-{
- unsigned mxcsr = 0;
-
-#if defined(PIPE_ARCH_SSE)
- if (util_cpu_caps.has_sse) {
- mxcsr = _mm_getcsr();
- }
-#endif
-
- return mxcsr;
-}
-
-/**
- * Make sure that the fp treats the denormalized floating
- * point numbers as zero.
- *
- * This is the behavior required by D3D10. OpenGL doesn't care.
- */
-unsigned
-util_fpstate_set_denorms_to_zero(unsigned current_mxcsr)
-{
-#if defined(PIPE_ARCH_SSE)
- if (util_cpu_caps.has_sse) {
- /* Enable flush to zero mode */
- current_mxcsr |= _MM_FLUSH_ZERO_MASK;
- if (util_cpu_caps.has_daz) {
- /* Enable denormals are zero mode */
- current_mxcsr |= _MM_DENORMALS_ZERO_MASK;
- }
- util_fpstate_set(current_mxcsr);
- }
-#endif
- return current_mxcsr;
-}
-
-/**
- * Set the state of the fpstate (mxcsr on x86) register.
- *
- * On platforms without support for it's a noop.
- */
-void
-util_fpstate_set(unsigned mxcsr)
-{
-#if defined(PIPE_ARCH_SSE)
- if (util_cpu_caps.has_sse) {
- _mm_setcsr(mxcsr);
- }
-#endif
-}
diff --git a/src/gallium/auxiliary/util/u_pack_color.h b/src/gallium/auxiliary/util/u_pack_color.h
deleted file mode 100644
index 93e2d50b..00000000
--- a/src/gallium/auxiliary/util/u_pack_color.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * Functions to produce packed colors/Z from floats.
- */
-
-
-#ifndef U_PACK_COLOR_H
-#define U_PACK_COLOR_H
-
-
-#include "pipe/p_compiler.h"
-#include "pipe/p_format.h"
-#include "util/u_debug.h"
-#include "util/u_format.h"
-#include "util/u_math.h"
-
-
-/**
- * Helper union for packing pixel values.
- * Will often contain values in formats which are too complex to be described
- * in simple terms, hence might just effectively contain a number of bytes.
- * Must be big enough to hold data for all formats (currently 256 bits).
- */
-union util_color {
- ubyte ub;
- ushort us;
- uint ui[4];
- ushort h[4]; /* half float */
- float f[4];
- double d[4];
-};
-
-/**
- * Pack 4 ubytes into a 4-byte word
- */
-static inline unsigned
-pack_ub4(ubyte b0, ubyte b1, ubyte b2, ubyte b3)
-{
- return ((((unsigned int)b0) << 0) |
- (((unsigned int)b1) << 8) |
- (((unsigned int)b2) << 16) |
- (((unsigned int)b3) << 24));
-}
-
-
-/**
- * Pack/convert 4 floats into one 4-byte word.
- */
-static inline unsigned
-pack_ui32_float4(float a, float b, float c, float d)
-{
- return pack_ub4( float_to_ubyte(a),
- float_to_ubyte(b),
- float_to_ubyte(c),
- float_to_ubyte(d) );
-}
-
-
-
-#endif /* U_PACK_COLOR_H */
diff --git a/src/gallium/auxiliary/util/u_rect.h b/src/gallium/auxiliary/util/u_rect.h
deleted file mode 100644
index b26f671f..00000000
--- a/src/gallium/auxiliary/util/u_rect.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#ifndef U_RECT_H
-#define U_RECT_H
-
-#include "pipe/p_compiler.h"
-#include "util/u_math.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct u_rect {
- int x0, x1;
- int y0, y1;
-};
-
-/* Do two rectangles intersect?
- */
-static inline boolean
-u_rect_test_intersection(const struct u_rect *a,
- const struct u_rect *b)
-{
- return (!(a->x1 < b->x0 ||
- b->x1 < a->x0 ||
- a->y1 < b->y0 ||
- b->y1 < a->y0));
-}
-
-/* Find the intersection of two rectangles known to intersect.
- */
-static inline void
-u_rect_find_intersection(const struct u_rect *a,
- struct u_rect *b)
-{
- /* Caller should verify intersection exists before calling.
- */
- if (b->x0 < a->x0) b->x0 = a->x0;
- if (b->x1 > a->x1) b->x1 = a->x1;
- if (b->y0 < a->y0) b->y0 = a->y0;
- if (b->y1 > a->y1) b->y1 = a->y1;
-}
-
-
-static inline int
-u_rect_area(const struct u_rect *r)
-{
- return (r->x1 - r->x0) * (r->y1 - r->y0);
-}
-
-static inline void
-u_rect_possible_intersection(const struct u_rect *a,
- struct u_rect *b)
-{
- if (u_rect_test_intersection(a,b)) {
- u_rect_find_intersection(a,b);
- }
- else {
- b->x0 = b->x1 = b->y0 = b->y1 = 0;
- }
-}
-
-/* Set @d to a rectangle that covers both @a and @b.
- */
-static inline void
-u_rect_union(struct u_rect *d, const struct u_rect *a, const struct u_rect *b)
-{
- d->x0 = MIN2(a->x0, b->x0);
- d->y0 = MIN2(a->y0, b->y0);
- d->x1 = MAX2(a->x1, b->x1);
- d->y1 = MAX2(a->y1, b->y1);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* U_RECT_H */
diff --git a/src/gallium/auxiliary/util/u_string.h b/src/gallium/auxiliary/util/u_string.h
deleted file mode 100644
index da3e2675..00000000
--- a/src/gallium/auxiliary/util/u_string.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * Platform independent functions for string manipulation.
- *
- * @author Jose Fonseca <jfonseca@vmware.com>
- */
-
-#ifndef U_STRING_H_
-#define U_STRING_H_
-
-#if !defined(_MSC_VER) && !defined(XF86_LIBC_H)
-#include <stdio.h>
-#endif
-#include <stddef.h>
-#include <stdarg.h>
-
-#include "pipe/p_compiler.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef _GNU_SOURCE
-
-#define util_strchrnul strchrnul
-
-#else
-
-static inline char *
-util_strchrnul(const char *s, char c)
-{
- for (; *s && *s != c; ++s);
-
- return (char *)s;
-}
-
-#endif
-
-#ifdef _MSC_VER
-
-int util_vsnprintf(char *, size_t, const char *, va_list);
-int util_snprintf(char *str, size_t size, const char *format, ...);
-
-static inline void
-util_vsprintf(char *str, const char *format, va_list ap)
-{
- util_vsnprintf(str, (size_t)-1, format, ap);
-}
-
-static inline void
-util_sprintf(char *str, const char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- util_vsnprintf(str, (size_t)-1, format, ap);
- va_end(ap);
-}
-
-static inline char *
-util_strchr(const char *s, char c)
-{
- char *p = util_strchrnul(s, c);
-
- return *p ? p : NULL;
-}
-
-static inline char*
-util_strncat(char *dst, const char *src, size_t n)
-{
- char *p = dst + strlen(dst);
- const char *q = src;
- size_t i;
-
- for (i = 0; i < n && *q != '\0'; ++i)
- *p++ = *q++;
- *p = '\0';
-
- return dst;
-}
-
-static inline int
-util_strcmp(const char *s1, const char *s2)
-{
- unsigned char u1, u2;
-
- while (1) {
- u1 = (unsigned char) *s1++;
- u2 = (unsigned char) *s2++;
- if (u1 != u2)
- return u1 - u2;
- if (u1 == '\0')
- return 0;
- }
- return 0;
-}
-
-static inline int
-util_strncmp(const char *s1, const char *s2, size_t n)
-{
- unsigned char u1, u2;
-
- while (n-- > 0) {
- u1 = (unsigned char) *s1++;
- u2 = (unsigned char) *s2++;
- if (u1 != u2)
- return u1 - u2;
- if (u1 == '\0')
- return 0;
- }
- return 0;
-}
-
-static inline char *
-util_strstr(const char *haystack, const char *needle)
-{
- const char *p = haystack;
- size_t len = strlen(needle);
-
- for (; (p = util_strchr(p, *needle)) != 0; p++) {
- if (util_strncmp(p, needle, len) == 0) {
- return (char *)p;
- }
- }
- return NULL;
-}
-
-static inline void *
-util_memmove(void *dest, const void *src, size_t n)
-{
- char *p = (char *)dest;
- const char *q = (const char *)src;
- if (dest < src) {
- while (n--)
- *p++ = *q++;
- }
- else
- {
- p += n;
- q += n;
- while (n--)
- *--p = *--q;
- }
- return dest;
-}
-
-
-#else
-
-#define util_vsnprintf vsnprintf
-#define util_snprintf snprintf
-#define util_vsprintf vsprintf
-#define util_sprintf sprintf
-#define util_strchr strchr
-#define util_strcmp strcmp
-#define util_strncmp strncmp
-#define util_strncat strncat
-#define util_strstr strstr
-#define util_memmove memmove
-
-#endif
-
-
-/**
- * Printable string buffer
- */
-struct util_strbuf
-{
- char *str;
- char *ptr;
- size_t left;
-};
-
-
-static inline void
-util_strbuf_init(struct util_strbuf *sbuf, char *str, size_t size)
-{
- sbuf->str = str;
- sbuf->str[0] = 0;
- sbuf->ptr = sbuf->str;
- sbuf->left = size;
-}
-
-
-static inline void
-util_strbuf_printf(struct util_strbuf *sbuf, const char *format, ...)
-{
- if(sbuf->left > 1) {
- size_t written;
- va_list ap;
- va_start(ap, format);
- written = util_vsnprintf(sbuf->ptr, sbuf->left, format, ap);
- va_end(ap);
- sbuf->ptr += written;
- sbuf->left -= written;
- }
-}
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* U_STRING_H_ */
diff --git a/src/gallium/auxiliary/util/u_surface.c b/src/gallium/auxiliary/util/u_surface.c
deleted file mode 100644
index ca274812..00000000
--- a/src/gallium/auxiliary/util/u_surface.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 VMware, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- * Surface utility functions.
- *
- * @author Brian Paul
- */
-
-
-#include "pipe/p_defines.h"
-#include "pipe/p_screen.h"
-#include "pipe/p_state.h"
-
-#include "util/u_format.h"
-#include "util/u_inlines.h"
-#include "util/u_rect.h"
-#include "util/u_surface.h"
-#include "util/u_pack_color.h"
-
-
-/**
- * Initialize a pipe_surface object. 'view' is considered to have
- * uninitialized contents.
- */
-void
-u_surface_default_template(struct pipe_surface *surf,
- const struct pipe_resource *texture)
-{
- memset(surf, 0, sizeof(*surf));
-
- surf->format = texture->format;
-}
-
-
-/**
- * Copy 2D rect from one place to another.
- * Position and sizes are in pixels.
- * src_stride may be negative to do vertical flip of pixels from source.
- */
-void
-util_copy_rect(ubyte * dst,
- enum pipe_format format,
- unsigned dst_stride,
- unsigned dst_x,
- unsigned dst_y,
- unsigned width,
- unsigned height,
- const ubyte * src,
- int src_stride,
- unsigned src_x,
- unsigned src_y)
-{
- unsigned i;
- int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
- int blocksize = util_format_get_blocksize(format);
- int blockwidth = util_format_get_blockwidth(format);
- int blockheight = util_format_get_blockheight(format);
-
- assert(blocksize > 0);
- assert(blockwidth > 0);
- assert(blockheight > 0);
-
- dst_x /= blockwidth;
- dst_y /= blockheight;
- width = (width + blockwidth - 1)/blockwidth;
- height = (height + blockheight - 1)/blockheight;
- src_x /= blockwidth;
- src_y /= blockheight;
-
- dst += dst_x * blocksize;
- src += src_x * blocksize;
- dst += dst_y * dst_stride;
- src += src_y * src_stride_pos;
- width *= blocksize;
-
- if (width == dst_stride && (int)width == src_stride)
- memcpy(dst, src, height * width);
- else {
- for (i = 0; i < height; i++) {
- memcpy(dst, src, width);
- dst += dst_stride;
- src += src_stride;
- }
- }
-}
-
-
-/**
- * Copy 3D box from one place to another.
- * Position and sizes are in pixels.
- */
-void
-util_copy_box(ubyte * dst,
- enum pipe_format format,
- unsigned dst_stride, unsigned dst_slice_stride,
- unsigned dst_x, unsigned dst_y, unsigned dst_z,
- unsigned width, unsigned height, unsigned depth,
- const ubyte * src,
- int src_stride, unsigned src_slice_stride,
- unsigned src_x, unsigned src_y, unsigned src_z)
-{
- unsigned z;
- dst += dst_z * dst_slice_stride;
- src += src_z * src_slice_stride;
- for (z = 0; z < depth; ++z) {
- util_copy_rect(dst,
- format,
- dst_stride,
- dst_x, dst_y,
- width, height,
- src,
- src_stride,
- src_x, src_y);
-
- dst += dst_slice_stride;
- src += src_slice_stride;
- }
-}
-
-
-void
-util_fill_rect(ubyte * dst,
- enum pipe_format format,
- unsigned dst_stride,
- unsigned dst_x,
- unsigned dst_y,
- unsigned width,
- unsigned height,
- union util_color *uc)
-{
- const struct util_format_description *desc = util_format_description(format);
- unsigned i, j;
- unsigned width_size;
- int blocksize = desc->block.bits / 8;
- int blockwidth = desc->block.width;
- int blockheight = desc->block.height;
-
- assert(blocksize > 0);
- assert(blockwidth > 0);
- assert(blockheight > 0);
-
- dst_x /= blockwidth;
- dst_y /= blockheight;
- width = (width + blockwidth - 1)/blockwidth;
- height = (height + blockheight - 1)/blockheight;
-
- dst += dst_x * blocksize;
- dst += dst_y * dst_stride;
- width_size = width * blocksize;
-
- switch (blocksize) {
- case 1:
- if(dst_stride == width_size)
- memset(dst, uc->ub, height * width_size);
- else {
- for (i = 0; i < height; i++) {
- memset(dst, uc->ub, width_size);
- dst += dst_stride;
- }
- }
- break;
- case 2:
- for (i = 0; i < height; i++) {
- uint16_t *row = (uint16_t *)dst;
- for (j = 0; j < width; j++)
- *row++ = uc->us;
- dst += dst_stride;
- }
- break;
- case 4:
- for (i = 0; i < height; i++) {
- uint32_t *row = (uint32_t *)dst;
- for (j = 0; j < width; j++)
- *row++ = uc->ui[0];
- dst += dst_stride;
- }
- break;
- default:
- for (i = 0; i < height; i++) {
- ubyte *row = dst;
- for (j = 0; j < width; j++) {
- memcpy(row, uc, blocksize);
- row += blocksize;
- }
- dst += dst_stride;
- }
- break;
- }
-}
-
-
-void
-util_fill_box(ubyte * dst,
- enum pipe_format format,
- unsigned stride,
- unsigned layer_stride,
- unsigned x,
- unsigned y,
- unsigned z,
- unsigned width,
- unsigned height,
- unsigned depth,
- union util_color *uc)
-{
- unsigned layer;
- dst += z * layer_stride;
- for (layer = z; layer < depth; layer++) {
- util_fill_rect(dst, format,
- stride,
- x, y, width, height, uc);
- dst += layer_stride;
- }
-}
-
-
-/**
- * Fallback function for pipe->resource_copy_region().
- * Note: (X,Y)=(0,0) is always the upper-left corner.
- */
-void
-util_resource_copy_region(struct pipe_context *pipe,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dst_x, unsigned dst_y, unsigned dst_z,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box)
-{
- struct pipe_transfer *src_trans, *dst_trans;
- uint8_t *dst_map;
- const uint8_t *src_map;
- MAYBE_UNUSED enum pipe_format src_format;
- enum pipe_format dst_format;
- struct pipe_box dst_box;
-
- assert(src && dst);
- if (!src || !dst)
- return;
-
- assert((src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER) ||
- (src->target != PIPE_BUFFER && dst->target != PIPE_BUFFER));
-
- src_format = src->format;
- dst_format = dst->format;
-
- assert(util_format_get_blocksize(dst_format) == util_format_get_blocksize(src_format));
- assert(util_format_get_blockwidth(dst_format) == util_format_get_blockwidth(src_format));
- assert(util_format_get_blockheight(dst_format) == util_format_get_blockheight(src_format));
-
- src_map = pipe->transfer_map(pipe,
- src,
- src_level,
- PIPE_TRANSFER_READ,
- src_box, &src_trans);
- assert(src_map);
- if (!src_map) {
- goto no_src_map;
- }
-
- dst_box.x = dst_x;
- dst_box.y = dst_y;
- dst_box.z = dst_z;
- dst_box.width = src_box->width;
- dst_box.height = src_box->height;
- dst_box.depth = src_box->depth;
-
- dst_map = pipe->transfer_map(pipe,
- dst,
- dst_level,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
- &dst_box, &dst_trans);
- assert(dst_map);
- if (!dst_map) {
- goto no_dst_map;
- }
-
- if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
- assert(src_box->height == 1);
- assert(src_box->depth == 1);
- memcpy(dst_map, src_map, src_box->width);
- } else {
- util_copy_box(dst_map,
- dst_format,
- dst_trans->stride, dst_trans->layer_stride,
- 0, 0, 0,
- src_box->width, src_box->height, src_box->depth,
- src_map,
- src_trans->stride, src_trans->layer_stride,
- 0, 0, 0);
- }
-
- pipe->transfer_unmap(pipe, dst_trans);
-no_dst_map:
- pipe->transfer_unmap(pipe, src_trans);
-no_src_map:
- ;
-}
-
-
-
-#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))
-
-
-/* Return if the box is totally inside the resource.
- */
-static boolean
-is_box_inside_resource(const struct pipe_resource *res,
- const struct pipe_box *box,
- unsigned level)
-{
- unsigned width = 1, height = 1, depth = 1;
-
- switch (res->target) {
- case PIPE_BUFFER:
- width = res->width0;
- height = 1;
- depth = 1;
- break;
- case PIPE_TEXTURE_1D:
- width = u_minify(res->width0, level);
- height = 1;
- depth = 1;
- break;
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_RECT:
- width = u_minify(res->width0, level);
- height = u_minify(res->height0, level);
- depth = 1;
- break;
- case PIPE_TEXTURE_3D:
- width = u_minify(res->width0, level);
- height = u_minify(res->height0, level);
- depth = u_minify(res->depth0, level);
- break;
- case PIPE_TEXTURE_CUBE:
- width = u_minify(res->width0, level);
- height = u_minify(res->height0, level);
- depth = 6;
- break;
- case PIPE_TEXTURE_1D_ARRAY:
- width = u_minify(res->width0, level);
- height = 1;
- depth = res->array_size;
- break;
- case PIPE_TEXTURE_2D_ARRAY:
- width = u_minify(res->width0, level);
- height = u_minify(res->height0, level);
- depth = res->array_size;
- break;
- case PIPE_TEXTURE_CUBE_ARRAY:
- width = u_minify(res->width0, level);
- height = u_minify(res->height0, level);
- depth = res->array_size;
- assert(res->array_size % 6 == 0);
- break;
- case PIPE_MAX_TEXTURE_TYPES:;
- }
-
- return box->x >= 0 &&
- box->x + box->width <= (int) width &&
- box->y >= 0 &&
- box->y + box->height <= (int) height &&
- box->z >= 0 &&
- box->z + box->depth <= (int) depth;
-}
-
-static unsigned
-get_sample_count(const struct pipe_resource *res)
-{
- return res->nr_samples ? res->nr_samples : 1;
-}
-
-/**
- * Try to do a blit using resource_copy_region. The function calls
- * resource_copy_region if the blit description is compatible with it.
- *
- * It returns TRUE if the blit was done using resource_copy_region.
- *
- * It returns FALSE otherwise and the caller must fall back to a more generic
- * codepath for the blit operation. (e.g. by using u_blitter)
- */
-boolean
-util_try_blit_via_copy_region(struct pipe_context *ctx,
- const struct pipe_blit_info *blit)
-{
- unsigned mask = util_format_get_mask(blit->dst.format);
-
- /* No format conversions. */
- if (blit->src.resource->format != blit->src.format ||
- blit->dst.resource->format != blit->dst.format ||
- !util_is_format_compatible(
- util_format_description(blit->src.resource->format),
- util_format_description(blit->dst.resource->format))) {
- return FALSE;
- }
-
- /* No masks, no filtering, no scissor. */
- if ((blit->mask & mask) != mask ||
- blit->filter != PIPE_TEX_FILTER_NEAREST ||
- blit->scissor_enable) {
- return FALSE;
- }
-
- /* No flipping. */
- if (blit->src.box.width < 0 ||
- blit->src.box.height < 0 ||
- blit->src.box.depth < 0) {
- return FALSE;
- }
-
- /* No scaling. */
- if (blit->src.box.width != blit->dst.box.width ||
- blit->src.box.height != blit->dst.box.height ||
- blit->src.box.depth != blit->dst.box.depth) {
- return FALSE;
- }
-
- /* No out-of-bounds access. */
- if (!is_box_inside_resource(blit->src.resource, &blit->src.box,
- blit->src.level) ||
- !is_box_inside_resource(blit->dst.resource, &blit->dst.box,
- blit->dst.level)) {
- return FALSE;
- }
-
- /* Sample counts must match. */
- if (get_sample_count(blit->src.resource) !=
- get_sample_count(blit->dst.resource)) {
- return FALSE;
- }
-
- ctx->resource_copy_region(ctx, blit->dst.resource, blit->dst.level,
- blit->dst.box.x, blit->dst.box.y, blit->dst.box.z,
- blit->src.resource, blit->src.level,
- &blit->src.box);
- return TRUE;
-}
diff --git a/src/gallium/auxiliary/util/u_surface.h b/src/gallium/auxiliary/util/u_surface.h
deleted file mode 100644
index bfd8f40d..00000000
--- a/src/gallium/auxiliary/util/u_surface.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2009 VMware, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#ifndef U_SURFACE_H
-#define U_SURFACE_H
-
-
-#include "pipe/p_compiler.h"
-#include "pipe/p_state.h"
-
-#include "util/u_pack_color.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-extern void
-u_surface_default_template(struct pipe_surface *view,
- const struct pipe_resource *texture);
-
-extern void
-util_copy_rect(ubyte * dst, enum pipe_format format,
- unsigned dst_stride, unsigned dst_x, unsigned dst_y,
- unsigned width, unsigned height, const ubyte * src,
- int src_stride, unsigned src_x, unsigned src_y);
-
-extern void
-util_copy_box(ubyte * dst,
- enum pipe_format format,
- unsigned dst_stride, unsigned dst_slice_stride,
- unsigned dst_x, unsigned dst_y, unsigned dst_z,
- unsigned width, unsigned height, unsigned depth,
- const ubyte * src,
- int src_stride, unsigned src_slice_stride,
- unsigned src_x, unsigned src_y, unsigned src_z);
-
-extern void
-util_fill_rect(ubyte * dst, enum pipe_format format,
- unsigned dst_stride, unsigned dst_x, unsigned dst_y,
- unsigned width, unsigned height, union util_color *uc);
-
-extern void
-util_fill_box(ubyte * dst, enum pipe_format format,
- unsigned stride, unsigned layer_stride,
- unsigned x, unsigned y, unsigned z,
- unsigned width, unsigned height, unsigned depth,
- union util_color *uc);
-
-
-extern void
-util_resource_copy_region(struct pipe_context *pipe,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dst_x, unsigned dst_y, unsigned dst_z,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box);
-
-extern void
-util_clear_render_target(struct pipe_context *pipe,
- struct pipe_surface *dst,
- const union pipe_color_union *color,
- unsigned dstx, unsigned dsty,
- unsigned width, unsigned height);
-
-extern void
-util_clear_depth_stencil(struct pipe_context *pipe,
- struct pipe_surface *dst,
- unsigned clear_flags,
- double depth,
- unsigned stencil,
- unsigned dstx, unsigned dsty,
- unsigned width, unsigned height);
-
-extern boolean
-util_try_blit_via_copy_region(struct pipe_context *ctx,
- const struct pipe_blit_info *blit);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /* U_SURFACE_H */
diff --git a/src/gallium/include/pipe/p_context.h b/src/gallium/include/pipe/p_context.h
deleted file mode 100644
index 0702729e..00000000
--- a/src/gallium/include/pipe/p_context.h
+++ /dev/null
@@ -1,546 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2007 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef PIPE_CONTEXT_H
-#define PIPE_CONTEXT_H
-
-#include "p_compiler.h"
-#include "p_format.h"
-#include "p_video_enums.h"
-#include "p_defines.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-struct pipe_blend_color;
-struct pipe_blend_state;
-struct pipe_blit_info;
-struct pipe_box;
-struct pipe_clip_state;
-struct pipe_constant_buffer;
-struct pipe_depth_stencil_alpha_state;
-struct pipe_draw_info;
-struct pipe_fence_handle;
-struct pipe_framebuffer_state;
-struct pipe_index_buffer;
-struct pipe_query;
-struct pipe_poly_stipple;
-struct pipe_rasterizer_state;
-struct pipe_resolve_info;
-struct pipe_resource;
-struct pipe_sampler_state;
-struct pipe_sampler_view;
-struct pipe_scissor_state;
-struct pipe_shader_state;
-struct pipe_stencil_ref;
-struct pipe_stream_output_target;
-struct pipe_surface;
-struct pipe_transfer;
-struct pipe_vertex_buffer;
-struct pipe_vertex_element;
-struct pipe_video_buffer;
-struct pipe_video_codec;
-struct pipe_viewport_state;
-struct pipe_compute_state;
-union pipe_color_union;
-union pipe_query_result;
-
-/**
- * Gallium rendering context. Basically:
- * - state setting functions
- * - VBO drawing functions
- * - surface functions
- */
-struct pipe_context {
- struct pipe_screen *screen;
-
- void *priv; /**< context private data (for DRI for example) */
- void *draw; /**< private, for draw module (temporary?) */
-
- void (*destroy)( struct pipe_context * );
-
- /**
- * VBO drawing
- */
- /*@{*/
- void (*draw_vbo)( struct pipe_context *pipe,
- const struct pipe_draw_info *info );
- /*@}*/
-
- /**
- * Predicate subsequent rendering on occlusion query result
- * \param query the query predicate, or NULL if no predicate
- * \param condition whether to skip on FALSE or TRUE query results
- * \param mode one of PIPE_RENDER_COND_x
- */
- void (*render_condition)( struct pipe_context *pipe,
- struct pipe_query *query,
- boolean condition,
- uint mode );
-
- /**
- * Query objects
- */
- /*@{*/
- struct pipe_query *(*create_query)( struct pipe_context *pipe,
- unsigned query_type );
-
- void (*destroy_query)(struct pipe_context *pipe,
- struct pipe_query *q);
-
- void (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
- void (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
-
- /**
- * Get results of a query.
- * \param wait if true, this query will block until the result is ready
- * \return TRUE if results are ready, FALSE otherwise
- */
- boolean (*get_query_result)(struct pipe_context *pipe,
- struct pipe_query *q,
- boolean wait,
- union pipe_query_result *result);
- /*@}*/
-
- /**
- * State functions (create/bind/destroy state objects)
- */
- /*@{*/
- void * (*create_blend_state)(struct pipe_context *,
- const struct pipe_blend_state *);
- void (*bind_blend_state)(struct pipe_context *, void *);
- void (*delete_blend_state)(struct pipe_context *, void *);
-
- void * (*create_sampler_state)(struct pipe_context *,
- const struct pipe_sampler_state *);
- void (*bind_sampler_states)(struct pipe_context *,
- unsigned shader, unsigned start_slot,
- unsigned num_samplers, void **samplers);
- void (*delete_sampler_state)(struct pipe_context *, void *);
-
- void * (*create_rasterizer_state)(struct pipe_context *,
- const struct pipe_rasterizer_state *);
- void (*bind_rasterizer_state)(struct pipe_context *, void *);
- void (*delete_rasterizer_state)(struct pipe_context *, void *);
-
- void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
- const struct pipe_depth_stencil_alpha_state *);
- void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
- void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);
-
- void * (*create_fs_state)(struct pipe_context *,
- const struct pipe_shader_state *);
- void (*bind_fs_state)(struct pipe_context *, void *);
- void (*delete_fs_state)(struct pipe_context *, void *);
-
- void * (*create_vs_state)(struct pipe_context *,
- const struct pipe_shader_state *);
- void (*bind_vs_state)(struct pipe_context *, void *);
- void (*delete_vs_state)(struct pipe_context *, void *);
-
- void * (*create_gs_state)(struct pipe_context *,
- const struct pipe_shader_state *);
- void (*bind_gs_state)(struct pipe_context *, void *);
- void (*delete_gs_state)(struct pipe_context *, void *);
-
- void * (*create_vertex_elements_state)(struct pipe_context *,
- unsigned num_elements,
- const struct pipe_vertex_element *);
- void (*bind_vertex_elements_state)(struct pipe_context *, void *);
- void (*delete_vertex_elements_state)(struct pipe_context *, void *);
-
- /*@}*/
-
- /**
- * Parameter-like state (or properties)
- */
- /*@{*/
- void (*set_blend_color)( struct pipe_context *,
- const struct pipe_blend_color * );
-
- void (*set_stencil_ref)( struct pipe_context *,
- const struct pipe_stencil_ref * );
-
- void (*set_sample_mask)( struct pipe_context *,
- unsigned sample_mask );
-
- void (*set_clip_state)( struct pipe_context *,
- const struct pipe_clip_state * );
-
- void (*set_constant_buffer)( struct pipe_context *,
- uint shader, uint index,
- struct pipe_constant_buffer *buf );
-
- void (*set_framebuffer_state)( struct pipe_context *,
- const struct pipe_framebuffer_state * );
-
- void (*set_polygon_stipple)( struct pipe_context *,
- const struct pipe_poly_stipple * );
-
- void (*set_scissor_states)( struct pipe_context *,
- unsigned start_slot,
- unsigned num_scissors,
- const struct pipe_scissor_state * );
-
- void (*set_viewport_states)( struct pipe_context *,
- unsigned start_slot,
- unsigned num_viewports,
- const struct pipe_viewport_state *);
-
- void (*set_sampler_views)(struct pipe_context *, unsigned shader,
- unsigned start_slot, unsigned num_views,
- struct pipe_sampler_view **);
-
- /**
- * Bind an array of shader resources that will be used by the
- * graphics pipeline. Any resources that were previously bound to
- * the specified range will be unbound after this call.
- *
- * \param start first resource to bind.
- * \param count number of consecutive resources to bind.
- * \param resources array of pointers to the resources to bind, it
- * should contain at least \a count elements
- * unless it's NULL, in which case no new
- * resources will be bound.
- */
- void (*set_shader_resources)(struct pipe_context *,
- unsigned start, unsigned count,
- struct pipe_surface **resources);
-
- void (*set_vertex_buffers)( struct pipe_context *,
- unsigned start_slot,
- unsigned num_buffers,
- const struct pipe_vertex_buffer * );
-
- void (*set_index_buffer)( struct pipe_context *pipe,
- const struct pipe_index_buffer * );
-
- /*@}*/
-
- /**
- * Stream output functions.
- */
- /*@{*/
-
- struct pipe_stream_output_target *(*create_stream_output_target)(
- struct pipe_context *,
- struct pipe_resource *,
- unsigned buffer_offset,
- unsigned buffer_size);
-
- void (*stream_output_target_destroy)(struct pipe_context *,
- struct pipe_stream_output_target *);
-
- void (*set_stream_output_targets)(struct pipe_context *,
- unsigned num_targets,
- struct pipe_stream_output_target **targets,
- unsigned append_bitmask);
-
- /*@}*/
-
-
- /**
- * Resource functions for blit-like functionality
- *
- * If a driver supports multisampling, blit must implement color resolve.
- */
- /*@{*/
-
- /**
- * Copy a block of pixels from one resource to another.
- * The resource must be of the same format.
- * Resources with nr_samples > 1 are not allowed.
- */
- void (*resource_copy_region)(struct pipe_context *pipe,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dstx, unsigned dsty, unsigned dstz,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box);
-
- /* Optimal hardware path for blitting pixels.
- * Scaling, format conversion, up- and downsampling (resolve) are allowed.
- */
- void (*blit)(struct pipe_context *pipe,
- const struct pipe_blit_info *info);
-
- /*@}*/
-
- /**
- * Clear the specified set of currently bound buffers to specified values.
- * The entire buffers are cleared (no scissor, no colormask, etc).
- *
- * \param buffers bitfield of PIPE_CLEAR_* values.
- * \param color pointer to a union of fiu array for each of r, g, b, a.
- * \param depth depth clear value in [0,1].
- * \param stencil stencil clear value
- */
- void (*clear)(struct pipe_context *pipe,
- unsigned buffers,
- const union pipe_color_union *color,
- double depth,
- unsigned stencil);
-
- /**
- * Clear a color rendertarget surface.
- * \param color pointer to an union of fiu array for each of r, g, b, a.
- */
- void (*clear_render_target)(struct pipe_context *pipe,
- struct pipe_surface *dst,
- const union pipe_color_union *color,
- unsigned dstx, unsigned dsty,
- unsigned width, unsigned height);
-
- /**
- * Clear a depth-stencil surface.
- * \param clear_flags bitfield of PIPE_CLEAR_DEPTH/STENCIL values.
- * \param depth depth clear value in [0,1].
- * \param stencil stencil clear value
- */
- void (*clear_depth_stencil)(struct pipe_context *pipe,
- struct pipe_surface *dst,
- unsigned clear_flags,
- double depth,
- unsigned stencil,
- unsigned dstx, unsigned dsty,
- unsigned width, unsigned height);
-
- /** Flush draw commands
- *
- * \param flags bitfield of enum pipe_flush_flags values.
- */
- void (*flush)(struct pipe_context *pipe,
- struct pipe_fence_handle **fence,
- unsigned flags);
-
- /**
- * Create a view on a texture to be used by a shader stage.
- */
- struct pipe_sampler_view * (*create_sampler_view)(struct pipe_context *ctx,
- struct pipe_resource *texture,
- const struct pipe_sampler_view *templat);
-
- void (*sampler_view_destroy)(struct pipe_context *ctx,
- struct pipe_sampler_view *view);
-
-
- /**
- * Get a surface which is a "view" into a resource, used by
- * render target / depth stencil stages.
- */
- struct pipe_surface *(*create_surface)(struct pipe_context *ctx,
- struct pipe_resource *resource,
- const struct pipe_surface *templat);
-
- void (*surface_destroy)(struct pipe_context *ctx,
- struct pipe_surface *);
-
- /**
- * Map a resource.
- *
- * Transfers are (by default) context-private and allow uploads to be
- * interleaved with rendering.
- *
- * out_transfer will contain the transfer object that must be passed
- * to all the other transfer functions. It also contains useful
- * information (like texture strides).
- */
- void *(*transfer_map)(struct pipe_context *,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
- const struct pipe_box *,
- struct pipe_transfer **out_transfer);
-
- /* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
- * regions specified with this call are guaranteed to be written to
- * the resource.
- */
- void (*transfer_flush_region)( struct pipe_context *,
- struct pipe_transfer *transfer,
- const struct pipe_box *);
-
- void (*transfer_unmap)(struct pipe_context *,
- struct pipe_transfer *transfer);
-
- /* One-shot transfer operation with data supplied in a user
- * pointer. XXX: strides??
- */
- void (*transfer_inline_write)( struct pipe_context *,
- struct pipe_resource *,
- unsigned level,
- unsigned usage, /* a combination of PIPE_TRANSFER_x */
- const struct pipe_box *,
- const void *data,
- unsigned stride,
- unsigned layer_stride);
-
- /**
- * Flush any pending framebuffer writes and invalidate texture caches.
- */
- void (*texture_barrier)(struct pipe_context *);
-
- /**
- * Flush caches according to flags.
- */
- void (*memory_barrier)(struct pipe_context *, unsigned flags);
-
- /**
- * Creates a video codec for a specific video format/profile
- */
- struct pipe_video_codec *(*create_video_codec)( struct pipe_context *context,
- const struct pipe_video_codec *templat );
-
- /**
- * Creates a video buffer as decoding target
- */
- struct pipe_video_buffer *(*create_video_buffer)( struct pipe_context *context,
- const struct pipe_video_buffer *templat );
-
- /**
- * Compute kernel execution
- */
- /*@{*/
- /**
- * Define the compute program and parameters to be used by
- * pipe_context::launch_grid.
- */
- void *(*create_compute_state)(struct pipe_context *context,
- const struct pipe_compute_state *);
- void (*bind_compute_state)(struct pipe_context *, void *);
- void (*delete_compute_state)(struct pipe_context *, void *);
-
- /**
- * Bind an array of shader resources that will be used by the
- * compute program. Any resources that were previously bound to
- * the specified range will be unbound after this call.
- *
- * \param start first resource to bind.
- * \param count number of consecutive resources to bind.
- * \param resources array of pointers to the resources to bind, it
- * should contain at least \a count elements
- * unless it's NULL, in which case no new
- * resources will be bound.
- */
- void (*set_compute_resources)(struct pipe_context *,
- unsigned start, unsigned count,
- struct pipe_surface **resources);
-
- /**
- * Bind an array of buffers to be mapped into the address space of
- * the GLOBAL resource. Any buffers that were previously bound
- * between [first, first + count - 1] are unbound after this call.
- *
- * \param first first buffer to map.
- * \param count number of consecutive buffers to map.
- * \param resources array of pointers to the buffers to map, it
- * should contain at least \a count elements
- * unless it's NULL, in which case no new
- * resources will be bound.
- * \param handles array of pointers to the memory locations that
- * will be updated with the address each buffer
- * will be mapped to. The base memory address of
- * each of the buffers will be added to the value
- * pointed to by its corresponding handle to form
- * the final address argument. It should contain
- * at least \a count elements, unless \a
- * resources is NULL in which case \a handles
- * should be NULL as well.
- *
- * Note that the driver isn't required to make any guarantees about
- * the contents of the \a handles array being valid anytime except
- * during the subsequent calls to pipe_context::launch_grid. This
- * means that the only sensible location handles[i] may point to is
- * somewhere within the INPUT buffer itself. This is so to
- * accommodate implementations that lack virtual memory but
- * nevertheless migrate buffers on the fly, leading to resource
- * base addresses that change on each kernel invocation or are
- * unknown to the pipe driver.
- */
- void (*set_global_binding)(struct pipe_context *context,
- unsigned first, unsigned count,
- struct pipe_resource **resources,
- uint32_t **handles);
-
- /**
- * Launch the compute kernel starting from instruction \a pc of the
- * currently bound compute program.
- *
- * \a grid_layout and \a block_layout are arrays of size \a
- * PIPE_COMPUTE_CAP_GRID_DIMENSION that determine the layout of the
- * grid (in block units) and working block (in thread units) to be
- * used, respectively.
- *
- * \a pc For drivers that use PIPE_SHADER_IR_LLVM as their prefered IR,
- * this value will be the index of the kernel in the opencl.kernels
- * metadata list.
- *
- * \a input will be used to initialize the INPUT resource, and it
- * should point to a buffer of at least
- * pipe_compute_state::req_input_mem bytes.
- */
- void (*launch_grid)(struct pipe_context *context,
- const uint *block_layout, const uint *grid_layout,
- uint32_t pc, const void *input);
- /*@}*/
-
- /**
- * Get sample position for an individual sample point.
- *
- * \param sample_count - total number of samples
- * \param sample_index - sample to get the position values for
- * \param out_value - return value of 2 floats for x and y position for
- * requested sample.
- */
- void (*get_sample_position)(struct pipe_context *context,
- unsigned sample_count,
- unsigned sample_index,
- float *out_value);
-
- /**
- * Flush the resource cache, so that the resource can be used
- * by an external client. Possible usage:
- * - flushing a resource before presenting it on the screen
- * - flushing a resource if some other process or device wants to use it
- * This shouldn't be used to flush caches if the resource is only managed
- * by a single pipe_screen and is not shared with another process.
- * (i.e. you shouldn't use it to flush caches explicitly if you want to e.g.
- * use the resource for texturing)
- */
- void (*flush_resource)(struct pipe_context *ctx,
- struct pipe_resource *resource);
-};
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* PIPE_CONTEXT_H */
diff --git a/src/gallium/include/pipe/p_defines.h b/src/gallium/include/pipe/p_defines.h
index 610b92c2..a7cefc61 100644
--- a/src/gallium/include/pipe/p_defines.h
+++ b/src/gallium/include/pipe/p_defines.h
@@ -50,33 +50,37 @@ enum pipe_error {
/* TODO */
};
+enum pipe_blendfactor {
+ PIPE_BLENDFACTOR_ONE = 1,
+ PIPE_BLENDFACTOR_SRC_COLOR,
+ PIPE_BLENDFACTOR_SRC_ALPHA,
+ PIPE_BLENDFACTOR_DST_ALPHA,
+ PIPE_BLENDFACTOR_DST_COLOR,
+ PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE,
+ PIPE_BLENDFACTOR_CONST_COLOR,
+ PIPE_BLENDFACTOR_CONST_ALPHA,
+ PIPE_BLENDFACTOR_SRC1_COLOR,
+ PIPE_BLENDFACTOR_SRC1_ALPHA,
+
+ PIPE_BLENDFACTOR_ZERO = 0x11,
+ PIPE_BLENDFACTOR_INV_SRC_COLOR,
+ PIPE_BLENDFACTOR_INV_SRC_ALPHA,
+ PIPE_BLENDFACTOR_INV_DST_ALPHA,
+ PIPE_BLENDFACTOR_INV_DST_COLOR,
+
+ PIPE_BLENDFACTOR_INV_CONST_COLOR = 0x17,
+ PIPE_BLENDFACTOR_INV_CONST_ALPHA,
+ PIPE_BLENDFACTOR_INV_SRC1_COLOR,
+ PIPE_BLENDFACTOR_INV_SRC1_ALPHA,
+};
-#define PIPE_BLENDFACTOR_ONE 0x1
-#define PIPE_BLENDFACTOR_SRC_COLOR 0x2
-#define PIPE_BLENDFACTOR_SRC_ALPHA 0x3
-#define PIPE_BLENDFACTOR_DST_ALPHA 0x4
-#define PIPE_BLENDFACTOR_DST_COLOR 0x5
-#define PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE 0x6
-#define PIPE_BLENDFACTOR_CONST_COLOR 0x7
-#define PIPE_BLENDFACTOR_CONST_ALPHA 0x8
-#define PIPE_BLENDFACTOR_SRC1_COLOR 0x9
-#define PIPE_BLENDFACTOR_SRC1_ALPHA 0x0A
-#define PIPE_BLENDFACTOR_ZERO 0x11
-#define PIPE_BLENDFACTOR_INV_SRC_COLOR 0x12
-#define PIPE_BLENDFACTOR_INV_SRC_ALPHA 0x13
-#define PIPE_BLENDFACTOR_INV_DST_ALPHA 0x14
-#define PIPE_BLENDFACTOR_INV_DST_COLOR 0x15
-#define PIPE_BLENDFACTOR_INV_CONST_COLOR 0x17
-#define PIPE_BLENDFACTOR_INV_CONST_ALPHA 0x18
-#define PIPE_BLENDFACTOR_INV_SRC1_COLOR 0x19
-#define PIPE_BLENDFACTOR_INV_SRC1_ALPHA 0x1A
-
-#define PIPE_BLEND_ADD 0
-#define PIPE_BLEND_SUBTRACT 1
-#define PIPE_BLEND_REVERSE_SUBTRACT 2
-#define PIPE_BLEND_MIN 3
-#define PIPE_BLEND_MAX 4
-
+enum pipe_blend_func {
+ PIPE_BLEND_ADD,
+ PIPE_BLEND_SUBTRACT,
+ PIPE_BLEND_REVERSE_SUBTRACT,
+ PIPE_BLEND_MIN,
+ PIPE_BLEND_MAX,
+};
enum pipe_logicop {
PIPE_LOGICOP_CLEAR,
@@ -112,19 +116,23 @@ enum pipe_logicop {
* Inequality functions. Used for depth test, stencil compare, alpha
* test, shadow compare, etc.
*/
-#define PIPE_FUNC_NEVER 0
-#define PIPE_FUNC_LESS 1
-#define PIPE_FUNC_EQUAL 2
-#define PIPE_FUNC_LEQUAL 3
-#define PIPE_FUNC_GREATER 4
-#define PIPE_FUNC_NOTEQUAL 5
-#define PIPE_FUNC_GEQUAL 6
-#define PIPE_FUNC_ALWAYS 7
+enum pipe_compare_func {
+ PIPE_FUNC_NEVER,
+ PIPE_FUNC_LESS,
+ PIPE_FUNC_EQUAL,
+ PIPE_FUNC_LEQUAL,
+ PIPE_FUNC_GREATER,
+ PIPE_FUNC_NOTEQUAL,
+ PIPE_FUNC_GEQUAL,
+ PIPE_FUNC_ALWAYS,
+};
/** Polygon fill mode */
-#define PIPE_POLYGON_MODE_FILL 0
-#define PIPE_POLYGON_MODE_LINE 1
-#define PIPE_POLYGON_MODE_POINT 2
+enum {
+ PIPE_POLYGON_MODE_FILL,
+ PIPE_POLYGON_MODE_LINE,
+ PIPE_POLYGON_MODE_POINT,
+};
/** Polygon face specification, eg for culling */
#define PIPE_FACE_NONE 0
@@ -133,60 +141,73 @@ enum pipe_logicop {
#define PIPE_FACE_FRONT_AND_BACK (PIPE_FACE_FRONT | PIPE_FACE_BACK)
/** Stencil ops */
-#define PIPE_STENCIL_OP_KEEP 0
-#define PIPE_STENCIL_OP_ZERO 1
-#define PIPE_STENCIL_OP_REPLACE 2
-#define PIPE_STENCIL_OP_INCR 3
-#define PIPE_STENCIL_OP_DECR 4
-#define PIPE_STENCIL_OP_INCR_WRAP 5
-#define PIPE_STENCIL_OP_DECR_WRAP 6
-#define PIPE_STENCIL_OP_INVERT 7
+enum pipe_stencil_op {
+ PIPE_STENCIL_OP_KEEP,
+ PIPE_STENCIL_OP_ZERO,
+ PIPE_STENCIL_OP_REPLACE,
+ PIPE_STENCIL_OP_INCR,
+ PIPE_STENCIL_OP_DECR,
+ PIPE_STENCIL_OP_INCR_WRAP,
+ PIPE_STENCIL_OP_DECR_WRAP,
+ PIPE_STENCIL_OP_INVERT,
+};
/** Texture types.
* See the documentation for info on PIPE_TEXTURE_RECT vs PIPE_TEXTURE_2D */
-enum pipe_texture_target {
- PIPE_BUFFER = 0,
- PIPE_TEXTURE_1D = 1,
- PIPE_TEXTURE_2D = 2,
- PIPE_TEXTURE_3D = 3,
- PIPE_TEXTURE_CUBE = 4,
- PIPE_TEXTURE_RECT = 5,
- PIPE_TEXTURE_1D_ARRAY = 6,
- PIPE_TEXTURE_2D_ARRAY = 7,
- PIPE_TEXTURE_CUBE_ARRAY = 8,
- PIPE_MAX_TEXTURE_TYPES
-};
-
-#define PIPE_TEX_FACE_POS_X 0
-#define PIPE_TEX_FACE_NEG_X 1
-#define PIPE_TEX_FACE_POS_Y 2
-#define PIPE_TEX_FACE_NEG_Y 3
-#define PIPE_TEX_FACE_POS_Z 4
-#define PIPE_TEX_FACE_NEG_Z 5
-#define PIPE_TEX_FACE_MAX 6
-
-#define PIPE_TEX_WRAP_REPEAT 0
-#define PIPE_TEX_WRAP_CLAMP 1
-#define PIPE_TEX_WRAP_CLAMP_TO_EDGE 2
-#define PIPE_TEX_WRAP_CLAMP_TO_BORDER 3
-#define PIPE_TEX_WRAP_MIRROR_REPEAT 4
-#define PIPE_TEX_WRAP_MIRROR_CLAMP 5
-#define PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE 6
-#define PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER 7
+enum pipe_texture_target
+{
+ PIPE_BUFFER,
+ PIPE_TEXTURE_1D,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_3D,
+ PIPE_TEXTURE_CUBE,
+ PIPE_TEXTURE_RECT,
+ PIPE_TEXTURE_1D_ARRAY,
+ PIPE_TEXTURE_2D_ARRAY,
+ PIPE_TEXTURE_CUBE_ARRAY,
+ PIPE_MAX_TEXTURE_TYPES,
+};
+
+enum pipe_tex_face {
+ PIPE_TEX_FACE_POS_X,
+ PIPE_TEX_FACE_NEG_X,
+ PIPE_TEX_FACE_POS_Y,
+ PIPE_TEX_FACE_NEG_Y,
+ PIPE_TEX_FACE_POS_Z,
+ PIPE_TEX_FACE_NEG_Z,
+ PIPE_TEX_FACE_MAX,
+};
+
+enum pipe_tex_wrap {
+ PIPE_TEX_WRAP_REPEAT,
+ PIPE_TEX_WRAP_CLAMP,
+ PIPE_TEX_WRAP_CLAMP_TO_EDGE,
+ PIPE_TEX_WRAP_CLAMP_TO_BORDER,
+ PIPE_TEX_WRAP_MIRROR_REPEAT,
+ PIPE_TEX_WRAP_MIRROR_CLAMP,
+ PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE,
+ PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER,
+};
/* Between mipmaps, ie mipfilter
*/
-#define PIPE_TEX_MIPFILTER_NEAREST 0
-#define PIPE_TEX_MIPFILTER_LINEAR 1
-#define PIPE_TEX_MIPFILTER_NONE 2
+enum pipe_tex_mipfilter {
+ PIPE_TEX_MIPFILTER_NEAREST,
+ PIPE_TEX_MIPFILTER_LINEAR,
+ PIPE_TEX_MIPFILTER_NONE,
+};
/* Within a mipmap, ie min/mag filter
*/
-#define PIPE_TEX_FILTER_NEAREST 0
-#define PIPE_TEX_FILTER_LINEAR 1
+enum pipe_tex_filter {
+ PIPE_TEX_FILTER_NEAREST,
+ PIPE_TEX_FILTER_LINEAR,
+};
-#define PIPE_TEX_COMPARE_NONE 0
-#define PIPE_TEX_COMPARE_R_TO_TEXTURE 1
+enum pipe_tex_compare {
+ PIPE_TEX_COMPARE_NONE,
+ PIPE_TEX_COMPARE_R_TO_TEXTURE,
+};
/**
* Clear buffer bits
@@ -404,105 +425,114 @@ enum pipe_flush_flags {
#define PIPE_RESOURCE_FLAG_DRV_PRIV (1 << 16) /* driver/winsys private */
#define PIPE_RESOURCE_FLAG_ST_PRIV (1 << 24) /* state-tracker/winsys private */
-/* Hint about the expected lifecycle of a resource.
+/**
+ * Hint about the expected lifecycle of a resource.
* Sorted according to GPU vs CPU access.
*/
-#define PIPE_USAGE_DEFAULT 0 /* fast GPU access */
-#define PIPE_USAGE_IMMUTABLE 1 /* fast GPU access, immutable */
-#define PIPE_USAGE_DYNAMIC 2 /* uploaded data is used multiple times */
-#define PIPE_USAGE_STREAM 3 /* uploaded data is used once */
-#define PIPE_USAGE_STAGING 4 /* fast CPU access */
-
+enum pipe_resource_usage {
+ PIPE_USAGE_DEFAULT, /* fast GPU access */
+ PIPE_USAGE_IMMUTABLE, /* fast GPU access, immutable */
+ PIPE_USAGE_DYNAMIC, /* uploaded data is used multiple times */
+ PIPE_USAGE_STREAM, /* uploaded data is used once */
+ PIPE_USAGE_STAGING, /* fast CPU access */
+};
/**
* Shaders
*/
-#define PIPE_SHADER_VERTEX 0
-#define PIPE_SHADER_FRAGMENT 1
-#define PIPE_SHADER_GEOMETRY 2
-#define PIPE_SHADER_TESS_CTRL 3
-#define PIPE_SHADER_TESS_EVAL 4
-#define PIPE_SHADER_COMPUTE 5
-#define PIPE_SHADER_TYPES 6
-
+enum pipe_shader_type {
+ PIPE_SHADER_VERTEX,
+ PIPE_SHADER_FRAGMENT,
+ PIPE_SHADER_GEOMETRY,
+ PIPE_SHADER_TESS_CTRL,
+ PIPE_SHADER_TESS_EVAL,
+ PIPE_SHADER_COMPUTE,
+ PIPE_SHADER_TYPES,
+ PIPE_SHADER_INVALID,
+};
/**
* Primitive types:
*/
-#define PIPE_PRIM_POINTS 0
-#define PIPE_PRIM_LINES 1
-#define PIPE_PRIM_LINE_LOOP 2
-#define PIPE_PRIM_LINE_STRIP 3
-#define PIPE_PRIM_TRIANGLES 4
-#define PIPE_PRIM_TRIANGLE_STRIP 5
-#define PIPE_PRIM_TRIANGLE_FAN 6
-#define PIPE_PRIM_QUADS 7
-#define PIPE_PRIM_QUAD_STRIP 8
-#define PIPE_PRIM_POLYGON 9
-#define PIPE_PRIM_LINES_ADJACENCY 10
-#define PIPE_PRIM_LINE_STRIP_ADJACENCY 11
-#define PIPE_PRIM_TRIANGLES_ADJACENCY 12
-#define PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY 13
-#define PIPE_PRIM_PATCHES 14
-#define PIPE_PRIM_MAX 15
-
+enum pipe_prim_type {
+ PIPE_PRIM_POINTS,
+ PIPE_PRIM_LINES,
+ PIPE_PRIM_LINE_LOOP,
+ PIPE_PRIM_LINE_STRIP,
+ PIPE_PRIM_TRIANGLES,
+ PIPE_PRIM_TRIANGLE_STRIP,
+ PIPE_PRIM_TRIANGLE_FAN,
+ PIPE_PRIM_QUADS,
+ PIPE_PRIM_QUAD_STRIP,
+ PIPE_PRIM_POLYGON,
+ PIPE_PRIM_LINES_ADJACENCY,
+ PIPE_PRIM_LINE_STRIP_ADJACENCY,
+ PIPE_PRIM_TRIANGLES_ADJACENCY,
+ PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY,
+ PIPE_PRIM_PATCHES,
+ PIPE_PRIM_MAX,
+} ENUM_PACKED;
/**
* Tessellator spacing types
*/
-#define PIPE_TESS_SPACING_FRACTIONAL_ODD 0
-#define PIPE_TESS_SPACING_FRACTIONAL_EVEN 1
-#define PIPE_TESS_SPACING_EQUAL 2
+enum pipe_tess_spacing {
+ PIPE_TESS_SPACING_FRACTIONAL_ODD,
+ PIPE_TESS_SPACING_FRACTIONAL_EVEN,
+ PIPE_TESS_SPACING_EQUAL,
+};
/**
* Query object types
*/
-#define PIPE_QUERY_OCCLUSION_COUNTER 0
-#define PIPE_QUERY_OCCLUSION_PREDICATE 1
-#define PIPE_QUERY_TIMESTAMP 2
-#define PIPE_QUERY_TIMESTAMP_DISJOINT 3
-#define PIPE_QUERY_TIME_ELAPSED 4
-#define PIPE_QUERY_PRIMITIVES_GENERATED 5
-#define PIPE_QUERY_PRIMITIVES_EMITTED 6
-#define PIPE_QUERY_SO_STATISTICS 7
-#define PIPE_QUERY_SO_OVERFLOW_PREDICATE 8
-#define PIPE_QUERY_GPU_FINISHED 9
-#define PIPE_QUERY_PIPELINE_STATISTICS 10
-#define PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE 11
-#define PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE 12
-#define PIPE_QUERY_TYPES 13
-
-/* start of driver queries,
- * see pipe_screen::get_driver_query_info */
-#define PIPE_QUERY_DRIVER_SPECIFIC 256
-
+enum pipe_query_type {
+ PIPE_QUERY_OCCLUSION_COUNTER,
+ PIPE_QUERY_OCCLUSION_PREDICATE,
+ PIPE_QUERY_TIMESTAMP,
+ PIPE_QUERY_TIMESTAMP_DISJOINT,
+ PIPE_QUERY_TIME_ELAPSED,
+ PIPE_QUERY_PRIMITIVES_GENERATED,
+ PIPE_QUERY_PRIMITIVES_EMITTED,
+ PIPE_QUERY_SO_STATISTICS,
+ PIPE_QUERY_SO_OVERFLOW_PREDICATE,
+ PIPE_QUERY_GPU_FINISHED,
+ PIPE_QUERY_PIPELINE_STATISTICS,
+ PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE,
+ PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE,
+ PIPE_QUERY_TYPES,
+ /* start of driver queries, see pipe_screen::get_driver_query_info */
+ PIPE_QUERY_DRIVER_SPECIFIC = 256,
+};
/**
* Conditional rendering modes
*/
-#define PIPE_RENDER_COND_WAIT 0
-#define PIPE_RENDER_COND_NO_WAIT 1
-#define PIPE_RENDER_COND_BY_REGION_WAIT 2
-#define PIPE_RENDER_COND_BY_REGION_NO_WAIT 3
-
+enum pipe_render_cond_flag {
+ PIPE_RENDER_COND_WAIT,
+ PIPE_RENDER_COND_NO_WAIT,
+ PIPE_RENDER_COND_BY_REGION_WAIT,
+ PIPE_RENDER_COND_BY_REGION_NO_WAIT,
+};
/**
* Point sprite coord modes
*/
-#define PIPE_SPRITE_COORD_UPPER_LEFT 0
-#define PIPE_SPRITE_COORD_LOWER_LEFT 1
-
+enum pipe_sprite_coord_mode {
+ PIPE_SPRITE_COORD_UPPER_LEFT,
+ PIPE_SPRITE_COORD_LOWER_LEFT,
+};
/**
* Texture swizzles
*/
-#define PIPE_SWIZZLE_RED 0
-#define PIPE_SWIZZLE_GREEN 1
-#define PIPE_SWIZZLE_BLUE 2
-#define PIPE_SWIZZLE_ALPHA 3
-#define PIPE_SWIZZLE_ZERO 4
-#define PIPE_SWIZZLE_ONE 5
-
+enum pipe_swizzle {
+ PIPE_SWIZZLE_RED,
+ PIPE_SWIZZLE_GREEN,
+ PIPE_SWIZZLE_BLUE,
+ PIPE_SWIZZLE_ALPHA,
+ PIPE_SWIZZLE_ZERO,
+ PIPE_SWIZZLE_ONE,
+};
#define PIPE_TIMEOUT_INFINITE 0xffffffffffffffffull
@@ -624,9 +654,9 @@ enum pipe_cap {
enum pipe_endian {
PIPE_ENDIAN_LITTLE = 0,
PIPE_ENDIAN_BIG = 1,
-#if defined(PIPE_ARCH_LITTLE_ENDIAN)
+#if UTIL_ARCH_LITTLE_ENDIAN
PIPE_ENDIAN_NATIVE = PIPE_ENDIAN_LITTLE
-#elif defined(PIPE_ARCH_BIG_ENDIAN)
+#elif UTIL_ARCH_BIG_ENDIAN
PIPE_ENDIAN_NATIVE = PIPE_ENDIAN_BIG
#endif
};
diff --git a/src/gallium/include/pipe/p_format.h b/src/gallium/include/pipe/p_format.h
index f9b7c671..ff35c1e3 100644
--- a/src/gallium/include/pipe/p_format.h
+++ b/src/gallium/include/pipe/p_format.h
@@ -134,6 +134,7 @@ extern "C" {
/* sRGB formats */
#define PIPE_FORMAT_L8_SRGB VIRGL_FORMAT_L8_SRGB
#define PIPE_FORMAT_L8A8_SRGB VIRGL_FORMAT_L8A8_SRGB
+#define PIPE_FORMAT_R8G8_SRGB VIRGL_FORMAT_R8G8_SRGB
#define PIPE_FORMAT_R8G8B8_SRGB VIRGL_FORMAT_R8G8B8_SRGB
#define PIPE_FORMAT_A8B8G8R8_SRGB VIRGL_FORMAT_A8B8G8R8_SRGB
#define PIPE_FORMAT_X8B8G8R8_SRGB VIRGL_FORMAT_X8B8G8R8_SRGB
@@ -402,7 +403,7 @@ extern "C" {
#define PIPE_FORMAT_COUNT VIRGL_FORMAT_MAX
-#if defined(PIPE_ARCH_LITTLE_ENDIAN)
+#if UTIL_ARCH_LITTLE_ENDIAN
#define PIPE_FORMAT_RGBA8888_UNORM PIPE_FORMAT_R8G8B8A8_UNORM
#define PIPE_FORMAT_RGBX8888_UNORM PIPE_FORMAT_R8G8B8X8_UNORM
#define PIPE_FORMAT_BGRA8888_UNORM PIPE_FORMAT_B8G8R8A8_UNORM
@@ -439,7 +440,7 @@ extern "C" {
#define PIPE_FORMAT_GR1616_UNORM PIPE_FORMAT_G16R16_UNORM
#define PIPE_FORMAT_RG1616_SNORM PIPE_FORMAT_R16G16_SNORM
#define PIPE_FORMAT_GR1616_SNORM PIPE_FORMAT_G16R16_SNORM
-#elif defined(PIPE_ARCH_BIG_ENDIAN)
+#elif UTIL_ARCH_BIG_ENDIAN
#define PIPE_FORMAT_ABGR8888_UNORM PIPE_FORMAT_R8G8B8A8_UNORM
#define PIPE_FORMAT_XBGR8888_UNORM PIPE_FORMAT_R8G8B8X8_UNORM
#define PIPE_FORMAT_ARGB8888_UNORM PIPE_FORMAT_B8G8R8A8_UNORM
diff --git a/src/gallium/include/pipe/p_screen.h b/src/gallium/include/pipe/p_screen.h
deleted file mode 100644
index cf958d26..00000000
--- a/src/gallium/include/pipe/p_screen.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2007 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * @file
- *
- * Screen, Adapter or GPU
- *
- * These are driver functions/facilities that are context independent.
- */
-
-
-#ifndef P_SCREEN_H
-#define P_SCREEN_H
-
-
-#include "pipe/p_compiler.h"
-#include "pipe/p_format.h"
-#include "pipe/p_defines.h"
-#include "pipe/p_video_enums.h"
-
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/** Opaque types */
-struct winsys_handle;
-struct pipe_fence_handle;
-struct pipe_resource;
-struct pipe_surface;
-struct pipe_transfer;
-struct pipe_box;
-
-
-/**
- * Gallium screen/adapter context. Basically everything
- * hardware-specific that doesn't actually require a rendering
- * context.
- */
-struct pipe_screen {
- void (*destroy)( struct pipe_screen * );
-
- const char *(*get_name)( struct pipe_screen * );
-
- const char *(*get_vendor)( struct pipe_screen * );
-
- /**
- * Query an integer-valued capability/parameter/limit
- * \param param one of PIPE_CAP_x
- */
- int (*get_param)( struct pipe_screen *, enum pipe_cap param );
-
- /**
- * Query a float-valued capability/parameter/limit
- * \param param one of PIPE_CAP_x
- */
- float (*get_paramf)( struct pipe_screen *, enum pipe_capf param );
-
- /**
- * Query a per-shader-stage integer-valued capability/parameter/limit
- * \param param one of PIPE_CAP_x
- */
- int (*get_shader_param)( struct pipe_screen *, unsigned shader, enum pipe_shader_cap param );
-
- /**
- * Query an integer-valued capability/parameter/limit for a codec/profile
- * \param param one of PIPE_VIDEO_CAP_x
- */
- int (*get_video_param)( struct pipe_screen *,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint,
- enum pipe_video_cap param );
-
- /**
- * Query a compute-specific capability/parameter/limit.
- * \param param one of PIPE_COMPUTE_CAP_x
- * \param ret pointer to a preallocated buffer that will be
- * initialized to the parameter value, or NULL.
- * \return size in bytes of the parameter value that would be
- * returned.
- */
- int (*get_compute_param)(struct pipe_screen *,
- enum pipe_compute_cap param,
- void *ret);
-
- /**
- * Query a timestamp in nanoseconds. The returned value should match
- * PIPE_QUERY_TIMESTAMP. This function returns immediately and doesn't
- * wait for rendering to complete (which cannot be achieved with queries).
- */
- uint64_t (*get_timestamp)(struct pipe_screen *);
-
- struct pipe_context * (*context_create)( struct pipe_screen *,
- void *priv );
-
- /**
- * Check if the given pipe_format is supported as a texture or
- * drawing surface.
- * \param bindings bitmask of PIPE_BIND_*
- */
- boolean (*is_format_supported)( struct pipe_screen *,
- enum pipe_format format,
- enum pipe_texture_target target,
- unsigned sample_count,
- unsigned bindings );
-
- /**
- * Check if the given pipe_format is supported as output for this codec/profile.
- * \param profile profile to check, may also be PIPE_VIDEO_PROFILE_UNKNOWN
- */
- boolean (*is_video_format_supported)( struct pipe_screen *,
- enum pipe_format format,
- enum pipe_video_profile profile,
- enum pipe_video_entrypoint entrypoint );
-
- /**
- * Check if we can actually create the given resource (test the dimension,
- * overall size, etc). Used to implement proxy textures.
- * \return TRUE if size is OK, FALSE if too large.
- */
- boolean (*can_create_resource)(struct pipe_screen *screen,
- const struct pipe_resource *templat);
-
- /**
- * Create a new texture object, using the given template info.
- */
- struct pipe_resource * (*resource_create)(struct pipe_screen *,
- const struct pipe_resource *templat);
-
- /**
- * Create a texture from a winsys_handle. The handle is often created in
- * another process by first creating a pipe texture and then calling
- * resource_get_handle.
- */
- struct pipe_resource * (*resource_from_handle)(struct pipe_screen *,
- const struct pipe_resource *templat,
- struct winsys_handle *handle);
-
- /**
- * Get a winsys_handle from a texture. Some platforms/winsys requires
- * that the texture is created with a special usage flag like
- * DISPLAYTARGET or PRIMARY.
- */
- boolean (*resource_get_handle)(struct pipe_screen *,
- struct pipe_resource *tex,
- struct winsys_handle *handle);
-
-
- void (*resource_destroy)(struct pipe_screen *,
- struct pipe_resource *pt);
-
-
- /**
- * Do any special operations to ensure frontbuffer contents are
- * displayed, eg copy fake frontbuffer.
- * \param winsys_drawable_handle an opaque handle that the calling context
- * gets out-of-band
- * \param subbox an optional sub region to flush
- */
- void (*flush_frontbuffer)( struct pipe_screen *screen,
- struct pipe_resource *resource,
- unsigned level, unsigned layer,
- void *winsys_drawable_handle,
- struct pipe_box *subbox );
-
- /** Set ptr = fence, with reference counting */
- void (*fence_reference)( struct pipe_screen *screen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence );
-
- /**
- * Checks whether the fence has been signalled.
- */
- boolean (*fence_signalled)( struct pipe_screen *screen,
- struct pipe_fence_handle *fence );
-
- /**
- * Wait for the fence to finish.
- * \param timeout in nanoseconds (may be PIPE_TIMEOUT_INFINITE).
- */
- boolean (*fence_finish)( struct pipe_screen *screen,
- struct pipe_fence_handle *fence,
- uint64_t timeout );
-
- /**
- * Returns a driver-specific query.
- *
- * If \p info is NULL, the number of available queries is returned.
- * Otherwise, the driver query at the specified \p index is returned
- * in \p info. The function returns non-zero on success.
- */
- int (*get_driver_query_info)(struct pipe_screen *screen,
- unsigned index,
- struct pipe_driver_query_info *info);
-
-};
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* P_SCREEN_H */
diff --git a/src/gallium/include/pipe/p_shader_tokens.h b/src/gallium/include/pipe/p_shader_tokens.h
index 58606430..a5210698 100644
--- a/src/gallium/include/pipe/p_shader_tokens.h
+++ b/src/gallium/include/pipe/p_shader_tokens.h
@@ -40,12 +40,14 @@ struct tgsi_header
unsigned BodySize : 24;
};
-#define TGSI_PROCESSOR_FRAGMENT 0
-#define TGSI_PROCESSOR_VERTEX 1
-#define TGSI_PROCESSOR_GEOMETRY 2
-#define TGSI_PROCESSOR_TESS_CTRL 3
-#define TGSI_PROCESSOR_TESS_EVAL 4
-#define TGSI_PROCESSOR_COMPUTE 5
+enum tgsi_processor_type {
+ TGSI_PROCESSOR_FRAGMENT,
+ TGSI_PROCESSOR_VERTEX,
+ TGSI_PROCESSOR_GEOMETRY,
+ TGSI_PROCESSOR_TESS_CTRL,
+ TGSI_PROCESSOR_TESS_EVAL,
+ TGSI_PROCESSOR_COMPUTE,
+};
struct tgsi_processor
{
@@ -53,10 +55,12 @@ struct tgsi_processor
unsigned Padding : 28;
};
-#define TGSI_TOKEN_TYPE_DECLARATION 0
-#define TGSI_TOKEN_TYPE_IMMEDIATE 1
-#define TGSI_TOKEN_TYPE_INSTRUCTION 2
-#define TGSI_TOKEN_TYPE_PROPERTY 3
+enum tgsi_token_type {
+ TGSI_TOKEN_TYPE_DECLARATION,
+ TGSI_TOKEN_TYPE_IMMEDIATE,
+ TGSI_TOKEN_TYPE_INSTRUCTION,
+ TGSI_TOKEN_TYPE_PROPERTY,
+};
struct tgsi_token
{
@@ -102,16 +106,20 @@ enum tgsi_file_type {
#define TGSI_WRITEMASK_YZW 0x0E
#define TGSI_WRITEMASK_XYZW 0x0F
-#define TGSI_INTERPOLATE_CONSTANT 0
-#define TGSI_INTERPOLATE_LINEAR 1
-#define TGSI_INTERPOLATE_PERSPECTIVE 2
-#define TGSI_INTERPOLATE_COLOR 3 /* special color case for smooth/flat */
-#define TGSI_INTERPOLATE_COUNT 4
+enum tgsi_interpolate_mode {
+ TGSI_INTERPOLATE_CONSTANT,
+ TGSI_INTERPOLATE_LINEAR,
+ TGSI_INTERPOLATE_PERSPECTIVE,
+ TGSI_INTERPOLATE_COLOR, /* special color case for smooth/flat */
+ TGSI_INTERPOLATE_COUNT,
+};
-#define TGSI_INTERPOLATE_LOC_CENTER 0
-#define TGSI_INTERPOLATE_LOC_CENTROID 1
-#define TGSI_INTERPOLATE_LOC_SAMPLE 2
-#define TGSI_INTERPOLATE_LOC_COUNT 3
+enum tgsi_interpolate_loc {
+ TGSI_INTERPOLATE_LOC_CENTER,
+ TGSI_INTERPOLATE_LOC_CENTROID,
+ TGSI_INTERPOLATE_LOC_SAMPLE,
+ TGSI_INTERPOLATE_LOC_COUNT,
+};
#define TGSI_CYLINDRICAL_WRAP_X (1 << 0)
#define TGSI_CYLINDRICAL_WRAP_Y (1 << 1)
@@ -163,43 +171,45 @@ struct tgsi_declaration_interp
unsigned Padding : 22;
};
-#define TGSI_SEMANTIC_POSITION 0
-#define TGSI_SEMANTIC_COLOR 1
-#define TGSI_SEMANTIC_BCOLOR 2 /**< back-face color */
-#define TGSI_SEMANTIC_FOG 3
-#define TGSI_SEMANTIC_PSIZE 4
-#define TGSI_SEMANTIC_GENERIC 5
-#define TGSI_SEMANTIC_NORMAL 6
-#define TGSI_SEMANTIC_FACE 7
-#define TGSI_SEMANTIC_EDGEFLAG 8
-#define TGSI_SEMANTIC_PRIMID 9
-#define TGSI_SEMANTIC_INSTANCEID 10 /**< doesn't include start_instance */
-#define TGSI_SEMANTIC_VERTEXID 11
-#define TGSI_SEMANTIC_STENCIL 12
-#define TGSI_SEMANTIC_CLIPDIST 13
-#define TGSI_SEMANTIC_CLIPVERTEX 14
-#define TGSI_SEMANTIC_GRID_SIZE 15 /**< grid size in blocks */
-#define TGSI_SEMANTIC_BLOCK_ID 16 /**< id of the current block */
-#define TGSI_SEMANTIC_BLOCK_SIZE 17 /**< block size in threads */
-#define TGSI_SEMANTIC_THREAD_ID 18 /**< block-relative id of the current thread */
-#define TGSI_SEMANTIC_TEXCOORD 19 /**< texture or sprite coordinates */
-#define TGSI_SEMANTIC_PCOORD 20 /**< point sprite coordinate */
-#define TGSI_SEMANTIC_VIEWPORT_INDEX 21 /**< viewport index */
-#define TGSI_SEMANTIC_LAYER 22 /**< layer (rendertarget index) */
-#define TGSI_SEMANTIC_CULLDIST 23
-#define TGSI_SEMANTIC_SAMPLEID 24
-#define TGSI_SEMANTIC_SAMPLEPOS 25
-#define TGSI_SEMANTIC_SAMPLEMASK 26
-#define TGSI_SEMANTIC_INVOCATIONID 27
-#define TGSI_SEMANTIC_VERTEXID_NOBASE 28
-#define TGSI_SEMANTIC_BASEVERTEX 29
-#define TGSI_SEMANTIC_PATCH 30 /**< generic per-patch semantic */
-#define TGSI_SEMANTIC_TESSCOORD 31 /**< coordinate being processed by tess */
-#define TGSI_SEMANTIC_TESSOUTER 32 /**< outer tessellation levels */
-#define TGSI_SEMANTIC_TESSINNER 33 /**< inner tessellation levels */
-#define TGSI_SEMANTIC_VERTICESIN 34 /**< number of input vertices */
-#define TGSI_SEMANTIC_HELPER_INVOCATION 35 /**< current invocation is helper */
-#define TGSI_SEMANTIC_COUNT 36 /**< number of semantic values */
+enum tgsi_semantic {
+ TGSI_SEMANTIC_POSITION,
+ TGSI_SEMANTIC_COLOR,
+ TGSI_SEMANTIC_BCOLOR, /**< back-face color */
+ TGSI_SEMANTIC_FOG,
+ TGSI_SEMANTIC_PSIZE,
+ TGSI_SEMANTIC_GENERIC,
+ TGSI_SEMANTIC_NORMAL,
+ TGSI_SEMANTIC_FACE,
+ TGSI_SEMANTIC_EDGEFLAG,
+ TGSI_SEMANTIC_PRIMID,
+ TGSI_SEMANTIC_INSTANCEID, /**< doesn't include start_instance */
+ TGSI_SEMANTIC_VERTEXID,
+ TGSI_SEMANTIC_STENCIL,
+ TGSI_SEMANTIC_CLIPDIST,
+ TGSI_SEMANTIC_CLIPVERTEX,
+ TGSI_SEMANTIC_GRID_SIZE, /**< grid size in blocks */
+ TGSI_SEMANTIC_BLOCK_ID, /**< id of the current block */
+ TGSI_SEMANTIC_BLOCK_SIZE, /**< block size in threads */
+ TGSI_SEMANTIC_THREAD_ID, /**< block-relative id of the current thread */
+ TGSI_SEMANTIC_TEXCOORD, /**< texture or sprite coordinates */
+ TGSI_SEMANTIC_PCOORD, /**< point sprite coordinate */
+ TGSI_SEMANTIC_VIEWPORT_INDEX, /**< viewport index */
+ TGSI_SEMANTIC_LAYER, /**< layer (rendertarget index) */
+ TGSI_SEMANTIC_CULLDIST,
+ TGSI_SEMANTIC_SAMPLEID,
+ TGSI_SEMANTIC_SAMPLEPOS,
+ TGSI_SEMANTIC_SAMPLEMASK,
+ TGSI_SEMANTIC_INVOCATIONID,
+ TGSI_SEMANTIC_VERTEXID_NOBASE,
+ TGSI_SEMANTIC_BASEVERTEX,
+ TGSI_SEMANTIC_PATCH, /**< generic per-patch semantic */
+ TGSI_SEMANTIC_TESSCOORD, /**< coordinate being processed by tess */
+ TGSI_SEMANTIC_TESSOUTER, /**< outer tessellation levels */
+ TGSI_SEMANTIC_TESSINNER, /**< inner tessellation levels */
+ TGSI_SEMANTIC_VERTICESIN, /**< number of input vertices */
+ TGSI_SEMANTIC_HELPER_INVOCATION, /**< current invocation is helper */
+ TGSI_SEMANTIC_COUNT, /**< number of semantic values */
+};
struct tgsi_declaration_semantic
{
@@ -250,10 +260,14 @@ struct tgsi_declaration_array {
#define TGSI_RESOURCE_PRIVATE 0x7ffd
#define TGSI_RESOURCE_INPUT 0x7ffc
-#define TGSI_IMM_FLOAT32 0
-#define TGSI_IMM_UINT32 1
-#define TGSI_IMM_INT32 2
-#define TGSI_IMM_FLOAT64 3
+enum tgsi_imm_type {
+ TGSI_IMM_FLOAT32,
+ TGSI_IMM_UINT32,
+ TGSI_IMM_INT32,
+ TGSI_IMM_FLOAT64,
+ TGSI_IMM_UINT64,
+ TGSI_IMM_INT64,
+};
struct tgsi_immediate
{
@@ -270,35 +284,38 @@ union tgsi_immediate_data
int Int;
};
-#define TGSI_PROPERTY_GS_INPUT_PRIM 0
-#define TGSI_PROPERTY_GS_OUTPUT_PRIM 1
-#define TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES 2
-#define TGSI_PROPERTY_FS_COORD_ORIGIN 3
-#define TGSI_PROPERTY_FS_COORD_PIXEL_CENTER 4
-#define TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS 5
-#define TGSI_PROPERTY_FS_DEPTH_LAYOUT 6
-#define TGSI_PROPERTY_VS_PROHIBIT_UCPS 7
-#define TGSI_PROPERTY_GS_INVOCATIONS 8
-#define TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION 9
-#define TGSI_PROPERTY_TCS_VERTICES_OUT 10
-#define TGSI_PROPERTY_TES_PRIM_MODE 11
-#define TGSI_PROPERTY_TES_SPACING 12
-#define TGSI_PROPERTY_TES_VERTEX_ORDER_CW 13
-#define TGSI_PROPERTY_TES_POINT_MODE 14
-#define TGSI_PROPERTY_NUM_CLIPDIST_ENABLED 15
-#define TGSI_PROPERTY_NUM_CULLDIST_ENABLED 16
-#define TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL 17
-#define TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE 18
-#define TGSI_PROPERTY_NEXT_SHADER 19
-#define TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH 20
-#define TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT 21
-#define TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH 22
-#define TGSI_PROPERTY_MUL_ZERO_WINS 23
-#define TGSI_PROPERTY_VS_BLIT_SGPRS_AMD 24
-#define TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD 25
-#define TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE 26
-#define TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED 27
-#define TGSI_PROPERTY_COUNT 28
+enum tgsi_property_name {
+ TGSI_PROPERTY_GS_INPUT_PRIM,
+ TGSI_PROPERTY_GS_OUTPUT_PRIM,
+ TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
+ TGSI_PROPERTY_FS_COORD_ORIGIN,
+ TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
+ TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS,
+ TGSI_PROPERTY_FS_DEPTH_LAYOUT,
+ TGSI_PROPERTY_VS_PROHIBIT_UCPS,
+ TGSI_PROPERTY_GS_INVOCATIONS,
+ TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION,
+ TGSI_PROPERTY_TCS_VERTICES_OUT,
+ TGSI_PROPERTY_TES_PRIM_MODE,
+ TGSI_PROPERTY_TES_SPACING,
+ TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
+ TGSI_PROPERTY_TES_POINT_MODE,
+ TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
+ TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
+ TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL,
+ TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE,
+ TGSI_PROPERTY_NEXT_SHADER,
+ TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
+ TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
+ TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
+ TGSI_PROPERTY_MUL_ZERO_WINS,
+ TGSI_PROPERTY_VS_BLIT_SGPRS_AMD,
+ TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD,
+ TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE,
+ TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED,
+ TGSI_PROPERTY_SEPARABLE_PROGRAM,
+ TGSI_PROPERTY_COUNT,
+};
struct tgsi_property {
unsigned Type : 4; /**< TGSI_TOKEN_TYPE_PROPERTY */
@@ -307,18 +324,23 @@ struct tgsi_property {
unsigned Padding : 12;
};
-#define TGSI_FS_COORD_ORIGIN_UPPER_LEFT 0
-#define TGSI_FS_COORD_ORIGIN_LOWER_LEFT 1
-
-#define TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER 0
-#define TGSI_FS_COORD_PIXEL_CENTER_INTEGER 1
+enum tgsi_fs_coord_origin {
+ TGSI_FS_COORD_ORIGIN_UPPER_LEFT,
+ TGSI_FS_COORD_ORIGIN_LOWER_LEFT,
+};
-#define TGSI_FS_DEPTH_LAYOUT_NONE 0
-#define TGSI_FS_DEPTH_LAYOUT_ANY 1
-#define TGSI_FS_DEPTH_LAYOUT_GREATER 2
-#define TGSI_FS_DEPTH_LAYOUT_LESS 3
-#define TGSI_FS_DEPTH_LAYOUT_UNCHANGED 4
+enum tgsi_fs_coord_pixcenter {
+ TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER,
+ TGSI_FS_COORD_PIXEL_CENTER_INTEGER,
+};
+enum tgsi_fs_depth_layout {
+ TGSI_FS_DEPTH_LAYOUT_NONE,
+ TGSI_FS_DEPTH_LAYOUT_ANY,
+ TGSI_FS_DEPTH_LAYOUT_GREATER,
+ TGSI_FS_DEPTH_LAYOUT_LESS,
+ TGSI_FS_DEPTH_LAYOUT_UNCHANGED,
+};
struct tgsi_property_data {
unsigned Data;
@@ -334,277 +356,278 @@ struct tgsi_property_data {
* OR REMOVE OPCODES - FILL in and REWRITE tgsi_info
* accordingly.
*/
-#define TGSI_OPCODE_ARL 0
-#define TGSI_OPCODE_MOV 1
-#define TGSI_OPCODE_LIT 2
-#define TGSI_OPCODE_RCP 3
-#define TGSI_OPCODE_RSQ 4
-#define TGSI_OPCODE_EXP 5
-#define TGSI_OPCODE_LOG 6
-#define TGSI_OPCODE_MUL 7
-#define TGSI_OPCODE_ADD 8
-#define TGSI_OPCODE_DP3 9
-#define TGSI_OPCODE_DP4 10
-#define TGSI_OPCODE_DST 11
-#define TGSI_OPCODE_MIN 12
-#define TGSI_OPCODE_MAX 13
-#define TGSI_OPCODE_SLT 14
-#define TGSI_OPCODE_SGE 15
-#define TGSI_OPCODE_MAD 16
-#define TGSI_OPCODE_SUB 17
-#define TGSI_OPCODE_LRP 18
-#define TGSI_OPCODE_FMA 19
-#define TGSI_OPCODE_SQRT 20
+enum tgsi_opcode {
+ TGSI_OPCODE_ARL = 0,
+ TGSI_OPCODE_MOV = 1,
+ TGSI_OPCODE_LIT = 2,
+ TGSI_OPCODE_RCP = 3,
+ TGSI_OPCODE_RSQ = 4,
+ TGSI_OPCODE_EXP = 5,
+ TGSI_OPCODE_LOG = 6,
+ TGSI_OPCODE_MUL = 7,
+ TGSI_OPCODE_ADD = 8,
+ TGSI_OPCODE_DP3 = 9,
+ TGSI_OPCODE_DP4 = 10,
+ TGSI_OPCODE_DST = 11,
+ TGSI_OPCODE_MIN = 12,
+ TGSI_OPCODE_MAX = 13,
+ TGSI_OPCODE_SLT = 14,
+ TGSI_OPCODE_SGE = 15,
+ TGSI_OPCODE_MAD = 16,
+ TGSI_OPCODE_SUB = 17,
+ TGSI_OPCODE_LRP = 18,
+ TGSI_OPCODE_FMA = 19,
+ TGSI_OPCODE_SQRT = 20,
+ /* gap */
+ TGSI_OPCODE_FRC = 24,
/* gap */
-#define TGSI_OPCODE_FRC 24
+ TGSI_OPCODE_FLR = 26,
+ TGSI_OPCODE_ROUND = 27,
+ TGSI_OPCODE_EX2 = 28,
+ TGSI_OPCODE_LG2 = 29,
+ TGSI_OPCODE_POW = 30,
+ TGSI_OPCODE_XPD = 31,
/* gap */
-#define TGSI_OPCODE_FLR 26
-#define TGSI_OPCODE_ROUND 27
-#define TGSI_OPCODE_EX2 28
-#define TGSI_OPCODE_LG2 29
-#define TGSI_OPCODE_POW 30
-#define TGSI_OPCODE_XPD 31
+ TGSI_OPCODE_ABS = 33,
/* gap */
-#define TGSI_OPCODE_ABS 33
+ TGSI_OPCODE_DPH = 35,
+ TGSI_OPCODE_COS = 36,
+ TGSI_OPCODE_DDX = 37,
+ TGSI_OPCODE_DDY = 38,
+ TGSI_OPCODE_KILL = 39 /* unconditional */,
+ TGSI_OPCODE_PK2H = 40,
+ TGSI_OPCODE_PK2US = 41,
+ TGSI_OPCODE_PK4B = 42,
+ TGSI_OPCODE_PK4UB = 43,
/* gap */
-#define TGSI_OPCODE_DPH 35
-#define TGSI_OPCODE_COS 36
-#define TGSI_OPCODE_DDX 37
-#define TGSI_OPCODE_DDY 38
-#define TGSI_OPCODE_KILL 39 /* unconditional */
-#define TGSI_OPCODE_PK2H 40
-#define TGSI_OPCODE_PK2US 41
-#define TGSI_OPCODE_PK4B 42
-#define TGSI_OPCODE_PK4UB 43
+ TGSI_OPCODE_SEQ = 45,
/* gap */
-#define TGSI_OPCODE_SEQ 45
+ TGSI_OPCODE_SGT = 47,
+ TGSI_OPCODE_SIN = 48,
+ TGSI_OPCODE_SLE = 49,
+ TGSI_OPCODE_SNE = 50,
/* gap */
-#define TGSI_OPCODE_SGT 47
-#define TGSI_OPCODE_SIN 48
-#define TGSI_OPCODE_SLE 49
-#define TGSI_OPCODE_SNE 50
+ TGSI_OPCODE_TEX = 52,
+ TGSI_OPCODE_TXD = 53,
+ TGSI_OPCODE_TXP = 54,
+ TGSI_OPCODE_UP2H = 55,
+ TGSI_OPCODE_UP2US = 56,
+ TGSI_OPCODE_UP4B = 57,
+ TGSI_OPCODE_UP4UB = 58,
/* gap */
-#define TGSI_OPCODE_TEX 52
-#define TGSI_OPCODE_TXD 53
-#define TGSI_OPCODE_TXP 54
-#define TGSI_OPCODE_UP2H 55
-#define TGSI_OPCODE_UP2US 56
-#define TGSI_OPCODE_UP4B 57
-#define TGSI_OPCODE_UP4UB 58
+ TGSI_OPCODE_ARR = 61,
/* gap */
-#define TGSI_OPCODE_ARR 61
+ TGSI_OPCODE_CAL = 63,
+ TGSI_OPCODE_RET = 64,
+ TGSI_OPCODE_SSG = 65 /* SGN */,
+ TGSI_OPCODE_CMP = 66,
+ TGSI_OPCODE_SCS = 67,
+ TGSI_OPCODE_TXB = 68,
+ TGSI_OPCODE_FBFETCH = 69,
+ TGSI_OPCODE_DIV = 70,
+ TGSI_OPCODE_DP2 = 71,
+ TGSI_OPCODE_TXL = 72,
+ TGSI_OPCODE_BRK = 73,
+ TGSI_OPCODE_IF = 74,
+ TGSI_OPCODE_UIF = 75,
/* gap */
-#define TGSI_OPCODE_CAL 63
-#define TGSI_OPCODE_RET 64
-#define TGSI_OPCODE_SSG 65 /* SGN */
-#define TGSI_OPCODE_CMP 66
-#define TGSI_OPCODE_SCS 67
-#define TGSI_OPCODE_TXB 68
-#define TGSI_OPCODE_FBFETCH 69
-#define TGSI_OPCODE_DIV 70
-#define TGSI_OPCODE_DP2 71
-#define TGSI_OPCODE_TXL 72
-#define TGSI_OPCODE_BRK 73
-#define TGSI_OPCODE_IF 74
-#define TGSI_OPCODE_UIF 75
-#define TGSI_OPCODE_ELSE 77
-#define TGSI_OPCODE_ENDIF 78
-
-#define TGSI_OPCODE_DDX_FINE 79
-#define TGSI_OPCODE_DDY_FINE 80
+ TGSI_OPCODE_ELSE = 77,
+ TGSI_OPCODE_ENDIF = 78,
+ TGSI_OPCODE_DDX_FINE = 79,
+ TGSI_OPCODE_DDY_FINE = 80,
/* gap */
-#define TGSI_OPCODE_CEIL 83
-#define TGSI_OPCODE_I2F 84
-#define TGSI_OPCODE_NOT 85
-#define TGSI_OPCODE_TRUNC 86
-#define TGSI_OPCODE_SHL 87
+ TGSI_OPCODE_CEIL = 83,
+ TGSI_OPCODE_I2F = 84,
+ TGSI_OPCODE_NOT = 85,
+ TGSI_OPCODE_TRUNC = 86,
+ TGSI_OPCODE_SHL = 87,
/* gap */
-#define TGSI_OPCODE_AND 89
-#define TGSI_OPCODE_OR 90
-#define TGSI_OPCODE_MOD 91
-#define TGSI_OPCODE_XOR 92
+ TGSI_OPCODE_AND = 89,
+ TGSI_OPCODE_OR = 90,
+ TGSI_OPCODE_MOD = 91,
+ TGSI_OPCODE_XOR = 92,
/* gap */
-#define TGSI_OPCODE_TXF 94
-#define TGSI_OPCODE_TXQ 95
-#define TGSI_OPCODE_CONT 96
-#define TGSI_OPCODE_EMIT 97
-#define TGSI_OPCODE_ENDPRIM 98
-#define TGSI_OPCODE_BGNLOOP 99
-#define TGSI_OPCODE_BGNSUB 100
-#define TGSI_OPCODE_ENDLOOP 101
-#define TGSI_OPCODE_ENDSUB 102
+ TGSI_OPCODE_TXF = 94,
+ TGSI_OPCODE_TXQ = 95,
+ TGSI_OPCODE_CONT = 96,
+ TGSI_OPCODE_EMIT = 97,
+ TGSI_OPCODE_ENDPRIM = 98,
+ TGSI_OPCODE_BGNLOOP = 99,
+ TGSI_OPCODE_BGNSUB = 100,
+ TGSI_OPCODE_ENDLOOP = 101,
+ TGSI_OPCODE_ENDSUB = 102,
/* gap */
-#define TGSI_OPCODE_TXQS 104
-#define TGSI_OPCODE_RESQ 105
+ TGSI_OPCODE_TXQS = 104,
+ TGSI_OPCODE_RESQ = 105,
/* gap */
-#define TGSI_OPCODE_NOP 107
+ TGSI_OPCODE_NOP = 107,
-#define TGSI_OPCODE_FSEQ 108
-#define TGSI_OPCODE_FSGE 109
-#define TGSI_OPCODE_FSLT 110
-#define TGSI_OPCODE_FSNE 111
+ TGSI_OPCODE_FSEQ = 108,
+ TGSI_OPCODE_FSGE = 109,
+ TGSI_OPCODE_FSLT = 110,
+ TGSI_OPCODE_FSNE = 111,
-#define TGSI_OPCODE_MEMBAR 112
+ TGSI_OPCODE_MEMBAR = 112,
/* gap */
-#define TGSI_OPCODE_KILL_IF 116 /* conditional kill */
-#define TGSI_OPCODE_END 117 /* aka HALT */
-#define TGSI_OPCODE_DFMA 118
-#define TGSI_OPCODE_F2I 119
-#define TGSI_OPCODE_IDIV 120
-#define TGSI_OPCODE_IMAX 121
-#define TGSI_OPCODE_IMIN 122
-#define TGSI_OPCODE_INEG 123
-#define TGSI_OPCODE_ISGE 124
-#define TGSI_OPCODE_ISHR 125
-#define TGSI_OPCODE_ISLT 126
-#define TGSI_OPCODE_F2U 127
-#define TGSI_OPCODE_U2F 128
-#define TGSI_OPCODE_UADD 129
-#define TGSI_OPCODE_UDIV 130
-#define TGSI_OPCODE_UMAD 131
-#define TGSI_OPCODE_UMAX 132
-#define TGSI_OPCODE_UMIN 133
-#define TGSI_OPCODE_UMOD 134
-#define TGSI_OPCODE_UMUL 135
-#define TGSI_OPCODE_USEQ 136
-#define TGSI_OPCODE_USGE 137
-#define TGSI_OPCODE_USHR 138
-#define TGSI_OPCODE_USLT 139
-#define TGSI_OPCODE_USNE 140
-#define TGSI_OPCODE_SWITCH 141
-#define TGSI_OPCODE_CASE 142
-#define TGSI_OPCODE_DEFAULT 143
-#define TGSI_OPCODE_ENDSWITCH 144
+ TGSI_OPCODE_KILL_IF = 116 /* conditional kill */,
+ TGSI_OPCODE_END = 117 /* aka HALT */,
+ TGSI_OPCODE_DFMA = 118,
+ TGSI_OPCODE_F2I = 119,
+ TGSI_OPCODE_IDIV = 120,
+ TGSI_OPCODE_IMAX = 121,
+ TGSI_OPCODE_IMIN = 122,
+ TGSI_OPCODE_INEG = 123,
+ TGSI_OPCODE_ISGE = 124,
+ TGSI_OPCODE_ISHR = 125,
+ TGSI_OPCODE_ISLT = 126,
+ TGSI_OPCODE_F2U = 127,
+ TGSI_OPCODE_U2F = 128,
+ TGSI_OPCODE_UADD = 129,
+ TGSI_OPCODE_UDIV = 130,
+ TGSI_OPCODE_UMAD = 131,
+ TGSI_OPCODE_UMAX = 132,
+ TGSI_OPCODE_UMIN = 133,
+ TGSI_OPCODE_UMOD = 134,
+ TGSI_OPCODE_UMUL = 135,
+ TGSI_OPCODE_USEQ = 136,
+ TGSI_OPCODE_USGE = 137,
+ TGSI_OPCODE_USHR = 138,
+ TGSI_OPCODE_USLT = 139,
+ TGSI_OPCODE_USNE = 140,
+ TGSI_OPCODE_SWITCH = 141,
+ TGSI_OPCODE_CASE = 142,
+ TGSI_OPCODE_DEFAULT = 143,
+ TGSI_OPCODE_ENDSWITCH = 144,
/* resource related opcodes */
-#define TGSI_OPCODE_SAMPLE 145
-#define TGSI_OPCODE_SAMPLE_I 146
-#define TGSI_OPCODE_SAMPLE_I_MS 147
-#define TGSI_OPCODE_SAMPLE_B 148
-#define TGSI_OPCODE_SAMPLE_C 149
-#define TGSI_OPCODE_SAMPLE_C_LZ 150
-#define TGSI_OPCODE_SAMPLE_D 151
-#define TGSI_OPCODE_SAMPLE_L 152
-#define TGSI_OPCODE_GATHER4 153
-#define TGSI_OPCODE_SVIEWINFO 154
-#define TGSI_OPCODE_SAMPLE_POS 155
-#define TGSI_OPCODE_SAMPLE_INFO 156
-
-#define TGSI_OPCODE_UARL 157
-#define TGSI_OPCODE_UCMP 158
-#define TGSI_OPCODE_IABS 159
-#define TGSI_OPCODE_ISSG 160
-
-#define TGSI_OPCODE_LOAD 161
-#define TGSI_OPCODE_STORE 162
-
+ TGSI_OPCODE_SAMPLE = 145,
+ TGSI_OPCODE_SAMPLE_I = 146,
+ TGSI_OPCODE_SAMPLE_I_MS = 147,
+ TGSI_OPCODE_SAMPLE_B = 148,
+ TGSI_OPCODE_SAMPLE_C = 149,
+ TGSI_OPCODE_SAMPLE_C_LZ = 150,
+ TGSI_OPCODE_SAMPLE_D = 151,
+ TGSI_OPCODE_SAMPLE_L = 152,
+ TGSI_OPCODE_GATHER4 = 153,
+ TGSI_OPCODE_SVIEWINFO = 154,
+ TGSI_OPCODE_SAMPLE_POS = 155,
+ TGSI_OPCODE_SAMPLE_INFO = 156,
+
+ TGSI_OPCODE_UARL = 157,
+ TGSI_OPCODE_UCMP = 158,
+ TGSI_OPCODE_IABS = 159,
+ TGSI_OPCODE_ISSG = 160,
+
+ TGSI_OPCODE_LOAD = 161,
+ TGSI_OPCODE_STORE = 162,
/* gap */
-#define TGSI_OPCODE_BARRIER 166
-
-#define TGSI_OPCODE_ATOMUADD 167
-#define TGSI_OPCODE_ATOMXCHG 168
-#define TGSI_OPCODE_ATOMCAS 169
-#define TGSI_OPCODE_ATOMAND 170
-#define TGSI_OPCODE_ATOMOR 171
-#define TGSI_OPCODE_ATOMXOR 172
-#define TGSI_OPCODE_ATOMUMIN 173
-#define TGSI_OPCODE_ATOMUMAX 174
-#define TGSI_OPCODE_ATOMIMIN 175
-#define TGSI_OPCODE_ATOMIMAX 176
+ TGSI_OPCODE_BARRIER = 166,
+
+ TGSI_OPCODE_ATOMUADD = 167,
+ TGSI_OPCODE_ATOMXCHG = 168,
+ TGSI_OPCODE_ATOMCAS = 169,
+ TGSI_OPCODE_ATOMAND = 170,
+ TGSI_OPCODE_ATOMOR = 171,
+ TGSI_OPCODE_ATOMXOR = 172,
+ TGSI_OPCODE_ATOMUMIN = 173,
+ TGSI_OPCODE_ATOMUMAX = 174,
+ TGSI_OPCODE_ATOMIMIN = 175,
+ TGSI_OPCODE_ATOMIMAX = 176,
/* to be used for shadow cube map compares */
-#define TGSI_OPCODE_TEX2 177
-#define TGSI_OPCODE_TXB2 178
-#define TGSI_OPCODE_TXL2 179
+ TGSI_OPCODE_TEX2 = 177,
+ TGSI_OPCODE_TXB2 = 178,
+ TGSI_OPCODE_TXL2 = 179,
-#define TGSI_OPCODE_IMUL_HI 180
-#define TGSI_OPCODE_UMUL_HI 181
+ TGSI_OPCODE_IMUL_HI = 180,
+ TGSI_OPCODE_UMUL_HI = 181,
-#define TGSI_OPCODE_TG4 182
+ TGSI_OPCODE_TG4 = 182,
-#define TGSI_OPCODE_LODQ 183
+ TGSI_OPCODE_LODQ = 183,
-#define TGSI_OPCODE_IBFE 184
-#define TGSI_OPCODE_UBFE 185
-#define TGSI_OPCODE_BFI 186
-#define TGSI_OPCODE_BREV 187
-#define TGSI_OPCODE_POPC 188
-#define TGSI_OPCODE_LSB 189
-#define TGSI_OPCODE_IMSB 190
-#define TGSI_OPCODE_UMSB 191
+ TGSI_OPCODE_IBFE = 184,
+ TGSI_OPCODE_UBFE = 185,
+ TGSI_OPCODE_BFI = 186,
+ TGSI_OPCODE_BREV = 187,
+ TGSI_OPCODE_POPC = 188,
+ TGSI_OPCODE_LSB = 189,
+ TGSI_OPCODE_IMSB = 190,
+ TGSI_OPCODE_UMSB = 191,
-#define TGSI_OPCODE_INTERP_CENTROID 192
-#define TGSI_OPCODE_INTERP_SAMPLE 193
-#define TGSI_OPCODE_INTERP_OFFSET 194
+ TGSI_OPCODE_INTERP_CENTROID = 192,
+ TGSI_OPCODE_INTERP_SAMPLE = 193,
+ TGSI_OPCODE_INTERP_OFFSET = 194,
/* sm5 marked opcodes are supported in D3D11 optionally - also DMOV, DMOVC */
-#define TGSI_OPCODE_F2D 195 /* SM5 */
-#define TGSI_OPCODE_D2F 196
-#define TGSI_OPCODE_DABS 197
-#define TGSI_OPCODE_DNEG 198 /* SM5 */
-#define TGSI_OPCODE_DADD 199 /* SM5 */
-#define TGSI_OPCODE_DMUL 200 /* SM5 */
-#define TGSI_OPCODE_DMAX 201 /* SM5 */
-#define TGSI_OPCODE_DMIN 202 /* SM5 */
-#define TGSI_OPCODE_DSLT 203 /* SM5 */
-#define TGSI_OPCODE_DSGE 204 /* SM5 */
-#define TGSI_OPCODE_DSEQ 205 /* SM5 */
-#define TGSI_OPCODE_DSNE 206 /* SM5 */
-#define TGSI_OPCODE_DRCP 207 /* eg, cayman */
-#define TGSI_OPCODE_DSQRT 208 /* eg, cayman also has DRSQ */
-#define TGSI_OPCODE_DMAD 209
-#define TGSI_OPCODE_DFRAC 210 /* eg, cayman */
-#define TGSI_OPCODE_DLDEXP 211 /* eg, cayman */
-#define TGSI_OPCODE_DFRACEXP 212 /* eg, cayman */
-#define TGSI_OPCODE_D2I 213
-#define TGSI_OPCODE_I2D 214
-#define TGSI_OPCODE_D2U 215
-#define TGSI_OPCODE_U2D 216
-#define TGSI_OPCODE_DRSQ 217 /* eg, cayman also has DRSQ */
-#define TGSI_OPCODE_DTRUNC 218 /* nvc0 */
-#define TGSI_OPCODE_DCEIL 219 /* nvc0 */
-#define TGSI_OPCODE_DFLR 220 /* nvc0 */
-#define TGSI_OPCODE_DROUND 221 /* nvc0 */
-#define TGSI_OPCODE_DSSG 222
-#define TGSI_OPCODE_DDIV 223
-#define TGSI_OPCODE_CLOCK 224
+ TGSI_OPCODE_F2D = 195 /* SM5 */,
+ TGSI_OPCODE_D2F = 196,
+ TGSI_OPCODE_DABS = 197,
+ TGSI_OPCODE_DNEG = 198 /* SM5 */,
+ TGSI_OPCODE_DADD = 199 /* SM5 */,
+ TGSI_OPCODE_DMUL = 200 /* SM5 */,
+ TGSI_OPCODE_DMAX = 201 /* SM5 */,
+ TGSI_OPCODE_DMIN = 202 /* SM5 */,
+ TGSI_OPCODE_DSLT = 203 /* SM5 */,
+ TGSI_OPCODE_DSGE = 204 /* SM5 */,
+ TGSI_OPCODE_DSEQ = 205 /* SM5 */,
+ TGSI_OPCODE_DSNE = 206 /* SM5 */,
+ TGSI_OPCODE_DRCP = 207 /* eg, cayman */,
+ TGSI_OPCODE_DSQRT = 208 /* eg, cayman also has DRSQ */,
+ TGSI_OPCODE_DMAD = 209,
+ TGSI_OPCODE_DFRAC = 210 /* eg, cayman */,
+ TGSI_OPCODE_DLDEXP = 211 /* eg, cayman */,
+ TGSI_OPCODE_DFRACEXP = 212 /* eg, cayman */,
+ TGSI_OPCODE_D2I = 213,
+ TGSI_OPCODE_I2D = 214,
+ TGSI_OPCODE_D2U = 215,
+ TGSI_OPCODE_U2D = 216,
+ TGSI_OPCODE_DRSQ = 217 /* eg, cayman also has DRSQ */,
+ TGSI_OPCODE_DTRUNC = 218 /* nvc0 */,
+ TGSI_OPCODE_DCEIL = 219 /* nvc0 */,
+ TGSI_OPCODE_DFLR = 220 /* nvc0 */,
+ TGSI_OPCODE_DROUND = 221 /* nvc0 */,
+ TGSI_OPCODE_DSSG = 222,
+ TGSI_OPCODE_DDIV = 223,
+ TGSI_OPCODE_CLOCK = 224,
/* opcodes for ARB_gpu_shader_int64 */
-#define TGSI_OPCODE_I64ABS 225
-#define TGSI_OPCODE_I64NEG 226
-#define TGSI_OPCODE_I64SSG 227
-#define TGSI_OPCODE_I64SLT 228
-#define TGSI_OPCODE_I64SGE 229
-#define TGSI_OPCODE_I64MIN 230
-#define TGSI_OPCODE_I64MAX 231
-#define TGSI_OPCODE_I64SHR 232
-#define TGSI_OPCODE_I64DIV 233
-#define TGSI_OPCODE_I64MOD 234
-#define TGSI_OPCODE_F2I64 235
-#define TGSI_OPCODE_U2I64 236
-#define TGSI_OPCODE_I2I64 237
-#define TGSI_OPCODE_D2I64 238
-#define TGSI_OPCODE_I642F 239
-#define TGSI_OPCODE_I642D 240
-
-#define TGSI_OPCODE_U64ADD 241
-#define TGSI_OPCODE_U64MUL 242
-#define TGSI_OPCODE_U64SEQ 243
-#define TGSI_OPCODE_U64SNE 244
-#define TGSI_OPCODE_U64SLT 245
-#define TGSI_OPCODE_U64SGE 246
-#define TGSI_OPCODE_U64MIN 247
-#define TGSI_OPCODE_U64MAX 248
-#define TGSI_OPCODE_U64SHL 249
-#define TGSI_OPCODE_U64SHR 250
-#define TGSI_OPCODE_U64DIV 251
-#define TGSI_OPCODE_U64MOD 252
-#define TGSI_OPCODE_F2U64 253
-#define TGSI_OPCODE_D2U64 254
-#define TGSI_OPCODE_U642F 255
-#define TGSI_OPCODE_U642D 256
-
-#define TGSI_OPCODE_LAST 257
+ TGSI_OPCODE_I64ABS = 225,
+ TGSI_OPCODE_I64NEG = 226,
+ TGSI_OPCODE_I64SSG = 227,
+ TGSI_OPCODE_I64SLT = 228,
+ TGSI_OPCODE_I64SGE = 229,
+ TGSI_OPCODE_I64MIN = 230,
+ TGSI_OPCODE_I64MAX = 231,
+ TGSI_OPCODE_I64SHR = 232,
+ TGSI_OPCODE_I64DIV = 233,
+ TGSI_OPCODE_I64MOD = 234,
+ TGSI_OPCODE_F2I64 = 235,
+ TGSI_OPCODE_U2I64 = 236,
+ TGSI_OPCODE_I2I64 = 237,
+ TGSI_OPCODE_D2I64 = 238,
+ TGSI_OPCODE_I642F = 239,
+ TGSI_OPCODE_I642D = 240,
+
+ TGSI_OPCODE_U64ADD = 241,
+ TGSI_OPCODE_U64MUL = 242,
+ TGSI_OPCODE_U64SEQ = 243,
+ TGSI_OPCODE_U64SNE = 244,
+ TGSI_OPCODE_U64SLT = 245,
+ TGSI_OPCODE_U64SGE = 246,
+ TGSI_OPCODE_U64MIN = 247,
+ TGSI_OPCODE_U64MAX = 248,
+ TGSI_OPCODE_U64SHL = 249,
+ TGSI_OPCODE_U64SHR = 250,
+ TGSI_OPCODE_U64DIV = 251,
+ TGSI_OPCODE_U64MOD = 252,
+ TGSI_OPCODE_F2U64 = 253,
+ TGSI_OPCODE_D2U64 = 254,
+ TGSI_OPCODE_U642F = 255,
+ TGSI_OPCODE_U642D = 256,
+
+ TGSI_OPCODE_LAST = 257,
+};
/**
* Opcode is the operation code to execute. A given operation defines the
@@ -652,10 +675,12 @@ struct tgsi_instruction
* instruction, including the instruction word.
*/
-#define TGSI_SWIZZLE_X 0
-#define TGSI_SWIZZLE_Y 1
-#define TGSI_SWIZZLE_Z 2
-#define TGSI_SWIZZLE_W 3
+enum tgsi_swizzle {
+ TGSI_SWIZZLE_X,
+ TGSI_SWIZZLE_Y,
+ TGSI_SWIZZLE_Z,
+ TGSI_SWIZZLE_W,
+};
struct tgsi_instruction_label
{
@@ -663,26 +688,28 @@ struct tgsi_instruction_label
unsigned Padding : 8;
};
-#define TGSI_TEXTURE_BUFFER 0
-#define TGSI_TEXTURE_1D 1
-#define TGSI_TEXTURE_2D 2
-#define TGSI_TEXTURE_3D 3
-#define TGSI_TEXTURE_CUBE 4
-#define TGSI_TEXTURE_RECT 5
-#define TGSI_TEXTURE_SHADOW1D 6
-#define TGSI_TEXTURE_SHADOW2D 7
-#define TGSI_TEXTURE_SHADOWRECT 8
-#define TGSI_TEXTURE_1D_ARRAY 9
-#define TGSI_TEXTURE_2D_ARRAY 10
-#define TGSI_TEXTURE_SHADOW1D_ARRAY 11
-#define TGSI_TEXTURE_SHADOW2D_ARRAY 12
-#define TGSI_TEXTURE_SHADOWCUBE 13
-#define TGSI_TEXTURE_2D_MSAA 14
-#define TGSI_TEXTURE_2D_ARRAY_MSAA 15
-#define TGSI_TEXTURE_CUBE_ARRAY 16
-#define TGSI_TEXTURE_SHADOWCUBE_ARRAY 17
-#define TGSI_TEXTURE_UNKNOWN 18
-#define TGSI_TEXTURE_COUNT 19
+enum tgsi_texture_type {
+ TGSI_TEXTURE_BUFFER,
+ TGSI_TEXTURE_1D,
+ TGSI_TEXTURE_2D,
+ TGSI_TEXTURE_3D,
+ TGSI_TEXTURE_CUBE,
+ TGSI_TEXTURE_RECT,
+ TGSI_TEXTURE_SHADOW1D,
+ TGSI_TEXTURE_SHADOW2D,
+ TGSI_TEXTURE_SHADOWRECT,
+ TGSI_TEXTURE_1D_ARRAY,
+ TGSI_TEXTURE_2D_ARRAY,
+ TGSI_TEXTURE_SHADOW1D_ARRAY,
+ TGSI_TEXTURE_SHADOW2D_ARRAY,
+ TGSI_TEXTURE_SHADOWCUBE,
+ TGSI_TEXTURE_2D_MSAA,
+ TGSI_TEXTURE_2D_ARRAY_MSAA,
+ TGSI_TEXTURE_CUBE_ARRAY,
+ TGSI_TEXTURE_SHADOWCUBE_ARRAY,
+ TGSI_TEXTURE_UNKNOWN,
+ TGSI_TEXTURE_COUNT,
+};
struct tgsi_instruction_texture
{
diff --git a/src/gallium/include/pipe/p_state.h b/src/gallium/include/pipe/p_state.h
index f0a7b550..48d5a263 100644
--- a/src/gallium/include/pipe/p_state.h
+++ b/src/gallium/include/pipe/p_state.h
@@ -57,10 +57,10 @@ extern "C" {
#define PIPE_MAX_CLIP_PLANES 8
#define PIPE_MAX_COLOR_BUFS 8
#define PIPE_MAX_CONSTANT_BUFFERS 32
-#define PIPE_MAX_SAMPLERS 16
+#define PIPE_MAX_SAMPLERS 32
#define PIPE_MAX_SHADER_INPUTS 80 /* 32 GENERIC + 32 PATCH + 16 others */
#define PIPE_MAX_SHADER_OUTPUTS 80 /* 32 GENERIC + 32 PATCH + 16 others */
-#define PIPE_MAX_SHADER_SAMPLER_VIEWS 32
+#define PIPE_MAX_SHADER_SAMPLER_VIEWS 128
#define PIPE_MAX_SHADER_BUFFERS 32
#define PIPE_MAX_SHADER_IMAGES 32
#define PIPE_MAX_TEXTURE_LEVELS 16
diff --git a/src/gallium/include/pipe/p_video_enums.h b/src/gallium/include/pipe/p_video_enums.h
index 10205ac4..f7853528 100644
--- a/src/gallium/include/pipe/p_video_enums.h
+++ b/src/gallium/include/pipe/p_video_enums.h
@@ -28,13 +28,21 @@
#ifndef PIPE_VIDEO_ENUMS_H
#define PIPE_VIDEO_ENUMS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
enum pipe_video_format
{
PIPE_VIDEO_FORMAT_UNKNOWN = 0,
PIPE_VIDEO_FORMAT_MPEG12, /**< MPEG1, MPEG2 */
PIPE_VIDEO_FORMAT_MPEG4, /**< DIVX, XVID */
PIPE_VIDEO_FORMAT_VC1, /**< WMV */
- PIPE_VIDEO_FORMAT_MPEG4_AVC /**< H.264 */
+ PIPE_VIDEO_FORMAT_MPEG4_AVC,/**< H.264 */
+ PIPE_VIDEO_FORMAT_HEVC, /**< H.265 */
+ PIPE_VIDEO_FORMAT_JPEG, /**< JPEG */
+ PIPE_VIDEO_FORMAT_VP9, /**< VP9 */
+ PIPE_VIDEO_FORMAT_AV1 /**< AV1 */
};
enum pipe_video_profile
@@ -49,8 +57,23 @@ enum pipe_video_profile
PIPE_VIDEO_PROFILE_VC1_MAIN,
PIPE_VIDEO_PROFILE_VC1_ADVANCED,
PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_CONSTRAINED_BASELINE,
PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN,
- PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_EXTENDED,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH10,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH422,
+ PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH444,
+ PIPE_VIDEO_PROFILE_HEVC_MAIN,
+ PIPE_VIDEO_PROFILE_HEVC_MAIN_10,
+ PIPE_VIDEO_PROFILE_HEVC_MAIN_STILL,
+ PIPE_VIDEO_PROFILE_HEVC_MAIN_12,
+ PIPE_VIDEO_PROFILE_HEVC_MAIN_444,
+ PIPE_VIDEO_PROFILE_JPEG_BASELINE,
+ PIPE_VIDEO_PROFILE_VP9_PROFILE0,
+ PIPE_VIDEO_PROFILE_VP9_PROFILE2,
+ PIPE_VIDEO_PROFILE_AV1_MAIN,
+ PIPE_VIDEO_PROFILE_MAX
};
/* Video caps, can be different for each codec/profile */
@@ -64,9 +87,51 @@ enum pipe_video_cap
PIPE_VIDEO_CAP_PREFERS_INTERLACED = 5,
PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE = 6,
PIPE_VIDEO_CAP_SUPPORTS_INTERLACED = 7,
- PIPE_VIDEO_CAP_MAX_LEVEL = 8
+ PIPE_VIDEO_CAP_MAX_LEVEL = 8,
+ PIPE_VIDEO_CAP_STACKED_FRAMES = 9,
+ PIPE_VIDEO_CAP_MAX_MACROBLOCKS = 10,
+ PIPE_VIDEO_CAP_MAX_TEMPORAL_LAYERS = 11,
+ PIPE_VIDEO_CAP_EFC_SUPPORTED = 12,
+ PIPE_VIDEO_CAP_ENC_MAX_SLICES_PER_FRAME = 13,
+ PIPE_VIDEO_CAP_ENC_SLICES_STRUCTURE = 14,
+ PIPE_VIDEO_CAP_ENC_MAX_REFERENCES_PER_FRAME = 15,
+};
+
+/* To be used with cap PIPE_VIDEO_CAP_ENC_SLICES_STRUCTURE*/
+/**
+ * pipe_video_cap_slice_structure
+ *
+ * This attribute determines slice structures supported by the
+ * driver for encoding. This attribute is a hint to the user so
+ * that he can choose a suitable surface size and how to arrange
+ * the encoding process of multiple slices per frame.
+ *
+ * More specifically, for H.264 encoding, this attribute
+ * determines the range of accepted values to
+ * h264_slice_descriptor::macroblock_address and
+ * h264_slice_descriptor::num_macroblocks.
+ */
+enum pipe_video_cap_slice_structure
+{
+ /* Driver does not supports multiple slice per frame.*/
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_NONE = 0x00000000,
+ /* Driver supports a power-of-two number of rows per slice.*/
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_POWER_OF_TWO_ROWS = 0x00000001,
+ /* Driver supports an arbitrary number of macroblocks per slice.*/
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS = 0x00000002,
+ /* Driver support 1 row per slice*/
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_EQUAL_ROWS = 0x00000004,
+ /* Driver support max encoded slice size per slice */
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_MAX_SLICE_SIZE = 0x00000008,
+ /* Driver supports an arbitrary number of rows per slice. */
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_ARBITRARY_ROWS = 0x00000010,
+ /* Driver supports any number of rows per slice but they must be the same
+ * for all slices except for the last one, which must be equal or smaller
+ * to the previous slices. */
+ PIPE_VIDEO_CAP_SLICE_STRUCTURE_EQUAL_MULTI_ROWS = 0x00000020,
};
+
enum pipe_video_entrypoint
{
PIPE_VIDEO_ENTRYPOINT_UNKNOWN,
@@ -76,4 +141,8 @@ enum pipe_video_entrypoint
PIPE_VIDEO_ENTRYPOINT_ENCODE
};
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* PIPE_VIDEO_ENUMS_H */
diff --git a/src/gallium/include/pipe/p_video_state.h b/src/gallium/include/pipe/p_video_state.h
new file mode 100644
index 00000000..8540f020
--- /dev/null
+++ b/src/gallium/include/pipe/p_video_state.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2022 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_VIDEO_STATE_H
+#define PIPE_VIDEO_STATE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum pipe_h264_slice_type
+{
+ PIPE_H264_SLICE_TYPE_P = 0x0,
+ PIPE_H264_SLICE_TYPE_B = 0x1,
+ PIPE_H264_SLICE_TYPE_I = 0x2,
+ PIPE_H264_SLICE_TYPE_SP = 0x3,
+ PIPE_H264_SLICE_TYPE_SI = 0x4
+};
+
+/* Same enum for h264/h265 */
+enum pipe_h2645_enc_picture_type
+{
+ PIPE_H2645_ENC_PICTURE_TYPE_P = 0x00,
+ PIPE_H2645_ENC_PICTURE_TYPE_B = 0x01,
+ PIPE_H2645_ENC_PICTURE_TYPE_I = 0x02,
+ PIPE_H2645_ENC_PICTURE_TYPE_IDR = 0x03,
+ PIPE_H2645_ENC_PICTURE_TYPE_SKIP = 0x04
+};
+
+enum pipe_h2645_enc_rate_control_method
+{
+ PIPE_H2645_ENC_RATE_CONTROL_METHOD_DISABLE = 0x00,
+ PIPE_H2645_ENC_RATE_CONTROL_METHOD_CONSTANT_SKIP = 0x01,
+ PIPE_H2645_ENC_RATE_CONTROL_METHOD_VARIABLE_SKIP = 0x02,
+ PIPE_H2645_ENC_RATE_CONTROL_METHOD_CONSTANT = 0x03,
+ PIPE_H2645_ENC_RATE_CONTROL_METHOD_VARIABLE = 0x04
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PIPE_VIDEO_STATE_H */
diff --git a/src/gallium/meson.build b/src/gallium/meson.build
index 62c85333..7d70ba0a 100644
--- a/src/gallium/meson.build
+++ b/src/gallium/meson.build
@@ -22,28 +22,12 @@
#
sources_libgallium = [
- 'include/c99_compat.h',
- 'include/no_extern_c.h',
- 'include/pipe/p_config.h',
'include/pipe/p_defines.h',
- 'include/pipe/p_context.h',
'include/pipe/p_state.h',
'include/pipe/p_format.h',
'include/pipe/p_shader_tokens.h',
- 'include/pipe/p_screen.h',
- 'include/pipe/p_compiler.h',
- 'include/pipe/p_video_enums.h',
- 'include/c11/threads_win32.h',
- 'include/c11/threads.h',
- 'include/c11/threads_posix.h',
'auxiliary/util/u_format.h',
- 'auxiliary/util/u_memory.h',
- 'auxiliary/util/u_rect.h',
- 'auxiliary/util/u_surface.h',
- 'auxiliary/util/u_math.h',
- 'auxiliary/util/rgtc.h',
'auxiliary/util/u_format.c',
- 'auxiliary/util/u_debug.h',
'auxiliary/util/u_inlines.h',
'auxiliary/util/u_texture.c',
'auxiliary/util/u_pointer.h',
@@ -52,31 +36,17 @@ sources_libgallium = [
'auxiliary/util/u_dual_blend.h',
'auxiliary/util/u_texture.h',
'auxiliary/util/u_hash_table.h',
- 'auxiliary/util/u_box.h',
- 'auxiliary/util/u_debug.c',
- 'auxiliary/util/u_cpu_detect.c',
- 'auxiliary/util/u_pack_color.h',
'auxiliary/util/u_double_list.h',
'auxiliary/util/u_debug_refcnt.h',
- 'auxiliary/util/u_bitmask.c',
- 'auxiliary/util/u_cpu_detect.h',
- 'auxiliary/util/u_bitmask.h',
'auxiliary/util/u_format_s3tc.h',
- 'auxiliary/util/u_string.h',
- 'auxiliary/util/u_surface.c',
- 'auxiliary/util/u_math.c',
'auxiliary/util/u_half.h',
'auxiliary/util/u_prim.h',
'auxiliary/util/u_debug_describe.c',
- 'auxiliary/util/u_atomic.h',
- 'auxiliary/util/xxhash.h',
'auxiliary/cso_cache/cso_hash.h',
'auxiliary/cso_cache/cso_cache.h',
'auxiliary/cso_cache/cso_cache.c',
'auxiliary/cso_cache/cso_hash.c',
- 'auxiliary/tgsi/tgsi_opcode_tmp.h',
'auxiliary/tgsi/tgsi_dump.c',
- 'auxiliary/tgsi/tgsi_ureg.c',
'auxiliary/tgsi/tgsi_build.c',
'auxiliary/tgsi/tgsi_build.h',
'auxiliary/tgsi/tgsi_util.h',
@@ -86,9 +56,7 @@ sources_libgallium = [
'auxiliary/tgsi/tgsi_parse.h',
'auxiliary/tgsi/tgsi_text.h',
'auxiliary/tgsi/tgsi_strings.h',
- 'auxiliary/tgsi/tgsi_ureg.h',
'auxiliary/tgsi/tgsi_parse.c',
- 'auxiliary/tgsi/tgsi_transform.h',
'auxiliary/tgsi/tgsi_info.h',
'auxiliary/tgsi/tgsi_text.c',
'auxiliary/tgsi/tgsi_strings.c',
@@ -98,15 +66,6 @@ sources_libgallium = [
'auxiliary/tgsi/tgsi_dump.h',
'auxiliary/tgsi/tgsi_util.c',
'auxiliary/tgsi/tgsi_sanity.h',
- 'auxiliary/tgsi/tgsi_transform.c',
- 'auxiliary/os/os_memory_aligned.h',
- 'auxiliary/os/os_thread.h',
- 'auxiliary/os/os_mman.h',
- 'auxiliary/os/os_misc.h',
- 'auxiliary/os/os_memory.h',
- 'auxiliary/os/os_memory_debug.h',
- 'auxiliary/os/os_memory_stdc.h',
- 'auxiliary/os/os_misc.c',
]
inc_gallium = include_directories('include', 'auxiliary', 'auxiliary/util')
@@ -115,7 +74,7 @@ u_format_table_c = custom_target(
'u_format_table.c',
input : ['auxiliary/util/u_format_table.py', 'auxiliary/util/u_format.csv'],
output : 'u_format_table.c',
- command : [prog_python, '@INPUT@'],
+ command : [prog_python, '-B', '@INPUT@'],
depend_files : files('auxiliary/util/u_format_parse.py'),
capture : true,
)
@@ -126,9 +85,11 @@ libgallium = static_library(
include_directories : [
inc_gallium, inc_configuration
],
+ dependencies: mesa_dep,
)
gallium_dep = declare_dependency(
link_with: libgallium,
- include_directories: [inc_gallium, inc_configuration]
+ include_directories: [inc_gallium, inc_configuration],
+ dependencies: mesa_dep,
)
diff --git a/src/gallium/include/c11/threads.h b/src/mesa/compat/c11/threads.h
index 45823df3..3c3f23a8 100644
--- a/src/gallium/include/c11/threads.h
+++ b/src/mesa/compat/c11/threads.h
@@ -41,12 +41,6 @@
typedef void (*tss_dtor_t)(void*);
typedef int (*thrd_start_t)(void*);
-struct xtime {
- time_t sec;
- long nsec;
-};
-typedef struct xtime xtime;
-
/*-------------------- enumeration constants --------------------*/
enum {
diff --git a/src/gallium/include/c11/threads_posix.h b/src/mesa/compat/c11/threads_posix.h
index ce9853b1..45cb6075 100644
--- a/src/gallium/include/c11/threads_posix.h
+++ b/src/mesa/compat/c11/threads_posix.h
@@ -132,19 +132,15 @@ cnd_signal(cnd_t *cond)
// 7.25.3.5
static inline int
-cnd_timedwait(cnd_t *cond, mtx_t *mtx, const xtime *xt)
+cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *abs_time)
{
- struct timespec abs_time;
int rt;
assert(mtx != NULL);
assert(cond != NULL);
- assert(xt != NULL);
+ assert(abs_time != NULL);
- abs_time.tv_sec = xt->sec;
- abs_time.tv_nsec = xt->nsec;
-
- rt = pthread_cond_timedwait(cond, mtx, &abs_time);
+ rt = pthread_cond_timedwait(cond, mtx, abs_time);
if (rt == ETIMEDOUT)
return thrd_busy;
return (rt == 0) ? thrd_success : thrd_error;
@@ -169,6 +165,32 @@ mtx_destroy(mtx_t *mtx)
pthread_mutex_destroy(mtx);
}
+/*
+ * XXX: Workaround when building with -O0 and without pthreads link.
+ *
+ * In such cases constant folding and dead code elimination won't be
+ * available, thus the compiler will always add the pthread_mutexattr*
+ * functions into the binary. As we try to link, we'll fail as the
+ * symbols are unresolved.
+ *
+ * Ideally we'll enable the optimisations locally, yet that does not
+ * seem to work.
+ *
+ * So the alternative workaround is to annotate the symbols as weak.
+ * Thus the linker will be happy and things don't clash when building
+ * with -O1 or greater.
+ */
+#if defined(HAVE_FUNC_ATTRIBUTE_WEAK) && !defined(__CYGWIN__)
+__attribute__((weak))
+int pthread_mutexattr_init(pthread_mutexattr_t *attr);
+
+__attribute__((weak))
+int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type);
+
+__attribute__((weak))
+int pthread_mutexattr_destroy(pthread_mutexattr_t *attr);
+#endif
+
// 7.25.4.2
static inline int
mtx_init(mtx_t *mtx, int type)
@@ -180,9 +202,14 @@ mtx_init(mtx_t *mtx, int type)
&& type != (mtx_timed|mtx_recursive)
&& type != (mtx_try|mtx_recursive))
return thrd_error;
+
+ if ((type & mtx_recursive) == 0) {
+ pthread_mutex_init(mtx, NULL);
+ return thrd_success;
+ }
+
pthread_mutexattr_init(&attr);
- if ((type & mtx_recursive) != 0)
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(mtx, &attr);
pthread_mutexattr_destroy(&attr);
return thrd_success;
@@ -204,24 +231,21 @@ thrd_yield(void);
// 7.25.4.4
static inline int
-mtx_timedlock(mtx_t *mtx, const xtime *xt)
+mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
{
assert(mtx != NULL);
- assert(xt != NULL);
+ assert(ts != NULL);
{
#ifdef EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
- struct timespec ts;
int rt;
- ts.tv_sec = xt->sec;
- ts.tv_nsec = xt->nsec;
- rt = pthread_mutex_timedlock(mtx, &ts);
+ rt = pthread_mutex_timedlock(mtx, ts);
if (rt == 0)
return thrd_success;
return (rt == ETIMEDOUT) ? thrd_busy : thrd_error;
#else
time_t expire = time(NULL);
- expire += xt->sec;
+ expire += ts->tv_sec;
while (mtx_trylock(mtx) != thrd_success) {
time_t now = time(NULL);
if (expire < now)
@@ -311,13 +335,10 @@ thrd_join(thrd_t thr, int *res)
// 7.25.5.7
static inline void
-thrd_sleep(const xtime *xt)
+thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
{
- struct timespec req;
- assert(xt);
- req.tv_sec = xt->sec;
- req.tv_nsec = xt->nsec;
- nanosleep(&req, NULL);
+ assert(time_point != NULL);
+ nanosleep(time_point, remaining);
}
// 7.25.5.8
@@ -361,14 +382,15 @@ tss_set(tss_t key, void *val)
/*-------------------- 7.25.7 Time functions --------------------*/
// 7.25.6.1
+#ifndef HAVE_TIMESPEC_GET
static inline int
-xtime_get(xtime *xt, int base)
+timespec_get(struct timespec *ts, int base)
{
- if (!xt) return 0;
+ if (!ts) return 0;
if (base == TIME_UTC) {
- xt->sec = time(NULL);
- xt->nsec = 0;
+ clock_gettime(CLOCK_REALTIME, ts);
return base;
}
return 0;
}
+#endif
diff --git a/src/gallium/include/c11/threads_win32.h b/src/mesa/compat/c11/threads_win32.h
index d017c31c..02c2a73d 100644
--- a/src/gallium/include/c11/threads_win32.h
+++ b/src/mesa/compat/c11/threads_win32.h
@@ -42,23 +42,14 @@ Configuration macro:
(requires WinVista or later)
Otherwise emulate by mtx_trylock() + *busy loop* for WinXP.
- EMULATED_THREADS_USE_NATIVE_CV
- Use native WindowsAPI condition variable object.
- (requires WinVista or later)
- Otherwise use emulated implementation for WinXP.
-
EMULATED_THREADS_TSS_DTOR_SLOTNUM
Max registerable TSS dtor number.
*/
-// XXX: Retain XP compatability
-#if 0
#if _WIN32_WINNT >= 0x0600
// Prefer native WindowsAPI on newer environment.
#if !defined(__MINGW32__)
-#define EMULATED_THREADS_USE_NATIVE_CALL_ONCE
-#endif
-#define EMULATED_THREADS_USE_NATIVE_CV
+#define EMULATED_THREADS_USE_NATIVE_CALL_ONCE
#endif
#endif
#define EMULATED_THREADS_TSS_DTOR_SLOTNUM 64 // see TLS_MINIMUM_AVAILABLE
@@ -71,11 +62,11 @@ Configuration macro:
#error EMULATED_THREADS_USE_NATIVE_CALL_ONCE requires _WIN32_WINNT>=0x0600
#endif
-#if defined(EMULATED_THREADS_USE_NATIVE_CV) && (_WIN32_WINNT < 0x0600)
-#error EMULATED_THREADS_USE_NATIVE_CV requires _WIN32_WINNT>=0x0600
+/* Visual Studio 2015 and later */
+#ifdef _MSC_VER
+#define HAVE_TIMESPEC_GET
#endif
-
/*---------------------------- macros ----------------------------*/
#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
#define ONCE_FLAG_INIT INIT_ONCE_STATIC_INIT
@@ -88,18 +79,7 @@ Configuration macro:
#define _MTX_INITIALIZER_NP {(PCRITICAL_SECTION_DEBUG)-1, -1, 0, 0, 0, 0}
/*---------------------------- types ----------------------------*/
-typedef struct cnd_t {
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
- CONDITION_VARIABLE condvar;
-#else
- int blocked;
- int gone;
- int to_unblock;
- HANDLE sem_queue;
- HANDLE sem_gate;
- CRITICAL_SECTION monitor;
-#endif
-} cnd_t;
+typedef CONDITION_VARIABLE cnd_t;
typedef HANDLE thrd_t;
@@ -146,11 +126,23 @@ static unsigned __stdcall impl_thrd_routine(void *p)
return (unsigned)code;
}
-static DWORD impl_xtime2msec(const xtime *xt)
+static time_t impl_timespec2msec(const struct timespec *ts)
{
- return (DWORD)((xt->sec * 1000U) + (xt->nsec / 1000000L));
+ return (ts->tv_sec * 1000U) + (ts->tv_nsec / 1000000L);
}
+#ifdef HAVE_TIMESPEC_GET
+static DWORD impl_abs2relmsec(const struct timespec *abs_time)
+{
+ const time_t abs_ms = impl_timespec2msec(abs_time);
+ struct timespec now;
+ timespec_get(&now, TIME_UTC);
+ const time_t now_ms = impl_timespec2msec(&now);
+ const DWORD rel_ms = (abs_ms > now_ms) ? (DWORD)(abs_ms - now_ms) : 0;
+ return rel_ms;
+}
+#endif
+
#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
struct impl_call_once_param { void (*func)(void); };
static BOOL CALLBACK impl_call_once_callback(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context)
@@ -162,103 +154,6 @@ static BOOL CALLBACK impl_call_once_callback(PINIT_ONCE InitOnce, PVOID Paramete
}
#endif // ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
-#ifndef EMULATED_THREADS_USE_NATIVE_CV
-/*
-Note:
- The implementation of condition variable is ported from Boost.Interprocess
- See http://www.boost.org/boost/interprocess/sync/windows/condition.hpp
-*/
-static void impl_cond_do_signal(cnd_t *cond, int broadcast)
-{
- int nsignal = 0;
-
- EnterCriticalSection(&cond->monitor);
- if (cond->to_unblock != 0) {
- if (cond->blocked == 0) {
- LeaveCriticalSection(&cond->monitor);
- return;
- }
- if (broadcast) {
- cond->to_unblock += nsignal = cond->blocked;
- cond->blocked = 0;
- } else {
- nsignal = 1;
- cond->to_unblock++;
- cond->blocked--;
- }
- } else if (cond->blocked > cond->gone) {
- WaitForSingleObject(cond->sem_gate, INFINITE);
- if (cond->gone != 0) {
- cond->blocked -= cond->gone;
- cond->gone = 0;
- }
- if (broadcast) {
- nsignal = cond->to_unblock = cond->blocked;
- cond->blocked = 0;
- } else {
- nsignal = cond->to_unblock = 1;
- cond->blocked--;
- }
- }
- LeaveCriticalSection(&cond->monitor);
-
- if (0 < nsignal)
- ReleaseSemaphore(cond->sem_queue, nsignal, NULL);
-}
-
-static int impl_cond_do_wait(cnd_t *cond, mtx_t *mtx, const xtime *xt)
-{
- int nleft = 0;
- int ngone = 0;
- int timeout = 0;
- DWORD w;
-
- WaitForSingleObject(cond->sem_gate, INFINITE);
- cond->blocked++;
- ReleaseSemaphore(cond->sem_gate, 1, NULL);
-
- mtx_unlock(mtx);
-
- w = WaitForSingleObject(cond->sem_queue, xt ? impl_xtime2msec(xt) : INFINITE);
- timeout = (w == WAIT_TIMEOUT);
-
- EnterCriticalSection(&cond->monitor);
- if ((nleft = cond->to_unblock) != 0) {
- if (timeout) {
- if (cond->blocked != 0) {
- cond->blocked--;
- } else {
- cond->gone++;
- }
- }
- if (--cond->to_unblock == 0) {
- if (cond->blocked != 0) {
- ReleaseSemaphore(cond->sem_gate, 1, NULL);
- nleft = 0;
- }
- else if ((ngone = cond->gone) != 0) {
- cond->gone = 0;
- }
- }
- } else if (++cond->gone == INT_MAX/2) {
- WaitForSingleObject(cond->sem_gate, INFINITE);
- cond->blocked -= cond->gone;
- ReleaseSemaphore(cond->sem_gate, 1, NULL);
- cond->gone = 0;
- }
- LeaveCriticalSection(&cond->monitor);
-
- if (nleft == 1) {
- while (ngone--)
- WaitForSingleObject(cond->sem_queue, INFINITE);
- ReleaseSemaphore(cond->sem_gate, 1, NULL);
- }
-
- mtx_lock(mtx);
- return timeout ? thrd_busy : thrd_success;
-}
-#endif // ifndef EMULATED_THREADS_USE_NATIVE_CV
-
static struct impl_tss_dtor_entry {
tss_t key;
tss_dtor_t dtor;
@@ -322,12 +217,8 @@ call_once(once_flag *flag, void (*func)(void))
static inline int
cnd_broadcast(cnd_t *cond)
{
- if (!cond) return thrd_error;
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
- WakeAllConditionVariable(&cond->condvar);
-#else
- impl_cond_do_signal(cond, 1);
-#endif
+ assert(cond != NULL);
+ WakeAllConditionVariable(cond);
return thrd_success;
}
@@ -335,31 +226,16 @@ cnd_broadcast(cnd_t *cond)
static inline void
cnd_destroy(cnd_t *cond)
{
- assert(cond);
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ assert(cond != NULL);
// do nothing
-#else
- CloseHandle(cond->sem_queue);
- CloseHandle(cond->sem_gate);
- DeleteCriticalSection(&cond->monitor);
-#endif
}
// 7.25.3.3
static inline int
cnd_init(cnd_t *cond)
{
- if (!cond) return thrd_error;
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
- InitializeConditionVariable(&cond->condvar);
-#else
- cond->blocked = 0;
- cond->gone = 0;
- cond->to_unblock = 0;
- cond->sem_queue = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
- cond->sem_gate = CreateSemaphore(NULL, 1, 1, NULL);
- InitializeCriticalSection(&cond->monitor);
-#endif
+ assert(cond != NULL);
+ InitializeConditionVariable(cond);
return thrd_success;
}
@@ -367,26 +243,25 @@ cnd_init(cnd_t *cond)
static inline int
cnd_signal(cnd_t *cond)
{
- if (!cond) return thrd_error;
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
- WakeConditionVariable(&cond->condvar);
-#else
- impl_cond_do_signal(cond, 0);
-#endif
+ assert(cond != NULL);
+ WakeConditionVariable(cond);
return thrd_success;
}
// 7.25.3.5
static inline int
-cnd_timedwait(cnd_t *cond, mtx_t *mtx, const xtime *xt)
-{
- if (!cond || !mtx || !xt) return thrd_error;
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
- if (SleepConditionVariableCS(&cond->condvar, mtx, impl_xtime2msec(xt)))
+cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *abs_time)
+{
+ assert(cond != NULL);
+ assert(mtx != NULL);
+ assert(abs_time != NULL);
+#ifdef HAVE_TIMESPEC_GET
+ const DWORD timeout = impl_abs2relmsec(abs_time);
+ if (SleepConditionVariableCS(cond, mtx, timeout))
return thrd_success;
return (GetLastError() == ERROR_TIMEOUT) ? thrd_busy : thrd_error;
#else
- return impl_cond_do_wait(cond, mtx, xt);
+ return thrd_error;
#endif
}
@@ -394,12 +269,9 @@ cnd_timedwait(cnd_t *cond, mtx_t *mtx, const xtime *xt)
static inline int
cnd_wait(cnd_t *cond, mtx_t *mtx)
{
- if (!cond || !mtx) return thrd_error;
-#ifdef EMULATED_THREADS_USE_NATIVE_CV
- SleepConditionVariableCS(&cond->condvar, mtx, INFINITE);
-#else
- impl_cond_do_wait(cond, mtx, NULL);
-#endif
+ assert(cond != NULL);
+ assert(mtx != NULL);
+ SleepConditionVariableCS(cond, mtx, INFINITE);
return thrd_success;
}
@@ -417,7 +289,7 @@ mtx_destroy(mtx_t *mtx)
static inline int
mtx_init(mtx_t *mtx, int type)
{
- if (!mtx) return thrd_error;
+ assert(mtx != NULL);
if (type != mtx_plain && type != mtx_timed && type != mtx_try
&& type != (mtx_plain|mtx_recursive)
&& type != (mtx_timed|mtx_recursive)
@@ -431,34 +303,35 @@ mtx_init(mtx_t *mtx, int type)
static inline int
mtx_lock(mtx_t *mtx)
{
- if (!mtx) return thrd_error;
+ assert(mtx != NULL);
EnterCriticalSection(mtx);
return thrd_success;
}
// 7.25.4.4
static inline int
-mtx_timedlock(mtx_t *mtx, const xtime *xt)
+mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
{
- time_t expire, now;
- if (!mtx || !xt) return thrd_error;
- expire = time(NULL);
- expire += xt->sec;
+ assert(mtx != NULL);
+ assert(ts != NULL);
+#ifdef HAVE_TIMESPEC_GET
while (mtx_trylock(mtx) != thrd_success) {
- now = time(NULL);
- if (expire < now)
+ if (impl_abs2relmsec(ts) == 0)
return thrd_busy;
// busy loop!
thrd_yield();
}
return thrd_success;
+#else
+ return thrd_error;
+#endif
}
// 7.25.4.5
static inline int
mtx_trylock(mtx_t *mtx)
{
- if (!mtx) return thrd_error;
+ assert(mtx != NULL);
return TryEnterCriticalSection(mtx) ? thrd_success : thrd_busy;
}
@@ -466,7 +339,7 @@ mtx_trylock(mtx_t *mtx)
static inline int
mtx_unlock(mtx_t *mtx)
{
- if (!mtx) return thrd_error;
+ assert(mtx != NULL);
LeaveCriticalSection(mtx);
return thrd_success;
}
@@ -479,7 +352,7 @@ thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
{
struct impl_thrd_param *pack;
uintptr_t handle;
- if (!thr) return thrd_error;
+ assert(thr != NULL);
pack = (struct impl_thrd_param *)malloc(sizeof(struct impl_thrd_param));
if (!pack) return thrd_nomem;
pack->func = func;
@@ -502,9 +375,13 @@ thrd_current(void)
HANDLE hCurrentThread;
BOOL bRet;
- /* GetCurrentThread() returns a pseudo-handle, which is useless. We need
- * to call DuplicateHandle to get a real handle. However the handle value
- * will not match the one returned by thread_create.
+ /* GetCurrentThread() returns a pseudo-handle, which we need
+ * to pass to DuplicateHandle(). Only the resulting handle can be used
+ * from other threads.
+ *
+ * Note that neither handle can be compared to the one by thread_create.
+ * Only the thread IDs - as returned by GetThreadId() and GetCurrentThreadId()
+ * can be compared directly.
*
* Other potential solutions would be:
* - define thrd_t as a thread Ids, but this would mean we'd need to OpenThread for many operations
@@ -575,10 +452,11 @@ thrd_join(thrd_t thr, int *res)
// 7.25.5.7
static inline void
-thrd_sleep(const xtime *xt)
+thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
{
- assert(xt);
- Sleep(impl_xtime2msec(xt));
+ assert(time_point);
+ assert(!remaining); /* not implemented */
+ Sleep((DWORD)impl_timespec2msec(time_point));
}
// 7.25.5.8
@@ -594,7 +472,7 @@ thrd_yield(void)
static inline int
tss_create(tss_t *key, tss_dtor_t dtor)
{
- if (!key) return thrd_error;
+ assert(key != NULL);
*key = TlsAlloc();
if (dtor) {
if (impl_tss_dtor_register(*key, dtor)) {
@@ -629,14 +507,16 @@ tss_set(tss_t key, void *val)
/*-------------------- 7.25.7 Time functions --------------------*/
// 7.25.6.1
+#ifndef HAVE_TIMESPEC_GET
static inline int
-xtime_get(xtime *xt, int base)
+timespec_get(struct timespec *ts, int base)
{
- if (!xt) return 0;
+ assert(ts != NULL);
if (base == TIME_UTC) {
- xt->sec = time(NULL);
- xt->nsec = 0;
+ ts->tv_sec = time(NULL);
+ ts->tv_nsec = 0;
return base;
}
return 0;
}
+#endif
diff --git a/src/mesa/compat/c11_compat.h b/src/mesa/compat/c11_compat.h
new file mode 100644
index 00000000..d35740f4
--- /dev/null
+++ b/src/mesa/compat/c11_compat.h
@@ -0,0 +1,27 @@
+/* Copyright 2019 Intel Corporation */
+/* SPDX-License-Identifier: MIT */
+
+#include "no_extern_c.h"
+
+#ifndef _C11_COMPAT_H_
+#define _C11_COMPAT_H_
+
+#if defined(__cplusplus)
+ /* This is C++ code, not C */
+#elif (__STDC_VERSION__ >= 201112L)
+ /* Already C11 */
+#else
+
+
+/*
+ * C11 static_assert() macro
+ * assert.h only defines that name for C11 and above
+ */
+#ifndef static_assert
+#define static_assert _Static_assert
+#endif
+
+
+#endif /* !C++ && !C11 */
+
+#endif /* _C11_COMPAT_H_ */
diff --git a/src/gallium/include/c99_compat.h b/src/mesa/compat/c99_compat.h
index 4be5b7e1..ab1ec53a 100644
--- a/src/gallium/include/c99_compat.h
+++ b/src/mesa/compat/c99_compat.h
@@ -36,17 +36,17 @@
*/
#if defined(_MSC_VER)
-# if _MSC_VER < 1500
-# error "Microsoft Visual Studio 2008 or higher required"
+# if _MSC_VER < 1900
+# error "Microsoft Visual Studio 2015 or higher required"
# endif
/*
- * Visual Studio 2012 will complain if we define the `inline` keyword, but
+ * Visual Studio will complain if we define the `inline` keyword, but
* actually it only supports the keyword on C++.
*
* To avoid this the _ALLOW_KEYWORD_MACROS must be set.
*/
-# if (_MSC_VER >= 1700) && !defined(_ALLOW_KEYWORD_MACROS)
+# if !defined(_ALLOW_KEYWORD_MACROS)
# define _ALLOW_KEYWORD_MACROS
# endif
@@ -96,7 +96,7 @@
* - http://cellperformance.beyond3d.com/articles/2006/05/demystifying-the-restrict-keyword.html
*/
#ifndef restrict
-# if (__STDC_VERSION__ >= 199901L)
+# if (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus)
/* C99 */
# elif defined(__GNUC__)
# define restrict __restrict__
diff --git a/src/mesa/compat/c99_math.h b/src/mesa/compat/c99_math.h
new file mode 100644
index 00000000..e906c26a
--- /dev/null
+++ b/src/mesa/compat/c99_math.h
@@ -0,0 +1,211 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2015 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Wrapper for math.h which makes sure we have definitions of all the c99
+ * functions.
+ */
+
+
+#ifndef _C99_MATH_H_
+#define _C99_MATH_H_
+
+#include <math.h>
+#include "c99_compat.h"
+
+
+/* This is to ensure that we get M_PI, etc. definitions */
+#if defined(_MSC_VER) && !defined(_USE_MATH_DEFINES)
+#error _USE_MATH_DEFINES define required when building with MSVC
+#endif
+
+
+#if !defined(_MSC_VER) && \
+ __STDC_VERSION__ < 199901L && \
+ (!defined(_XOPEN_SOURCE) || _XOPEN_SOURCE < 600) && \
+ !defined(__cplusplus)
+
+static inline long int
+lrint(double d)
+{
+ long int rounded = (long int)(d + 0.5);
+
+ if (d - floor(d) == 0.5) {
+ if (rounded % 2 != 0)
+ rounded += (d > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline long int
+lrintf(float f)
+{
+ long int rounded = (long int)(f + 0.5f);
+
+ if (f - floorf(f) == 0.5f) {
+ if (rounded % 2 != 0)
+ rounded += (f > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline long long int
+llrint(double d)
+{
+ long long int rounded = (long long int)(d + 0.5);
+
+ if (d - floor(d) == 0.5) {
+ if (rounded % 2 != 0)
+ rounded += (d > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline long long int
+llrintf(float f)
+{
+ long long int rounded = (long long int)(f + 0.5f);
+
+ if (f - floorf(f) == 0.5f) {
+ if (rounded % 2 != 0)
+ rounded += (f > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline float
+exp2f(float f)
+{
+ return powf(2.0f, f);
+}
+
+static inline double
+exp2(double d)
+{
+ return pow(2.0, d);
+}
+
+#endif /* C99 */
+
+
+/*
+ * signbit() is a macro on Linux. Not available on Windows.
+ */
+#ifndef signbit
+#define signbit(x) ((x) < 0.0f)
+#endif
+
+
+#ifndef M_PI
+#define M_PI (3.14159265358979323846)
+#endif
+
+#ifndef M_E
+#define M_E (2.7182818284590452354)
+#endif
+
+#ifndef M_LOG2E
+#define M_LOG2E (1.4426950408889634074)
+#endif
+
+#ifndef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+#endif
+
+
+#if defined(fpclassify)
+/* ISO C99 says that fpclassify is a macro. Assume that any implementation
+ * of fpclassify, whether it's in a C99 compiler or not, will be a macro.
+ */
+#elif defined(__cplusplus)
+/* For C++, fpclassify() should be defined in <cmath> */
+#elif defined(_MSC_VER)
+/* Not required on VS2013 and above. Oddly, the fpclassify() function
+ * doesn't exist in such a form on MSVC. This is an implementation using
+ * slightly different lower-level Windows functions.
+ */
+#include <float.h>
+
+static inline enum {FP_NAN, FP_INFINITE, FP_ZERO, FP_SUBNORMAL, FP_NORMAL}
+fpclassify(double x)
+{
+ switch(_fpclass(x)) {
+ case _FPCLASS_SNAN: /* signaling NaN */
+ case _FPCLASS_QNAN: /* quiet NaN */
+ return FP_NAN;
+ case _FPCLASS_NINF: /* negative infinity */
+ case _FPCLASS_PINF: /* positive infinity */
+ return FP_INFINITE;
+ case _FPCLASS_NN: /* negative normal */
+ case _FPCLASS_PN: /* positive normal */
+ return FP_NORMAL;
+ case _FPCLASS_ND: /* negative denormalized */
+ case _FPCLASS_PD: /* positive denormalized */
+ return FP_SUBNORMAL;
+ case _FPCLASS_NZ: /* negative zero */
+ case _FPCLASS_PZ: /* positive zero */
+ return FP_ZERO;
+ default:
+ /* Should never get here; but if we do, this will guarantee
+ * that the pattern is not treated like a number.
+ */
+ return FP_NAN;
+ }
+}
+#else
+#error "Need to include or define an fpclassify function"
+#endif
+
+
+/* Since C++11, the following functions are part of the std namespace. Their C
+ * counteparts should still exist in the global namespace, however cmath
+ * undefines those functions, which in glibc 2.23, are defined as macros rather
+ * than functions as in glibc 2.22.
+ */
+#if __cplusplus >= 201103L && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 23))
+#include <cmath>
+
+using std::fpclassify;
+using std::isfinite;
+using std::isinf;
+using std::isnan;
+using std::isnormal;
+using std::signbit;
+using std::isgreater;
+using std::isgreaterequal;
+using std::isless;
+using std::islessequal;
+using std::islessgreater;
+using std::isunordered;
+#endif
+
+
+#endif /* #define _C99_MATH_H_ */
diff --git a/src/gallium/include/no_extern_c.h b/src/mesa/compat/no_extern_c.h
index f79602c0..f2f14aaf 100644
--- a/src/gallium/include/no_extern_c.h
+++ b/src/mesa/compat/no_extern_c.h
@@ -27,7 +27,7 @@
/*
* Including system's headers inside `extern "C" { ... }` is not safe, as system
* headers may have C++ code in them, and C++ code inside extern "C"
- * leads to syntatically incorrect code.
+ * leads to syntactically incorrect code.
*
* This is because putting code inside extern "C" won't make __cplusplus define
* go away, that is, the system header being included thinks is free to use C++
diff --git a/src/mesa/meson.build b/src/mesa/meson.build
new file mode 100644
index 00000000..20501cc3
--- /dev/null
+++ b/src/mesa/meson.build
@@ -0,0 +1,34 @@
+# Copyright 2021 Google LLC
+# SPDX-License-Identifier: MIT
+
+inc_mesa = include_directories('.', 'compat', 'pipe', 'util')
+
+files_mesa = files(
+ 'util/anon_file.c',
+ 'util/bitscan.c',
+ 'util/hash_table.c',
+ 'util/os_file.c',
+ 'util/os_misc.c',
+ 'util/ralloc.c',
+ 'util/u_cpu_detect.c',
+ 'util/u_debug.c',
+ 'util/u_math.c',
+)
+
+deps_mesa = [
+ m_dep,
+ thread_dep,
+]
+
+libmesa = static_library(
+ 'mesa',
+ files_mesa,
+ include_directories: inc_mesa,
+ dependencies: deps_mesa,
+)
+
+mesa_dep = declare_dependency(
+ link_with: libmesa,
+ include_directories: inc_mesa,
+ dependencies: deps_mesa,
+)
diff --git a/src/gallium/include/pipe/p_compiler.h b/src/mesa/pipe/p_compiler.h
index 8156dd38..0ba459b0 100644
--- a/src/gallium/include/pipe/p_compiler.h
+++ b/src/mesa/pipe/p_compiler.h
@@ -29,14 +29,18 @@
#define P_COMPILER_H
+#include "c99_compat.h" /* inline, __func__, etc. */
#include "p_config.h"
+#include "util/macros.h"
+
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <stdarg.h>
#include <limits.h>
+/* (virglrenderer) To get uint typedef with musl */
#include <sys/types.h>
@@ -46,6 +50,8 @@
#if defined(_MSC_VER)
+#include <intrin.h>
+
/* Avoid 'expression is always true' warning */
#pragma warning(disable: 4296)
@@ -56,9 +62,6 @@
* Alternative stdint.h and stdbool.h headers are supplied in include/c99 for
* systems that lack it.
*/
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS 1
-#endif
#include <stdint.h>
#include <stdbool.h>
@@ -92,17 +95,6 @@ typedef unsigned char boolean;
#endif
#endif
-/* Function visibility */
-#ifndef PUBLIC
-# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
-# define PUBLIC __attribute__((visibility("default")))
-# elif defined(_MSC_VER)
-# define PUBLIC __declspec(dllexport)
-# else
-# define PUBLIC
-# endif
-#endif
-
/* XXX: Use standard `__func__` instead */
#ifndef __FUNCTION__
@@ -130,7 +122,7 @@ typedef unsigned char boolean;
/* Macros for data alignment. */
-#if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) || defined(__SUNPRO_CC)
+#if defined(__GNUC__)
/* See http://gcc.gnu.org/onlinedocs/gcc-4.4.2/gcc/Type-Attributes.html */
#define PIPE_ALIGN_TYPE(_alignment, _type) _type __attribute__((aligned(_alignment)))
@@ -138,7 +130,7 @@ typedef unsigned char boolean;
/* See http://gcc.gnu.org/onlinedocs/gcc-4.4.2/gcc/Variable-Attributes.html */
#define PIPE_ALIGN_VAR(_alignment) __attribute__((aligned(_alignment)))
-#if (__GNUC__ > 4 || (__GNUC__ == 4 &&__GNUC_MINOR__>1)) && !defined(PIPE_ARCH_X86_64)
+#if defined(__GNUC__) && defined(PIPE_ARCH_X86)
#define PIPE_ALIGN_STACK __attribute__((force_align_arg_pointer))
#else
#define PIPE_ALIGN_STACK
@@ -172,14 +164,8 @@ typedef unsigned char boolean;
#elif defined(_MSC_VER)
-void _ReadWriteBarrier(void);
-#pragma intrinsic(_ReadWriteBarrier)
#define PIPE_READ_WRITE_BARRIER() _ReadWriteBarrier()
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-
-#define PIPE_READ_WRITE_BARRIER() __machine_rw_barrier()
-
#else
#warning "Unsupported compiler"
@@ -187,61 +173,6 @@ void _ReadWriteBarrier(void);
#endif
-
-/* You should use these macros to mark if blocks where the if condition
- * is either likely to be true, or unlikely to be true.
- *
- * This will inform human readers of this fact, and will also inform
- * the compiler, who will in turn inform the CPU.
- *
- * CPUs often start executing code inside the if or the else blocks
- * without knowing whether the condition is true or not, and will have
- * to throw the work away if they find out later they executed the
- * wrong part of the if.
- *
- * If these macros are used, the CPU is more likely to correctly predict
- * the right path, and will avoid speculatively executing the wrong branch,
- * thus not throwing away work, resulting in better performance.
- *
- * In light of this, it is also a good idea to mark as "likely" a path
- * which is not necessarily always more likely, but that will benefit much
- * more from performance improvements since it is already much faster than
- * the other path, or viceversa with "unlikely".
- *
- * Example usage:
- * if(unlikely(do_we_need_a_software_fallback()))
- * do_software_fallback();
- * else
- * render_with_gpu();
- *
- * The macros follow the Linux kernel convention, and more examples can
- * be found there.
- *
- * Note that profile guided optimization can offer better results, but
- * needs an appropriate coverage suite and does not inform human readers.
- */
-#ifndef likely
-# if defined(__GNUC__)
-# define likely(x) __builtin_expect(!!(x), 1)
-# define unlikely(x) __builtin_expect(!!(x), 0)
-# else
-# define likely(x) (x)
-# define unlikely(x) (x)
-# endif
-#endif
-
-
-/**
- * Static (compile-time) assertion.
- * Basically, use COND to dimension an array. If COND is false/zero the
- * array size will be -1 and we'll get a compilation error.
- */
-#define STATIC_ASSERT(COND) \
- do { \
- (void) sizeof(char [1 - 2*!(COND)]); \
- } while (0)
-
-
#if defined(__cplusplus)
}
#endif
diff --git a/src/gallium/include/pipe/p_config.h b/src/mesa/pipe/p_config.h
index c8bd98dd..dd2febee 100644
--- a/src/gallium/include/pipe/p_config.h
+++ b/src/mesa/pipe/p_config.h
@@ -48,14 +48,6 @@
#include <limits.h>
/*
- * This has PIPE_ARCH_<ENDIANESS>_ENDIAN defines acquired
- * via meson and in the future might have other defines
- * if they are found to be easier done on meson than in
- * preprocessor macros
- */
-#include "config.h"
-
-/*
* Compiler
*/
@@ -85,13 +77,11 @@
#define PIPE_CC_ICL
#endif
-#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-#define PIPE_CC_SUNPRO
-#endif
-
/*
* Processor architecture
+ *
+ * (virglrenderer) This is detected by meson.
*/
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
@@ -100,96 +90,78 @@
#else
#define PIPE_ARCH_SSE
#endif
-#if defined(PIPE_CC_GCC) && !defined(__SSSE3__)
-/* #warning SSE3 support requires -msse3 compiler options */
+#if defined(PIPE_CC_GCC) && (__GNUC__ * 100 + __GNUC_MINOR__) < 409 && !defined(__SSSE3__)
+/* #warning SSE3 support requires -msse3 compiler options before GCC 4.9 */
#else
#define PIPE_ARCH_SSSE3
#endif
#endif
/*
+ * Endian detection.
+ */
+
+#include "util/u_endian.h"
+
+/*
* Auto-detect the operating system family.
- *
- * See subsystem below for a more fine-grained distinction.
*/
+#include "util/detect_os.h"
-#if defined(__linux__)
+#if DETECT_OS_LINUX
#define PIPE_OS_LINUX
+#endif
+
+#if DETECT_OS_UNIX
#define PIPE_OS_UNIX
#endif
-/*
- * Android defines __linux__ so PIPE_OS_LINUX and PIPE_OS_UNIX will also be
- * defined.
- */
-#if defined(ANDROID)
+#if DETECT_OS_ANDROID
#define PIPE_OS_ANDROID
#endif
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#if DETECT_OS_FREEBSD
#define PIPE_OS_FREEBSD
+#endif
+
+#if DETECT_OS_BSD
#define PIPE_OS_BSD
-#define PIPE_OS_UNIX
#endif
-#if defined(__OpenBSD__)
+#if DETECT_OS_OPENBSD
#define PIPE_OS_OPENBSD
-#define PIPE_OS_BSD
-#define PIPE_OS_UNIX
#endif
-#if defined(__NetBSD__)
+#if DETECT_OS_NETBSD
#define PIPE_OS_NETBSD
-#define PIPE_OS_BSD
-#define PIPE_OS_UNIX
#endif
-#if defined(__GNU__)
+#if DETECT_OS_DRAGONFLY
+#define PIPE_OS_DRAGONFLY
+#endif
+
+#if DETECT_OS_HURD
#define PIPE_OS_HURD
-#define PIPE_OS_UNIX
#endif
-#if defined(__sun)
+#if DETECT_OS_SOLARIS
#define PIPE_OS_SOLARIS
-#define PIPE_OS_UNIX
#endif
-#if defined(__APPLE__)
+#if DETECT_OS_APPLE
#define PIPE_OS_APPLE
-#define PIPE_OS_UNIX
#endif
-#if defined(_WIN32) || defined(WIN32)
+#if DETECT_OS_WINDOWS
#define PIPE_OS_WINDOWS
#endif
-#if defined(__HAIKU__)
+#if DETECT_OS_HAIKU
#define PIPE_OS_HAIKU
-#define PIPE_OS_UNIX
#endif
-#if defined(__CYGWIN__)
+#if DETECT_OS_CYGWIN
#define PIPE_OS_CYGWIN
-#define PIPE_OS_UNIX
#endif
-/*
- * Try to auto-detect the subsystem.
- *
- * NOTE: There is no way to auto-detect most of these.
- */
-
-#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
-#define PIPE_SUBSYSTEM_DRI
-#endif /* PIPE_OS_LINUX || PIPE_OS_BSD || PIPE_OS_SOLARIS */
-
-#if defined(PIPE_OS_WINDOWS)
-#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
-/* Windows User-space Library */
-#else
-#define PIPE_SUBSYSTEM_WINDOWS_USER
-#endif
-#endif /* PIPE_OS_WINDOWS */
-
-
#endif /* P_CONFIG_H_ */
diff --git a/src/mesa/util/anon_file.c b/src/mesa/util/anon_file.c
new file mode 100644
index 00000000..bb4848b5
--- /dev/null
+++ b/src/mesa/util/anon_file.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright © 2012 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Based on weston shared/os-compatibility.c
+ */
+
+#ifndef _WIN32
+#include "anon_file.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#if defined(HAVE_MEMFD_CREATE) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <sys/mman.h>
+#elif defined(__ANDROID__)
+#include <sys/syscall.h>
+#include <linux/memfd.h>
+#else
+#include <stdio.h>
+#endif
+
+#if !(defined(__FreeBSD__) || defined(HAVE_MEMFD_CREATE) || defined(HAVE_MKOSTEMP) || defined(__ANDROID__))
+static int
+set_cloexec_or_close(int fd)
+{
+ long flags;
+
+ if (fd == -1)
+ return -1;
+
+ flags = fcntl(fd, F_GETFD);
+ if (flags == -1)
+ goto err;
+
+ if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1)
+ goto err;
+
+ return fd;
+
+err:
+ close(fd);
+ return -1;
+}
+#endif
+
+#if !(defined(__FreeBSD__) || defined(HAVE_MEMFD_CREATE) || defined(__ANDROID__))
+static int
+create_tmpfile_cloexec(char *tmpname)
+{
+ int fd;
+
+#ifdef HAVE_MKOSTEMP
+ fd = mkostemp(tmpname, O_CLOEXEC);
+#else
+ fd = mkstemp(tmpname);
+#endif
+
+ if (fd < 0) {
+ return fd;
+ }
+
+#ifndef HAVE_MKOSTEMP
+ fd = set_cloexec_or_close(fd);
+#endif
+
+ unlink(tmpname);
+ return fd;
+}
+#endif
+
+/*
+ * Create a new, unique, anonymous file of the given size, and
+ * return the file descriptor for it. The file descriptor is set
+ * CLOEXEC. The file is immediately suitable for mmap()'ing
+ * the given size at offset zero.
+ *
+ * An optional name for debugging can be provided as the second argument.
+ *
+ * The file should not have a permanent backing store like a disk,
+ * but may have if XDG_RUNTIME_DIR is not properly implemented in OS.
+ *
+ * If memfd or SHM_ANON is supported, the filesystem is not touched at all.
+ * Otherwise, the file name is deleted from the file system.
+ *
+ * The file is suitable for buffer sharing between processes by
+ * transmitting the file descriptor over Unix sockets using the
+ * SCM_RIGHTS methods.
+ */
+int
+os_create_anonymous_file(off_t size, const char *debug_name)
+{
+ int fd, ret;
+#if defined(HAVE_MEMFD_CREATE)
+ if (!debug_name)
+ debug_name = "mesa-shared";
+ fd = memfd_create(debug_name, MFD_CLOEXEC | MFD_ALLOW_SEALING);
+#elif defined(__ANDROID__)
+ if (!debug_name)
+ debug_name = "mesa-shared";
+ fd = syscall(SYS_memfd_create, debug_name, MFD_CLOEXEC | MFD_ALLOW_SEALING);
+#elif defined(__FreeBSD__)
+ fd = shm_open(SHM_ANON, O_CREAT | O_RDWR | O_CLOEXEC, 0600);
+#elif defined(__OpenBSD__)
+ char template[] = "/tmp/mesa-XXXXXXXXXX";
+ fd = shm_mkstemp(template);
+ if (fd != -1)
+ shm_unlink(template);
+#else
+ const char *path;
+ char *name;
+
+ path = getenv("XDG_RUNTIME_DIR");
+ if (!path) {
+ errno = ENOENT;
+ return -1;
+ }
+
+ if (debug_name)
+ asprintf(&name, "%s/mesa-shared-%s-XXXXXX", path, debug_name);
+ else
+ asprintf(&name, "%s/mesa-shared-XXXXXX", path);
+ if (!name)
+ return -1;
+
+ fd = create_tmpfile_cloexec(name);
+
+ free(name);
+#endif
+
+ if (fd < 0)
+ return -1;
+
+ ret = ftruncate(fd, size);
+ if (ret < 0) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+#endif
diff --git a/src/mesa/util/anon_file.h b/src/mesa/util/anon_file.h
new file mode 100644
index 00000000..790537b7
--- /dev/null
+++ b/src/mesa/util/anon_file.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2012 Collabora, Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _ANON_FILE_H_
+#define _ANON_FILE_H_
+
+#include <sys/types.h>
+
+int os_create_anonymous_file(off_t size, const char *debug_name);
+
+#endif
diff --git a/src/gallium/auxiliary/os/os_misc.c b/src/mesa/util/bitscan.c
index 447e7208..88d7f94e 100644
--- a/src/gallium/auxiliary/os/os_misc.c
+++ b/src/mesa/util/bitscan.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2008-2010 Vmware, Inc.
+ * Copyright 2008 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,66 +26,55 @@
**************************************************************************/
-#include "os_misc.h"
-
-#include <stdarg.h>
-
-
-#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
-
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
-#endif
-#include <windows.h>
-#include <stdio.h>
+#include "bitscan.h"
+#ifdef HAVE___BUILTIN_FFS
+#elif defined(_MSC_VER) && (_M_IX86 || _M_ARM || _M_AMD64 || _M_IA64)
#else
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#endif
-
-
-void
-os_log_message(const char *message)
+int
+ffs(int i)
{
- /* If the GALLIUM_LOG_FILE environment variable is set to a valid filename,
- * write all messages to that file.
- */
- static FILE *fout = NULL;
-
- if (!fout) {
- /* one-time init */
- const char *filename = os_get_option("GALLIUM_LOG_FILE");
- if (filename)
- fout = fopen(filename, "w");
- if (!fout)
- fout = stderr;
+ int bit = 0;
+ if (!i)
+ return bit;
+ if (!(i & 0xffff)) {
+ bit += 16;
+ i >>= 16;
}
-
-#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
- OutputDebugStringA(message);
- if(GetConsoleWindow() && !IsDebuggerPresent()) {
- fflush(stdout);
- fputs(message, fout);
- fflush(fout);
+ if (!(i & 0xff)) {
+ bit += 8;
+ i >>= 8;
}
- else if (fout != stderr) {
- fputs(message, fout);
- fflush(fout);
+ if (!(i & 0xf)) {
+ bit += 4;
+ i >>= 4;
}
-#else /* !PIPE_SUBSYSTEM_WINDOWS */
- fflush(stdout);
- fputs(message, fout);
- fflush(fout);
-#endif
+ if (!(i & 0x3)) {
+ bit += 2;
+ i >>= 2;
+ }
+ if (!(i & 0x1))
+ bit += 1;
+ return bit + 1;
}
+#endif
-
-const char *
-os_get_option(const char *name)
+#ifdef HAVE___BUILTIN_FFSLL
+#elif defined(_MSC_VER) && (_M_AMD64 || _M_ARM64 || _M_IA64)
+#else
+int
+ffsll(long long int val)
{
- return getenv(name);
-}
+ int bit;
+
+ bit = ffs((unsigned) (val & 0xffffffff));
+ if (bit != 0)
+ return bit;
+
+ bit = ffs((unsigned) (val >> 32));
+ if (bit != 0)
+ return 32 + bit;
+ return 0;
+}
+#endif
diff --git a/src/mesa/util/bitscan.h b/src/mesa/util/bitscan.h
new file mode 100644
index 00000000..105b7ba3
--- /dev/null
+++ b/src/mesa/util/bitscan.h
@@ -0,0 +1,356 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef BITSCAN_H
+#define BITSCAN_H
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif
+
+#if defined(__POPCNT__)
+#include <popcntintrin.h>
+#endif
+
+#include "c99_compat.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Find first bit set in word. Least significant bit is 1.
+ * Return 0 if no bits set.
+ */
+#ifdef HAVE___BUILTIN_FFS
+#define ffs __builtin_ffs
+#elif defined(_MSC_VER) && (_M_IX86 || _M_ARM || _M_AMD64 || _M_IA64)
+static inline
+int ffs(int i)
+{
+ unsigned long index;
+ if (_BitScanForward(&index, i))
+ return index + 1;
+ else
+ return 0;
+}
+#else
+extern
+int ffs(int i);
+#endif
+
+#ifdef HAVE___BUILTIN_FFSLL
+#define ffsll __builtin_ffsll
+#elif defined(_MSC_VER) && (_M_AMD64 || _M_ARM64 || _M_IA64)
+static inline int
+ffsll(long long int i)
+{
+ unsigned long index;
+ if (_BitScanForward64(&index, i))
+ return index + 1;
+ else
+ return 0;
+}
+#else
+extern int
+ffsll(long long int val);
+#endif
+
+
+/* Destructively loop over all of the bits in a mask as in:
+ *
+ * while (mymask) {
+ * int i = u_bit_scan(&mymask);
+ * ... process element i
+ * }
+ *
+ */
+static inline int
+u_bit_scan(unsigned *mask)
+{
+ const int i = ffs(*mask) - 1;
+ *mask ^= (1u << i);
+ return i;
+}
+
+#define u_foreach_bit(b, dword) \
+ for (uint32_t __dword = (dword), b; \
+ ((b) = ffs(__dword) - 1, __dword); \
+ __dword &= ~(1 << (b)))
+
+static inline int
+u_bit_scan64(uint64_t *mask)
+{
+ const int i = ffsll(*mask) - 1;
+ *mask ^= (((uint64_t)1) << i);
+ return i;
+}
+
+#define u_foreach_bit64(b, dword) \
+ for (uint64_t __dword = (dword), b; \
+ ((b) = ffsll(__dword) - 1, __dword); \
+ __dword &= ~(1ull << (b)))
+
+/* Determine if an unsigned value is a power of two.
+ *
+ * \note
+ * Zero is treated as a power of two.
+ */
+static inline bool
+util_is_power_of_two_or_zero(unsigned v)
+{
+ return (v & (v - 1)) == 0;
+}
+
+/* Determine if an uint64_t value is a power of two.
+ *
+ * \note
+ * Zero is treated as a power of two.
+ */
+static inline bool
+util_is_power_of_two_or_zero64(uint64_t v)
+{
+ return (v & (v - 1)) == 0;
+}
+
+/* Determine if an unsigned value is a power of two.
+ *
+ * \note
+ * Zero is \b not treated as a power of two.
+ */
+static inline bool
+util_is_power_of_two_nonzero(unsigned v)
+{
+ /* __POPCNT__ is different from HAVE___BUILTIN_POPCOUNT. The latter
+ * indicates the existence of the __builtin_popcount function. The former
+ * indicates that _mm_popcnt_u32 exists and is a native instruction.
+ *
+ * The other alternative is to use SSE 4.2 compile-time flags. This has
+ * two drawbacks. First, there is currently no build infrastructure for
+ * SSE 4.2 (only 4.1), so that would have to be added. Second, some AMD
+ * CPUs support POPCNT but not SSE 4.2 (e.g., Barcelona).
+ */
+#ifdef __POPCNT__
+ return _mm_popcnt_u32(v) == 1;
+#else
+ return v != 0 && (v & (v - 1)) == 0;
+#endif
+}
+
+/* For looping over a bitmask when you want to loop over consecutive bits
+ * manually, for example:
+ *
+ * while (mask) {
+ * int start, count, i;
+ *
+ * u_bit_scan_consecutive_range(&mask, &start, &count);
+ *
+ * for (i = 0; i < count; i++)
+ * ... process element (start+i)
+ * }
+ */
+static inline void
+u_bit_scan_consecutive_range(unsigned *mask, int *start, int *count)
+{
+ if (*mask == 0xffffffff) {
+ *start = 0;
+ *count = 32;
+ *mask = 0;
+ return;
+ }
+ *start = ffs(*mask) - 1;
+ *count = ffs(~(*mask >> *start)) - 1;
+ *mask &= ~(((1u << *count) - 1) << *start);
+}
+
+static inline void
+u_bit_scan_consecutive_range64(uint64_t *mask, int *start, int *count)
+{
+ if (*mask == ~0ull) {
+ *start = 0;
+ *count = 64;
+ *mask = 0;
+ return;
+ }
+ *start = ffsll(*mask) - 1;
+ *count = ffsll(~(*mask >> *start)) - 1;
+ *mask &= ~(((((uint64_t)1) << *count) - 1) << *start);
+}
+
+
+/**
+ * Find last bit set in a word. The least significant bit is 1.
+ * Return 0 if no bits are set.
+ * Essentially ffs() in the reverse direction.
+ */
+static inline unsigned
+util_last_bit(unsigned u)
+{
+#if defined(HAVE___BUILTIN_CLZ)
+ return u == 0 ? 0 : 32 - __builtin_clz(u);
+#elif defined(_MSC_VER) && (_M_IX86 || _M_ARM || _M_AMD64 || _M_IA64)
+ unsigned long index;
+ if (_BitScanReverse(&index, u))
+ return index + 1;
+ else
+ return 0;
+#else
+ unsigned r = 0;
+ while (u) {
+ r++;
+ u >>= 1;
+ }
+ return r;
+#endif
+}
+
+/**
+ * Find last bit set in a word. The least significant bit is 1.
+ * Return 0 if no bits are set.
+ * Essentially ffsll() in the reverse direction.
+ */
+static inline unsigned
+util_last_bit64(uint64_t u)
+{
+#if defined(HAVE___BUILTIN_CLZLL)
+ return u == 0 ? 0 : 64 - __builtin_clzll(u);
+#elif defined(_MSC_VER) && (_M_AMD64 || _M_ARM64 || _M_IA64)
+ unsigned long index;
+ if (_BitScanReverse64(&index, u))
+ return index + 1;
+ else
+ return 0;
+#else
+ unsigned r = 0;
+ while (u) {
+ r++;
+ u >>= 1;
+ }
+ return r;
+#endif
+}
+
+/**
+ * Find last bit in a word that does not match the sign bit. The least
+ * significant bit is 1.
+ * Return 0 if no bits are set.
+ */
+static inline unsigned
+util_last_bit_signed(int i)
+{
+ if (i >= 0)
+ return util_last_bit(i);
+ else
+ return util_last_bit(~(unsigned)i);
+}
+
+/* Returns a bitfield in which the first count bits starting at start are
+ * set.
+ */
+static inline unsigned
+u_bit_consecutive(unsigned start, unsigned count)
+{
+ assert(start + count <= 32);
+ if (count == 32)
+ return ~0;
+ return ((1u << count) - 1) << start;
+}
+
+static inline uint64_t
+u_bit_consecutive64(unsigned start, unsigned count)
+{
+ assert(start + count <= 64);
+ if (count == 64)
+ return ~(uint64_t)0;
+ return (((uint64_t)1 << count) - 1) << start;
+}
+
+/**
+ * Return number of bits set in n.
+ */
+static inline unsigned
+util_bitcount(unsigned n)
+{
+#if defined(HAVE___BUILTIN_POPCOUNT)
+ return __builtin_popcount(n);
+#else
+ /* K&R classic bitcount.
+ *
+ * For each iteration, clear the LSB from the bitfield.
+ * Requires only one iteration per set bit, instead of
+ * one iteration per bit less than highest set bit.
+ */
+ unsigned bits;
+ for (bits = 0; n; bits++) {
+ n &= n - 1;
+ }
+ return bits;
+#endif
+}
+
+/**
+ * Return the number of bits set in n using the native popcnt instruction.
+ * The caller is responsible for ensuring that popcnt is supported by the CPU.
+ *
+ * gcc doesn't use it if -mpopcnt or -march= that has popcnt is missing.
+ *
+ */
+static inline unsigned
+util_popcnt_inline_asm(unsigned n)
+{
+#if defined(USE_X86_64_ASM) || defined(USE_X86_ASM)
+ uint32_t out;
+ __asm volatile("popcnt %1, %0" : "=r"(out) : "r"(n));
+ return out;
+#else
+ /* We should never get here by accident, but I'm sure it'll happen. */
+ return util_bitcount(n);
+#endif
+}
+
+static inline unsigned
+util_bitcount64(uint64_t n)
+{
+#ifdef HAVE___BUILTIN_POPCOUNTLL
+ return __builtin_popcountll(n);
+#else
+ return util_bitcount(n) + util_bitcount(n >> 32);
+#endif
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* BITSCAN_H */
diff --git a/src/mesa/util/compiler.h b/src/mesa/util/compiler.h
new file mode 100644
index 00000000..da602cfa
--- /dev/null
+++ b/src/mesa/util/compiler.h
@@ -0,0 +1,89 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file compiler.h
+ * Compiler-related stuff.
+ */
+
+
+#ifndef COMPILER_H
+#define COMPILER_H
+
+
+#include <assert.h>
+
+#include "util/macros.h"
+
+#include "c99_compat.h" /* inline, __func__, etc. */
+
+
+/**
+ * Either define MESA_BIG_ENDIAN or MESA_LITTLE_ENDIAN, and CPU_TO_LE32.
+ * Do not use these unless absolutely necessary!
+ * Try to use a runtime test instead.
+ * For now, only used by some DRI hardware drivers for color/texel packing.
+ */
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
+#if defined(__linux__)
+#include <byteswap.h>
+#define CPU_TO_LE32( x ) bswap_32( x )
+#elif defined(__APPLE__)
+#include <CoreFoundation/CFByteOrder.h>
+#define CPU_TO_LE32( x ) CFSwapInt32HostToLittle( x )
+#elif defined(__OpenBSD__)
+#include <sys/types.h>
+#define CPU_TO_LE32( x ) htole32( x )
+#else /*__linux__ */
+#include <sys/endian.h>
+#define CPU_TO_LE32( x ) bswap32( x )
+#endif /*__linux__*/
+#define MESA_BIG_ENDIAN 1
+#else
+#define CPU_TO_LE32( x ) ( x )
+#define MESA_LITTLE_ENDIAN 1
+#endif
+#define LE32_TO_CPU( x ) CPU_TO_LE32( x )
+
+
+
+#define IEEE_ONE 0x3f800000
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+#if __cplusplus >= 201703L || __STDC_VERSION__ > 201710L
+/* Standard C++17/C23 attribute */
+#define FALLTHROUGH [[fallthrough]]
+#elif __has_attribute(fallthrough)
+/* Non-standard but supported by at least gcc and clang */
+#define FALLTHROUGH __attribute__((fallthrough))
+#else
+#define FALLTHROUGH do { } while(0)
+#endif
+
+#endif /* COMPILER_H */
diff --git a/src/mesa/util/detect_os.h b/src/mesa/util/detect_os.h
new file mode 100644
index 00000000..994a1efe
--- /dev/null
+++ b/src/mesa/util/detect_os.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2008 VMware, Inc. */
+
+/**
+ * Auto-detect the operating system family.
+ *
+ * See also:
+ * - http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+ * - echo | gcc -dM -E - | sort
+ * - http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+ *
+ * @author José Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef DETECT_OS_H
+#define DETECT_OS_H
+
+#if defined(__linux__)
+#define DETECT_OS_LINUX 1
+#define DETECT_OS_UNIX 1
+#endif
+
+/*
+ * Android defines __linux__, so DETECT_OS_LINUX and DETECT_OS_UNIX will
+ * also be defined.
+ */
+#if defined(__ANDROID__)
+#define DETECT_OS_ANDROID 1
+#endif
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#define DETECT_OS_FREEBSD 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__OpenBSD__)
+#define DETECT_OS_OPENBSD 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__NetBSD__)
+#define DETECT_OS_NETBSD 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__DragonFly__)
+#define DETECT_OS_DRAGONFLY 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__GNU__)
+#define DETECT_OS_HURD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__sun)
+#define DETECT_OS_SOLARIS 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__APPLE__)
+#define DETECT_OS_APPLE 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(_WIN32) || defined(WIN32)
+#define DETECT_OS_WINDOWS 1
+#endif
+
+#if defined(__HAIKU__)
+#define DETECT_OS_HAIKU 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__CYGWIN__)
+#define DETECT_OS_CYGWIN 1
+#define DETECT_OS_UNIX 1
+#endif
+
+
+/*
+ * Make sure DETECT_OS_* are always defined, so that they can be used with #if
+ */
+#ifndef DETECT_OS_ANDROID
+#define DETECT_OS_ANDROID 0
+#endif
+#ifndef DETECT_OS_APPLE
+#define DETECT_OS_APPLE 0
+#endif
+#ifndef DETECT_OS_BSD
+#define DETECT_OS_BSD 0
+#endif
+#ifndef DETECT_OS_CYGWIN
+#define DETECT_OS_CYGWIN 0
+#endif
+#ifndef DETECT_OS_DRAGONFLY
+#define DETECT_OS_DRAGONFLY 0
+#endif
+#ifndef DETECT_OS_FREEBSD
+#define DETECT_OS_FREEBSD 0
+#endif
+#ifndef DETECT_OS_HAIKU
+#define DETECT_OS_HAIKU 0
+#endif
+#ifndef DETECT_OS_HURD
+#define DETECT_OS_HURD 0
+#endif
+#ifndef DETECT_OS_LINUX
+#define DETECT_OS_LINUX 0
+#endif
+#ifndef DETECT_OS_NETBSD
+#define DETECT_OS_NETBSD 0
+#endif
+#ifndef DETECT_OS_OPENBSD
+#define DETECT_OS_OPENBSD 0
+#endif
+#ifndef DETECT_OS_SOLARIS
+#define DETECT_OS_SOLARIS 0
+#endif
+#ifndef DETECT_OS_UNIX
+#define DETECT_OS_UNIX 0
+#endif
+#ifndef DETECT_OS_WINDOWS
+#define DETECT_OS_WINDOWS 0
+#endif
+
+#endif /* DETECT_OS_H */
diff --git a/src/mesa/util/fast_urem_by_const.h b/src/mesa/util/fast_urem_by_const.h
new file mode 100644
index 00000000..beb253d2
--- /dev/null
+++ b/src/mesa/util/fast_urem_by_const.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright © 2010 Valve Software
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+/*
+ * Code for fast 32-bit unsigned remainder, based off of "Faster Remainder by
+ * Direct Computation: Applications to Compilers and Software Libraries,"
+ * available at https://arxiv.org/pdf/1902.01961.pdf.
+ *
+ * util_fast_urem32(n, d, REMAINDER_MAGIC(d)) returns the same thing as
+ * n % d for any unsigned n and d, however it compiles down to only a few
+ * multiplications, so it should be faster than plain uint32_t modulo if the
+ * same divisor is used many times.
+ */
+
+#define REMAINDER_MAGIC(divisor) \
+ ((uint64_t) ~0ull / (divisor) + 1)
+
+/*
+ * Get bits 64-95 (the top 32 bits of the 96-bit product) of a 32x64-bit multiply. If __int128_t is available, we use
+ * it, which usually compiles down to one instruction on 64-bit architectures.
+ * Otherwise on 32-bit architectures we usually get four instructions (one
+ * 32x32->64 multiply, one 32x32->32 multiply, and one 64-bit add).
+ */
+
+static inline uint32_t
+_mul32by64_hi(uint32_t a, uint64_t b)
+{
+#ifdef HAVE_UINT128
+ return ((__uint128_t) b * a) >> 64;
+#else
+ /*
+ * Let b = b0 + 2^32 * b1. Then a * b = a * b0 + 2^32 * a * b1. We would
+ * have to do a 96-bit addition to get the full result, except that only
+ * one term has non-zero lower 32 bits, which means that to get the high 32
+ * bits, we only have to add the high 64 bits of each term. Unfortunately,
+ * we have to do the 64-bit addition in case the low 32 bits overflow.
+ */
+ uint32_t b0 = (uint32_t) b;
+ uint32_t b1 = b >> 32;
+ return ((((uint64_t) a * b0) >> 32) + (uint64_t) a * b1) >> 32;
+#endif
+}
+
+static inline uint32_t
+util_fast_urem32(uint32_t n, uint32_t d, uint64_t magic)
+{
+ uint64_t lowbits = magic * n;
+ uint32_t result = _mul32by64_hi(d, lowbits);
+ assert(result == n % d);
+ return result;
+}
+
diff --git a/src/mesa/util/futex.h b/src/mesa/util/futex.h
new file mode 100644
index 00000000..43097f4c
--- /dev/null
+++ b/src/mesa/util/futex.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright © 2015 Intel
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UTIL_FUTEX_H
+#define UTIL_FUTEX_H
+
+#if defined(HAVE_LINUX_FUTEX_H)
+#define UTIL_FUTEX_SUPPORTED 1
+
+#include <limits.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+
+static inline long sys_futex(void *addr1, int op, int val1, const struct timespec *timeout, void *addr2, int val3)
+{
+ return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+static inline int futex_wake(uint32_t *addr, int count)
+{
+ return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
+}
+
+static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
+{
+ /* FUTEX_WAIT_BITSET with FUTEX_BITSET_MATCH_ANY is equivalent to
+ * FUTEX_WAIT, except that it treats the timeout as absolute. */
+ return sys_futex(addr, FUTEX_WAIT_BITSET, value, timeout, NULL,
+ FUTEX_BITSET_MATCH_ANY);
+}
+
+#elif defined(__FreeBSD__)
+#define UTIL_FUTEX_SUPPORTED 1
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/umtx.h>
+#include <sys/time.h>
+
+static inline int futex_wake(uint32_t *addr, int count)
+{
+ assert(count == (int)(uint32_t)count); /* Check that bits weren't discarded */
+ return _umtx_op(addr, UMTX_OP_WAKE, (uint32_t)count, NULL, NULL) == -1 ? errno : 0;
+}
+
+static inline int futex_wait(uint32_t *addr, int32_t value, struct timespec *timeout)
+{
+ void *uaddr = NULL, *uaddr2 = NULL;
+ struct _umtx_time tmo = {
+ ._flags = UMTX_ABSTIME,
+ ._clockid = CLOCK_MONOTONIC
+ };
+
+ assert(value == (int)(uint32_t)value); /* Check that bits weren't discarded */
+
+ if (timeout != NULL) {
+ tmo._timeout = *timeout;
+ uaddr = (void *)(uintptr_t)sizeof(tmo);
+ uaddr2 = (void *)&tmo;
+ }
+
+ return _umtx_op(addr, UMTX_OP_WAIT_UINT, (uint32_t)value, uaddr, uaddr2) == -1 ? errno : 0;
+}
+
+#elif defined(__OpenBSD__)
+#define UTIL_FUTEX_SUPPORTED 1
+
+#include <sys/time.h>
+#include <sys/futex.h>
+
+static inline int futex_wake(uint32_t *addr, int count)
+{
+ return futex(addr, FUTEX_WAKE, count, NULL, NULL);
+}
+
+static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
+{
+ struct timespec tsnow, tsrel;
+
+ if (timeout == NULL)
+ return futex(addr, FUTEX_WAIT, value, NULL, NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &tsnow);
+ if (timespeccmp(&tsnow, timeout, <))
+ timespecsub(timeout, &tsnow, &tsrel);
+ else
+ timespecclear(&tsrel);
+ return futex(addr, FUTEX_WAIT, value, &tsrel, NULL);
+}
+
+#else
+#define UTIL_FUTEX_SUPPORTED 0
+#endif
+
+#endif /* UTIL_FUTEX_H */
diff --git a/src/mesa/util/hash_table.c b/src/mesa/util/hash_table.c
new file mode 100644
index 00000000..1811ee74
--- /dev/null
+++ b/src/mesa/util/hash_table.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ * Copyright © 1988-2004 Keith Packard and Bart Massey.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the names of the authors
+ * or their institutions shall not be used in advertising or
+ * otherwise to promote the sale, use or other dealings in this
+ * Software without prior written authorization from the
+ * authors.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ */
+
+/**
+ * Implements an open-addressing, linear-reprobing hash table.
+ *
+ * For more information, see:
+ *
+ * http://cgit.freedesktop.org/~anholt/hash_table/tree/README
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "hash_table.h"
+#include "ralloc.h"
+#include "macros.h"
+#include "u_memory.h"
+#include "fast_urem_by_const.h"
+#include "util/u_memory.h"
+
+#define XXH_INLINE_ALL
+#include "xxhash.h"
+
+/**
+ * Magic number that gets stored outside of the struct hash_table.
+ *
+ * The hash table needs a particular pointer to be the marker for a key that
+ * was deleted from the table, along with NULL for the "never allocated in the
+ * table" marker. Legacy GL allows any GLuint to be used as a GL object name,
+ * and we use a 1:1 mapping from GLuints to key pointers, so we need to be
+ * able to track a GLuint that happens to match the deleted key outside of
+ * struct hash_table. We tell the hash table to use "1" as the deleted key
+ * value, so that we test the deleted-key-in-the-table path as best we can.
+ */
+#define DELETED_KEY_VALUE 1
+
+static inline void *
+uint_key(unsigned id)
+{
+ return (void *)(uintptr_t) id;
+}
+
+static const uint32_t deleted_key_value;
+
+/**
+ * From Knuth -- a good choice for hash/rehash values is p, p-2 where
+ * p and p-2 are both prime. These tables are sized to have an extra 10%
+ * free to avoid exponential performance degradation as the hash table fills
+ */
+static const struct {
+ uint32_t max_entries, size, rehash;
+ uint64_t size_magic, rehash_magic;
+} hash_sizes[] = {
+#define ENTRY(max_entries, size, rehash) \
+ { max_entries, size, rehash, \
+ REMAINDER_MAGIC(size), REMAINDER_MAGIC(rehash) }
+
+ ENTRY(2, 5, 3 ),
+ ENTRY(4, 7, 5 ),
+ ENTRY(8, 13, 11 ),
+ ENTRY(16, 19, 17 ),
+ ENTRY(32, 43, 41 ),
+ ENTRY(64, 73, 71 ),
+ ENTRY(128, 151, 149 ),
+ ENTRY(256, 283, 281 ),
+ ENTRY(512, 571, 569 ),
+ ENTRY(1024, 1153, 1151 ),
+ ENTRY(2048, 2269, 2267 ),
+ ENTRY(4096, 4519, 4517 ),
+ ENTRY(8192, 9013, 9011 ),
+ ENTRY(16384, 18043, 18041 ),
+ ENTRY(32768, 36109, 36107 ),
+ ENTRY(65536, 72091, 72089 ),
+ ENTRY(131072, 144409, 144407 ),
+ ENTRY(262144, 288361, 288359 ),
+ ENTRY(524288, 576883, 576881 ),
+ ENTRY(1048576, 1153459, 1153457 ),
+ ENTRY(2097152, 2307163, 2307161 ),
+ ENTRY(4194304, 4613893, 4613891 ),
+ ENTRY(8388608, 9227641, 9227639 ),
+ ENTRY(16777216, 18455029, 18455027 ),
+ ENTRY(33554432, 36911011, 36911009 ),
+ ENTRY(67108864, 73819861, 73819859 ),
+ ENTRY(134217728, 147639589, 147639587 ),
+ ENTRY(268435456, 295279081, 295279079 ),
+ ENTRY(536870912, 590559793, 590559791 ),
+ ENTRY(1073741824, 1181116273, 1181116271 ),
+ ENTRY(2147483648ul, 2362232233ul, 2362232231ul )
+};
+
+ASSERTED static inline bool
+key_pointer_is_reserved(const struct hash_table *ht, const void *key)
+{
+ return key == NULL || key == ht->deleted_key;
+}
+
+static int
+entry_is_free(const struct hash_entry *entry)
+{
+ return entry->key == NULL;
+}
+
+static int
+entry_is_deleted(const struct hash_table *ht, struct hash_entry *entry)
+{
+ return entry->key == ht->deleted_key;
+}
+
+static int
+entry_is_present(const struct hash_table *ht, struct hash_entry *entry)
+{
+ return entry->key != NULL && entry->key != ht->deleted_key;
+}
+
+bool
+_mesa_hash_table_init(struct hash_table *ht,
+ void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ ht->size_index = 0;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->key_hash_function = key_hash_function;
+ ht->key_equals_function = key_equals_function;
+ ht->table = rzalloc_array(mem_ctx, struct hash_entry, ht->size);
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+ ht->deleted_key = &deleted_key_value;
+
+ return ht->table != NULL;
+}
+
+struct hash_table *
+_mesa_hash_table_create(void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ struct hash_table *ht;
+
+ /* mem_ctx is used to allocate the hash table, but the hash table is used
+ * to allocate all of the suballocations.
+ */
+ ht = ralloc(mem_ctx, struct hash_table);
+ if (ht == NULL)
+ return NULL;
+
+ if (!_mesa_hash_table_init(ht, ht, key_hash_function, key_equals_function)) {
+ ralloc_free(ht);
+ return NULL;
+ }
+
+ return ht;
+}
+
+static uint32_t
+key_u32_hash(const void *key)
+{
+ uint32_t u = (uint32_t)(uintptr_t)key;
+ return _mesa_hash_uint(&u);
+}
+
+static bool
+key_u32_equals(const void *a, const void *b)
+{
+ return (uint32_t)(uintptr_t)a == (uint32_t)(uintptr_t)b;
+}
+
+/* key == 0 and key == deleted_key are not allowed */
+struct hash_table *
+_mesa_hash_table_create_u32_keys(void *mem_ctx)
+{
+ return _mesa_hash_table_create(mem_ctx, key_u32_hash, key_u32_equals);
+}
+
+struct hash_table *
+_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx)
+{
+ struct hash_table *ht;
+
+ ht = ralloc(dst_mem_ctx, struct hash_table);
+ if (ht == NULL)
+ return NULL;
+
+ memcpy(ht, src, sizeof(struct hash_table));
+
+ ht->table = ralloc_array(ht, struct hash_entry, ht->size);
+ if (ht->table == NULL) {
+ ralloc_free(ht);
+ return NULL;
+ }
+
+ memcpy(ht->table, src->table, ht->size * sizeof(struct hash_entry));
+
+ return ht;
+}
+
+/**
+ * Frees the given hash table.
+ *
+ * If delete_function is passed, it gets called on each entry present before
+ * freeing.
+ */
+void
+_mesa_hash_table_destroy(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ if (!ht)
+ return;
+
+ if (delete_function) {
+ hash_table_foreach(ht, entry) {
+ delete_function(entry);
+ }
+ }
+ ralloc_free(ht);
+}
+
+static void
+hash_table_clear_fast(struct hash_table *ht)
+{
+ memset(ht->table, 0, sizeof(struct hash_entry) * hash_sizes[ht->size_index].size);
+ ht->entries = ht->deleted_entries = 0;
+}
+
+/**
+ * Deletes all entries of the given hash table without deleting the table
+ * itself or changing its structure.
+ *
+ * If delete_function is passed, it gets called on each entry present.
+ */
+void
+_mesa_hash_table_clear(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ if (!ht)
+ return;
+
+ struct hash_entry *entry;
+
+ if (delete_function) {
+ for (entry = ht->table; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(ht, entry))
+ delete_function(entry);
+
+ entry->key = NULL;
+ }
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+ } else
+ hash_table_clear_fast(ht);
+}
+
+/** Sets the value of the key pointer used for deleted entries in the table.
+ *
+ * The assumption is that usually keys are actual pointers, so we use a
+ * default value of a pointer to an arbitrary piece of storage in the library.
+ * But in some cases a consumer wants to store some other sort of value in the
+ * table, like a uint32_t, in which case that pointer may conflict with one of
+ * their valid keys. This lets that user select a safe value.
+ *
+ * This must be called before any keys are actually deleted from the table.
+ */
+void
+_mesa_hash_table_set_deleted_key(struct hash_table *ht, const void *deleted_key)
+{
+ ht->deleted_key = deleted_key;
+}
+
+static struct hash_entry *
+hash_table_search(struct hash_table *ht, uint32_t hash, const void *key)
+{
+ assert(!key_pointer_is_reserved(ht, key));
+
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (entry_is_free(entry)) {
+ return NULL;
+ } else if (entry_is_present(ht, entry) && entry->hash == hash) {
+ if (ht->key_equals_function(key, entry->key)) {
+ return entry;
+ }
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_hash_address);
+
+ return NULL;
+}
+
+/**
+ * Finds a hash table entry with the given key and hash of that key.
+ *
+ * Returns NULL if no entry is found. Note that the data pointer may be
+ * modified by the user.
+ */
+struct hash_entry *
+_mesa_hash_table_search(struct hash_table *ht, const void *key)
+{
+ assert(ht->key_hash_function);
+ return hash_table_search(ht, ht->key_hash_function(key), key);
+}
+
+struct hash_entry *
+_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key)
+{
+ assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
+ return hash_table_search(ht, hash, key);
+}
+
+static struct hash_entry *
+hash_table_insert(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data);
+
+static void
+hash_table_insert_rehash(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (likely(entry->key == NULL)) {
+ entry->hash = hash;
+ entry->key = key;
+ entry->data = data;
+ return;
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (true);
+}
+
+static void
+_mesa_hash_table_rehash(struct hash_table *ht, unsigned new_size_index)
+{
+ struct hash_table old_ht;
+ struct hash_entry *table;
+
+ if (ht->size_index == new_size_index && ht->deleted_entries == ht->max_entries) {
+ hash_table_clear_fast(ht);
+ assert(!ht->entries);
+ return;
+ }
+
+ if (new_size_index >= ARRAY_SIZE(hash_sizes))
+ return;
+
+ table = rzalloc_array(ralloc_parent(ht->table), struct hash_entry,
+ hash_sizes[new_size_index].size);
+ if (table == NULL)
+ return;
+
+ old_ht = *ht;
+
+ ht->table = table;
+ ht->size_index = new_size_index;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+
+ hash_table_foreach(&old_ht, entry) {
+ hash_table_insert_rehash(ht, entry->hash, entry->key, entry->data);
+ }
+
+ ht->entries = old_ht.entries;
+
+ ralloc_free(old_ht.table);
+}
+
+static struct hash_entry *
+hash_table_insert(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ struct hash_entry *available_entry = NULL;
+
+ assert(!key_pointer_is_reserved(ht, key));
+
+ if (ht->entries >= ht->max_entries) {
+ _mesa_hash_table_rehash(ht, ht->size_index + 1);
+ } else if (ht->deleted_entries + ht->entries >= ht->max_entries) {
+ _mesa_hash_table_rehash(ht, ht->size_index);
+ }
+
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (!entry_is_present(ht, entry)) {
+ /* Stash the first available entry we find */
+ if (available_entry == NULL)
+ available_entry = entry;
+ if (entry_is_free(entry))
+ break;
+ }
+
+ /* Implement replacement when another insert happens
+ * with a matching key. This is a relatively common
+ * feature of hash tables, with the alternative
+ * generally being "insert the new value as well, and
+ * return it first when the key is searched for".
+ *
+ * Note that the hash table doesn't have a delete
+ * callback. If freeing of old data pointers is
+ * required to avoid memory leaks, perform a search
+ * before inserting.
+ */
+ if (!entry_is_deleted(ht, entry) &&
+ entry->hash == hash &&
+ ht->key_equals_function(key, entry->key)) {
+ entry->key = key;
+ entry->data = data;
+ return entry;
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_hash_address);
+
+ if (available_entry) {
+ if (entry_is_deleted(ht, available_entry))
+ ht->deleted_entries--;
+ available_entry->hash = hash;
+ available_entry->key = key;
+ available_entry->data = data;
+ ht->entries++;
+ return available_entry;
+ }
+
+ /* We could hit here if a required resize failed. An unchecked-malloc
+ * application could ignore this result.
+ */
+ return NULL;
+}
+
+/**
+ * Inserts the key with the given hash into the table.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash,
+ * so previously found hash_entries are no longer valid after this function.
+ */
+struct hash_entry *
+_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data)
+{
+ assert(ht->key_hash_function);
+ return hash_table_insert(ht, ht->key_hash_function(key), key, data);
+}
+
+struct hash_entry *
+_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
+ return hash_table_insert(ht, hash, key, data);
+}
+
+/**
+ * This function deletes the given hash table entry.
+ *
+ * Note that deletion doesn't otherwise modify the table, so an iteration over
+ * the table deleting entries is safe.
+ */
+void
+_mesa_hash_table_remove(struct hash_table *ht,
+ struct hash_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->key = ht->deleted_key;
+ ht->entries--;
+ ht->deleted_entries++;
+}
+
+/**
+ * Removes the entry with the corresponding key, if exists.
+ */
+void _mesa_hash_table_remove_key(struct hash_table *ht,
+ const void *key)
+{
+ _mesa_hash_table_remove(ht, _mesa_hash_table_search(ht, key));
+}
+
+/**
+ * This function is an iterator over the hash_table when no deleted entries are present.
+ *
+ * Pass in NULL for the first entry, as in the start of a for loop.
+ */
+struct hash_entry *
+_mesa_hash_table_next_entry_unsafe(const struct hash_table *ht, struct hash_entry *entry)
+{
+ assert(!ht->deleted_entries);
+ if (!ht->entries)
+ return NULL;
+ if (entry == NULL)
+ entry = ht->table;
+ else
+ entry = entry + 1;
+ if (entry != ht->table + ht->size)
+ return entry->key ? entry : _mesa_hash_table_next_entry_unsafe(ht, entry);
+
+ return NULL;
+}
+
+/**
+ * This function is an iterator over the hash table.
+ *
+ * Pass in NULL for the first entry, as in the start of a for loop. Note that
+ * an iteration over the table is O(table_size) not O(entries).
+ */
+struct hash_entry *
+_mesa_hash_table_next_entry(struct hash_table *ht,
+ struct hash_entry *entry)
+{
+ if (entry == NULL)
+ entry = ht->table;
+ else
+ entry = entry + 1;
+
+ for (; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(ht, entry)) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Returns a random entry from the hash table.
+ *
+ * This may be useful in implementing random replacement (as opposed
+ * to just removing everything) in caches based on this hash table
+ * implementation. @predicate may be used to filter entries, or may
+ * be set to NULL for no filtering.
+ */
+struct hash_entry *
+_mesa_hash_table_random_entry(struct hash_table *ht,
+ bool (*predicate)(struct hash_entry *entry))
+{
+ struct hash_entry *entry;
+ uint32_t i = rand() % ht->size;
+
+ if (ht->entries == 0)
+ return NULL;
+
+ for (entry = ht->table + i; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(ht, entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ for (entry = ht->table; entry != ht->table + i; entry++) {
+ if (entry_is_present(ht, entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+
+uint32_t
+_mesa_hash_data(const void *data, size_t size)
+{
+ return XXH32(data, size, 0);
+}
+
+uint32_t
+_mesa_hash_data_with_seed(const void *data, size_t size, uint32_t seed)
+{
+ return XXH32(data, size, seed);
+}
+
+uint32_t
+_mesa_hash_int(const void *key)
+{
+ return XXH32(key, sizeof(int), 0);
+}
+
+uint32_t
+_mesa_hash_uint(const void *key)
+{
+ return XXH32(key, sizeof(unsigned), 0);
+}
+
+uint32_t
+_mesa_hash_u32(const void *key)
+{
+ return XXH32(key, 4, 0);
+}
+
+/** FNV-1a string hash implementation */
+uint32_t
+_mesa_hash_string(const void *_key)
+{
+ uint32_t hash = 0;
+ const char *key = _key;
+ size_t len = strlen(key);
+#if defined(_WIN64) || defined(__x86_64__)
+ hash = (uint32_t)XXH64(key, len, hash);
+#else
+ hash = XXH32(key, len, hash);
+#endif
+ return hash;
+}
+
+uint32_t
+_mesa_hash_pointer(const void *pointer)
+{
+ uintptr_t num = (uintptr_t) pointer;
+ return (uint32_t) ((num >> 2) ^ (num >> 6) ^ (num >> 10) ^ (num >> 14));
+}
+
+bool
+_mesa_key_int_equal(const void *a, const void *b)
+{
+ return *((const int *)a) == *((const int *)b);
+}
+
+bool
+_mesa_key_uint_equal(const void *a, const void *b)
+{
+
+ return *((const unsigned *)a) == *((const unsigned *)b);
+}
+
+bool
+_mesa_key_u32_equal(const void *a, const void *b)
+{
+ return *((const uint32_t *)a) == *((const uint32_t *)b);
+}
+
+/**
+ * String compare function for use as the comparison callback in
+ * _mesa_hash_table_create().
+ */
+bool
+_mesa_key_string_equal(const void *a, const void *b)
+{
+ return strcmp(a, b) == 0;
+}
+
/* Identity comparison: the pointer value itself is the key. */
bool
_mesa_key_pointer_equal(const void *a, const void *b)
{
   return a == b;
}
+
+/**
+ * Helper to create a hash table with pointer keys.
+ */
+struct hash_table *
+_mesa_pointer_hash_table_create(void *mem_ctx)
+{
+ return _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+}
+
+
+bool
+_mesa_hash_table_reserve(struct hash_table *ht, unsigned size)
+{
+ if (size < ht->max_entries)
+ return true;
+ for (unsigned i = ht->size_index + 1; i < ARRAY_SIZE(hash_sizes); i++) {
+ if (hash_sizes[i].max_entries >= size) {
+ _mesa_hash_table_rehash(ht, i);
+ break;
+ }
+ }
+ return ht->max_entries >= size;
+}
+
+/**
+ * Hash table wrapper which supports 64-bit keys.
+ *
+ * TODO: unify all hash table implementations.
+ */
+
/* Boxed 64-bit key used on 32-bit builds, where a pointer is too narrow to
 * hold the key value inline. */
struct hash_key_u64 {
   uint64_t value;
};
+
+static uint32_t
+key_u64_hash(const void *key)
+{
+ return _mesa_hash_data(key, sizeof(struct hash_key_u64));
+}
+
+static bool
+key_u64_equals(const void *a, const void *b)
+{
+ const struct hash_key_u64 *aa = a;
+ const struct hash_key_u64 *bb = b;
+
+ return aa->value == bb->value;
+}
+
+#define FREED_KEY_VALUE 0
+
+struct hash_table_u64 *
+_mesa_hash_table_u64_create(void *mem_ctx)
+{
+ STATIC_ASSERT(FREED_KEY_VALUE != DELETED_KEY_VALUE);
+ struct hash_table_u64 *ht;
+
+ ht = CALLOC_STRUCT(hash_table_u64);
+ if (!ht)
+ return NULL;
+
+ if (sizeof(void *) == 8) {
+ ht->table = _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ } else {
+ ht->table = _mesa_hash_table_create(mem_ctx, key_u64_hash,
+ key_u64_equals);
+ }
+
+ if (ht->table)
+ _mesa_hash_table_set_deleted_key(ht->table, uint_key(DELETED_KEY_VALUE));
+
+ return ht;
+}
+
+static void
+_mesa_hash_table_u64_delete_key(struct hash_entry *entry)
+{
+ if (sizeof(void *) == 8)
+ return;
+
+ struct hash_key_u64 *_key = (struct hash_key_u64 *)entry->key;
+
+ if (_key)
+ free(_key);
+}
+
+void
+_mesa_hash_table_u64_clear(struct hash_table_u64 *ht)
+{
+ if (!ht)
+ return;
+
+ _mesa_hash_table_clear(ht->table, _mesa_hash_table_u64_delete_key);
+}
+
+void
+_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht)
+{
+ if (!ht)
+ return;
+
+ _mesa_hash_table_u64_clear(ht);
+ _mesa_hash_table_destroy(ht->table, NULL);
+ free(ht);
+}
+
/**
 * Insert @data under @key.
 *
 * Keys FREED_KEY_VALUE (0) and DELETED_KEY_VALUE cannot live in the
 * underlying pointer-keyed table, so their data is parked in dedicated
 * side slots instead.
 */
void
_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
                            void *data)
{
   if (key == FREED_KEY_VALUE) {
      ht->freed_key_data = data;
      return;
   }

   if (key == DELETED_KEY_VALUE) {
      ht->deleted_key_data = data;
      return;
   }

   if (sizeof(void *) == 8) {
      /* Pointers are wide enough: store the key inline in the pointer. */
      _mesa_hash_table_insert(ht->table, (void *)(uintptr_t)key, data);
   } else {
      /* 32-bit builds: box the key on the heap; it is freed by
       * _mesa_hash_table_u64_remove / _mesa_hash_table_u64_clear. */
      struct hash_key_u64 *_key = CALLOC_STRUCT(hash_key_u64);

      /* NOTE(review): allocation failure is silently dropped — the caller
       * cannot tell the insert did not happen. Re-inserting an existing key
       * presumably also leaks one of the two boxed keys — TODO confirm
       * against _mesa_hash_table_insert's key-replacement semantics. */
      if (!_key)
         return;
      _key->value = key;

      _mesa_hash_table_insert(ht->table, _key, data);
   }
}
+
+static struct hash_entry *
+hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key)
+{
+ if (sizeof(void *) == 8) {
+ return _mesa_hash_table_search(ht->table, (void *)(uintptr_t)key);
+ } else {
+ struct hash_key_u64 _key = { .value = key };
+ return _mesa_hash_table_search(ht->table, &_key);
+ }
+}
+
+void *
+_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key)
+{
+ struct hash_entry *entry;
+
+ if (key == FREED_KEY_VALUE)
+ return ht->freed_key_data;
+
+ if (key == DELETED_KEY_VALUE)
+ return ht->deleted_key_data;
+
+ entry = hash_table_u64_search(ht, key);
+ if (!entry)
+ return NULL;
+
+ return entry->data;
+}
+
+void
+_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key)
+{
+ struct hash_entry *entry;
+
+ if (key == FREED_KEY_VALUE) {
+ ht->freed_key_data = NULL;
+ return;
+ }
+
+ if (key == DELETED_KEY_VALUE) {
+ ht->deleted_key_data = NULL;
+ return;
+ }
+
+ entry = hash_table_u64_search(ht, key);
+ if (!entry)
+ return;
+
+ if (sizeof(void *) == 8) {
+ _mesa_hash_table_remove(ht->table, entry);
+ } else {
+ struct hash_key *_key = (struct hash_key *)entry->key;
+
+ _mesa_hash_table_remove(ht->table, entry);
+ free(_key);
+ }
+}
diff --git a/src/mesa/util/hash_table.h b/src/mesa/util/hash_table.h
new file mode 100644
index 00000000..8079d102
--- /dev/null
+++ b/src/mesa/util/hash_table.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _HASH_TABLE_H
+#define _HASH_TABLE_H
+
+#include <stdlib.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include "c99_compat.h"
+#include "macros.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* One bucket slot: the key's hash is cached so probes and rehashing can
 * compare hashes before calling the equality function. */
struct hash_entry {
   uint32_t hash;    /* hash value of 'key' */
   const void *key;
   void *data;
};
+
/* Open-addressing hash table with user-supplied hash/equality callbacks. */
struct hash_table {
   struct hash_entry *table;    /* bucket array of 'size' entries */
   uint32_t (*key_hash_function)(const void *key);
   bool (*key_equals_function)(const void *a, const void *b);
   const void *deleted_key;     /* sentinel key marking removed entries */
   uint32_t size;               /* number of buckets */
   uint32_t rehash;
   uint64_t size_magic;         /* presumably fast-modulo constant for 'size' — see hash_table.c */
   uint64_t rehash_magic;       /* presumably fast-modulo constant for 'rehash' — see hash_table.c */
   uint32_t max_entries;        /* grow/rehash threshold */
   uint32_t size_index;         /* index of the current size in the size list */
   uint32_t entries;            /* live (non-deleted) entries */
   uint32_t deleted_entries;    /* tombstones awaiting rehash */
};
+
+struct hash_table *
+_mesa_hash_table_create(void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b));
+
+bool
+_mesa_hash_table_init(struct hash_table *ht,
+ void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b));
+
+struct hash_table *
+_mesa_hash_table_create_u32_keys(void *mem_ctx);
+
+struct hash_table *
+_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx);
+void _mesa_hash_table_destroy(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry));
+void _mesa_hash_table_clear(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry));
+void _mesa_hash_table_set_deleted_key(struct hash_table *ht,
+ const void *deleted_key);
+
/* Number of live (non-deleted) entries currently stored in the table. */
static inline uint32_t _mesa_hash_table_num_entries(struct hash_table *ht)
{
   return ht->entries;
}
+
+struct hash_entry *
+_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data);
+struct hash_entry *
+_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data);
+struct hash_entry *
+_mesa_hash_table_search(struct hash_table *ht, const void *key);
+struct hash_entry *
+_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key);
+void _mesa_hash_table_remove(struct hash_table *ht,
+ struct hash_entry *entry);
+void _mesa_hash_table_remove_key(struct hash_table *ht,
+ const void *key);
+
+struct hash_entry *_mesa_hash_table_next_entry(struct hash_table *ht,
+ struct hash_entry *entry);
+struct hash_entry *_mesa_hash_table_next_entry_unsafe(const struct hash_table *ht,
+ struct hash_entry *entry);
+struct hash_entry *
+_mesa_hash_table_random_entry(struct hash_table *ht,
+ bool (*predicate)(struct hash_entry *entry));
+
+uint32_t _mesa_hash_data(const void *data, size_t size);
+uint32_t _mesa_hash_data_with_seed(const void *data, size_t size, uint32_t seed);
+
+uint32_t _mesa_hash_int(const void *key);
+uint32_t _mesa_hash_uint(const void *key);
+uint32_t _mesa_hash_u32(const void *key);
+uint32_t _mesa_hash_string(const void *key);
+uint32_t _mesa_hash_pointer(const void *pointer);
+
+bool _mesa_key_int_equal(const void *a, const void *b);
+bool _mesa_key_uint_equal(const void *a, const void *b);
+bool _mesa_key_u32_equal(const void *a, const void *b);
+bool _mesa_key_string_equal(const void *a, const void *b);
+bool _mesa_key_pointer_equal(const void *a, const void *b);
+
+struct hash_table *
+_mesa_pointer_hash_table_create(void *mem_ctx);
+
+bool
+_mesa_hash_table_reserve(struct hash_table *ht, unsigned size);
+/**
+ * This foreach function is safe against deletion (which just replaces
+ * an entry's data with the deleted marker), but not against insertion
+ * (which may rehash the table, making entry a dangling pointer).
+ */
+#define hash_table_foreach(ht, entry) \
+ for (struct hash_entry *entry = _mesa_hash_table_next_entry(ht, NULL); \
+ entry != NULL; \
+ entry = _mesa_hash_table_next_entry(ht, entry))
+/**
+ * This foreach function destroys the table as it iterates.
+ * It is not safe to use when inserting or removing entries.
+ */
+#define hash_table_foreach_remove(ht, entry) \
+ for (struct hash_entry *entry = _mesa_hash_table_next_entry_unsafe(ht, NULL); \
+ (ht)->entries; \
+ entry->hash = 0, entry->key = (void*)NULL, entry->data = NULL, \
+ (ht)->entries--, entry = _mesa_hash_table_next_entry_unsafe(ht, entry))
+
/* Invoke 'callback' on every (key, data) pair, passing 'closure' through.
 * Built on hash_table_foreach, so it is safe against deletion from within
 * the callback but not against insertion. */
static inline void
hash_table_call_foreach(struct hash_table *ht,
                        void (*callback)(const void *key,
                                         void *data,
                                         void *closure),
                        void *closure)
{
   hash_table_foreach(ht, entry)
      callback(entry->key, entry->data, closure);
}
+
/**
 * Hash table wrapper which supports 64-bit keys.
 */
struct hash_table_u64 {
   struct hash_table *table;
   void *freed_key_data;      /* data slot for key FREED_KEY_VALUE (0) */
   void *deleted_key_data;    /* data slot for key DELETED_KEY_VALUE */
};
+
+struct hash_table_u64 *
+_mesa_hash_table_u64_create(void *mem_ctx);
+
+void
+_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht);
+
+void
+_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
+ void *data);
+
+void *
+_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key);
+
+void
+_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key);
+
+void
+_mesa_hash_table_u64_clear(struct hash_table_u64 *ht);
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* _HASH_TABLE_H */
diff --git a/src/mesa/util/list.h b/src/mesa/util/list.h
new file mode 100644
index 00000000..0e71a66a
--- /dev/null
+++ b/src/mesa/util/list.h
@@ -0,0 +1,270 @@
+/**************************************************************************
+ *
+ * Copyright 2006 VMware, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ *
+ * Is not threadsafe, so common operations need to
+ * be protected using an external mutex.
+ */
+
+#ifndef _UTIL_LIST_H_
+#define _UTIL_LIST_H_
+
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <assert.h>
+#include "c99_compat.h"
+
+#ifdef DEBUG
+# define list_assert(cond, msg) assert(cond && msg)
+#else
+# define list_assert(cond, msg) (void)(0 && (cond))
+#endif
+
/* Node of a circular doubly-linked list; embed it in the owning struct.
 * The list head is just a node whose neighbors are the first/last items. */
struct list_head
{
   struct list_head *prev;
   struct list_head *next;
};

/* Make 'item' an empty list: a node linked to itself. */
static inline void list_inithead(struct list_head *item)
{
   item->next = item->prev = item;
}

/* Insert 'item' immediately after 'list' (i.e. at the front). */
static inline void list_add(struct list_head *item, struct list_head *list)
{
   struct list_head *after = list->next;

   item->prev = list;
   item->next = after;
   after->prev = item;
   list->next = item;
}

/* Insert 'item' immediately before 'list' (i.e. at the back). */
static inline void list_addtail(struct list_head *item, struct list_head *list)
{
   struct list_head *before = list->prev;

   item->next = list;
   item->prev = before;
   before->next = item;
   list->prev = item;
}
+
+static inline bool list_is_empty(const struct list_head *list);
+
+static inline void list_replace(struct list_head *from, struct list_head *to)
+{
+ if (list_is_empty(from)) {
+ list_inithead(to);
+ } else {
+ to->prev = from->prev;
+ to->next = from->next;
+ from->next->prev = to;
+ from->prev->next = to;
+ }
+}
+
+static inline void list_del(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+ item->prev = item->next = NULL;
+}
+
+static inline void list_delinit(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+ item->next = item;
+ item->prev = item;
+}
+
+static inline bool list_is_empty(const struct list_head *list)
+{
+ return list->next == list;
+}
+
+static inline bool list_is_linked(const struct list_head *list)
+{
+ /* both must be NULL or both must be not NULL */
+ assert((list->prev != NULL) == (list->next != NULL));
+
+ return list->next != NULL;
+}
+
+/**
+ * Returns whether the list has exactly one element.
+ */
+static inline bool list_is_singular(const struct list_head *list)
+{
+ return list_is_linked(list) && !list_is_empty(list) && list->next->next == list;
+}
+
+static inline unsigned list_length(const struct list_head *list)
+{
+ struct list_head *node;
+ unsigned length = 0;
+ for (node = list->next; node != list; node = node->next)
+ length++;
+ return length;
+}
+
+static inline void list_splice(struct list_head *src, struct list_head *dst)
+{
+ if (list_is_empty(src))
+ return;
+
+ src->next->prev = dst;
+ src->prev->next = dst->next;
+ dst->next->prev = src->prev;
+ dst->next = src->next;
+}
+
+static inline void list_splicetail(struct list_head *src, struct list_head *dst)
+{
+ if (list_is_empty(src))
+ return;
+
+ src->prev->next = dst;
+ src->next->prev = dst->prev;
+ dst->prev->next = src->next;
+ dst->prev = src->prev;
+}
+
/* Debug helper: assert the doubly-linked invariants (x->next->prev == x and
 * x->prev->next == x) hold for the head and every node on the list. */
static inline void list_validate(const struct list_head *list)
{
   struct list_head *node;
   assert(list_is_linked(list));
   assert(list->next->prev == list && list->prev->next == list);
   for (node = list->next; node != list; node = node->next)
      assert(node->next->prev == node && node->prev->next == node);
}
+
/* Recover the containing struct of type '__type' from a pointer '__item'
 * to its embedded list node member '__field'. */
#define LIST_ENTRY(__type, __item, __field)   \
    ((__type *)(((char *)(__item)) - offsetof(__type, __field)))

/**
 * Cast from a pointer to a member of a struct back to the containing struct.
 *
 * 'sample' MUST be initialized, or else the result is undefined!
 */
#define list_container_of(ptr, sample, member)                          \
    (void *)((char *)(ptr)                                              \
             - ((char *)&(sample)->member - (char *)(sample)))

/* First element of the list headed by 'ptr' (undefined on an empty list). */
#define list_first_entry(ptr, type, member) \
        LIST_ENTRY(type, (ptr)->next, member)

/* Last element of the list headed by 'ptr' (undefined on an empty list). */
#define list_last_entry(ptr, type, member) \
        LIST_ENTRY(type, (ptr)->prev, member)


/* Iterate forward; 'pos' is re-derived each step, so removal is not safe. */
#define LIST_FOR_EACH_ENTRY(pos, head, member)                          \
   for (pos = NULL, pos = list_container_of((head)->next, pos, member); \
        &pos->member != (head);                                         \
        pos = list_container_of(pos->member.next, pos, member))

/* Iterate forward; 'storage' holds the next node, so 'pos' may be removed. */
#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member)            \
   for (pos = NULL, pos = list_container_of((head)->next, pos, member), \
        storage = list_container_of(pos->member.next, pos, member);     \
        &pos->member != (head);                                         \
        pos = storage, storage = list_container_of(storage->member.next, storage, member))

/* Iterate backward; removal of 'pos' is safe thanks to 'storage'. */
#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member)        \
   for (pos = NULL, pos = list_container_of((head)->prev, pos, member), \
        storage = list_container_of(pos->member.prev, pos, member);     \
        &pos->member != (head);                                         \
        pos = storage, storage = list_container_of(storage->member.prev, storage, member))

/* Iterate forward starting at node 'start' rather than the head. */
#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member)              \
   for (pos = NULL, pos = list_container_of((start), pos, member);      \
        &pos->member != (head);                                         \
        pos = list_container_of(pos->member.next, pos, member))

/* Iterate backward starting at node 'start' rather than the head. */
#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member)          \
   for (pos = NULL, pos = list_container_of((start), pos, member);      \
        &pos->member != (head);                                         \
        pos = list_container_of(pos->member.prev, pos, member))
+
/* Typed forward iteration; '__next' only feeds list_assert, which flags
 * mid-iteration removal — use the _safe variant for that. */
#define list_for_each_entry(type, pos, head, member)                    \
   for (type *pos = LIST_ENTRY(type, (head)->next, member),             \
	     *__next = LIST_ENTRY(type, pos->member.next, member);      \
	&pos->member != (head);                                         \
	pos = LIST_ENTRY(type, pos->member.next, member),               \
	list_assert(pos == __next, "use _safe iterator"),               \
	__next = LIST_ENTRY(type, __next->member.next, member))

/* Typed forward iteration that tolerates removal of 'pos'. */
#define list_for_each_entry_safe(type, pos, head, member)               \
   for (type *pos = LIST_ENTRY(type, (head)->next, member),             \
	     *__next = LIST_ENTRY(type, pos->member.next, member);      \
	&pos->member != (head);                                         \
	pos = __next,                                                   \
	__next = LIST_ENTRY(type, __next->member.next, member))

/* Typed backward iteration; removal of 'pos' is NOT safe (see list_assert). */
#define list_for_each_entry_rev(type, pos, head, member)                \
   for (type *pos = LIST_ENTRY(type, (head)->prev, member),             \
	     *__prev = LIST_ENTRY(type, pos->member.prev, member);      \
	&pos->member != (head);                                         \
	pos = LIST_ENTRY(type, pos->member.prev, member),               \
	list_assert(pos == __prev, "use _safe iterator"),               \
	__prev = LIST_ENTRY(type, __prev->member.prev, member))

/* Typed backward iteration that tolerates removal of 'pos'. */
#define list_for_each_entry_safe_rev(type, pos, head, member)           \
   for (type *pos = LIST_ENTRY(type, (head)->prev, member),             \
	     *__prev = LIST_ENTRY(type, pos->member.prev, member);      \
	&pos->member != (head);                                         \
	pos = __prev,                                                   \
        __prev = LIST_ENTRY(type, __prev->member.prev, member))

/* Typed forward iteration starting at node 'start' instead of the head. */
#define list_for_each_entry_from(type, pos, start, head, member)        \
   for (type *pos = LIST_ENTRY(type, (start), member);                  \
	&pos->member != (head);                                         \
	pos = LIST_ENTRY(type, pos->member.next, member))

/* Like list_for_each_entry_from, but removal of 'pos' is safe. */
#define list_for_each_entry_from_safe(type, pos, start, head, member)   \
   for (type *pos = LIST_ENTRY(type, (start), member),                  \
	     *__next = LIST_ENTRY(type, pos->member.next, member);      \
	&pos->member != (head);                                         \
	pos = __next,                                                   \
	__next = LIST_ENTRY(type, __next->member.next, member))

/* Typed backward iteration starting at node 'start' instead of the head. */
#define list_for_each_entry_from_rev(type, pos, start, head, member)    \
   for (type *pos = LIST_ENTRY(type, (start), member);                  \
	&pos->member != (head);                                         \
	pos = LIST_ENTRY(type, pos->member.prev, member))

/* Walk two lists in lock-step; stops when either list is exhausted. */
#define list_pair_for_each_entry(type, pos1, pos2, head1, head2, member) \
   for (type *pos1 = LIST_ENTRY(type, (head1)->next, member),            \
	     *pos2 = LIST_ENTRY(type, (head2)->next, member);            \
	&pos1->member != (head1) && &pos2->member != (head2);            \
	pos1 = LIST_ENTRY(type, pos1->member.next, member),              \
	pos2 = LIST_ENTRY(type, pos2->member.next, member))
+#endif /*_UTIL_LIST_H_*/
diff --git a/src/mesa/util/macros.h b/src/mesa/util/macros.h
new file mode 100644
index 00000000..e179479f
--- /dev/null
+++ b/src/mesa/util/macros.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UTIL_MACROS_H
+#define UTIL_MACROS_H
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "c99_compat.h"
+#include "c11_compat.h"
+
+#include <stdint.h>
+
+/* Compute the size of an array */
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/* For compatibility with Clang's __has_builtin() */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+/**
+ * __builtin_expect macros
+ */
+#if !defined(HAVE___BUILTIN_EXPECT)
+# define __builtin_expect(x, y) (x)
+#endif
+
+#ifndef likely
+# ifdef HAVE___BUILTIN_EXPECT
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+# else
+# define likely(x) (x)
+# define unlikely(x) (x)
+# endif
+#endif
+
+/**
+ * __builtin_types_compatible_p compat
+ */
+#if defined(__cplusplus) || !defined(HAVE___BUILTIN_TYPES_COMPATIBLE_P)
+# define __builtin_types_compatible_p(type1, type2) (1)
+#endif
+
+/**
+ * Static (compile-time) assertion.
+ */
+#if defined(_MSC_VER)
+ /* MSVC doesn't like VLA's, but it also dislikes zero length arrays
+ * (which gcc is happy with), so we have to define STATIC_ASSERT()
+ * slightly differently.
+ */
+# define STATIC_ASSERT(COND) do { \
+ (void) sizeof(char [(COND) != 0]); \
+ } while (0)
+#elif defined(__GNUC__)
+ /* This version of STATIC_ASSERT() relies on VLAs. If COND is
+ * false/zero, the array size will be -1 and we'll get a compile
+ * error
+ */
+# define STATIC_ASSERT(COND) do { \
+ (void) sizeof(char [1 - 2*!(COND)]); \
+ } while (0)
+#else
+# define STATIC_ASSERT(COND) do { } while (0)
+#endif
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ */
+#ifndef __GNUC__
+ /* a grown-up compiler is required for the extra type checking: */
+# define container_of(ptr, type, member) \
+ (type*)((uint8_t *)ptr - offsetof(type, member))
+#else
+# define __same_type(a, b) \
+ __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
+# define container_of(ptr, type, member) ({ \
+ uint8_t *__mptr = (uint8_t *)(ptr); \
+ STATIC_ASSERT(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void) || \
+ !"pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+#endif
+
+/**
+ * Unreachable macro. Useful for suppressing "control reaches end of non-void
+ * function" warnings.
+ */
+#if defined(HAVE___BUILTIN_UNREACHABLE) || __has_builtin(__builtin_unreachable)
+#define unreachable(str) \
+do { \
+ assert(!str); \
+ __builtin_unreachable(); \
+} while (0)
+#elif defined (_MSC_VER)
+#define unreachable(str) \
+do { \
+ assert(!str); \
+ __assume(0); \
+} while (0)
+#else
+#define unreachable(str) assert(!str)
+#endif
+
+/**
+ * Assume macro. Useful for expressing our assumptions to the compiler,
+ * typically for purposes of silencing warnings.
+ */
+#if __has_builtin(__builtin_assume)
+#define assume(expr) \
+do { \
+ assert(expr); \
+ __builtin_assume(expr); \
+} while (0)
+#elif defined HAVE___BUILTIN_UNREACHABLE
+#define assume(expr) ((expr) ? ((void) 0) \
+ : (assert(!"assumption failed"), \
+ __builtin_unreachable()))
+#elif defined (_MSC_VER)
+#define assume(expr) __assume(expr)
+#else
+#define assume(expr) assert(expr)
+#endif
+
+/* Attribute const is used for functions that have no effects other than their
+ * return value, and only rely on the argument values to compute the return
+ * value. As a result, calls to it can be CSEed. Note that using memory
+ * pointed to by the arguments is not allowed for const functions.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_CONST
+#define ATTRIBUTE_CONST __attribute__((__const__))
+#else
+#define ATTRIBUTE_CONST
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FLATTEN
+#define FLATTEN __attribute__((__flatten__))
+#else
+#define FLATTEN
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
+#if defined (__MINGW_PRINTF_FORMAT)
+# define PRINTFLIKE(f, a) __attribute__ ((format(__MINGW_PRINTF_FORMAT, f, a)))
+#else
+# define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#endif
+#else
+#define PRINTFLIKE(f, a)
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
+#define MALLOCLIKE __attribute__((__malloc__))
+#else
+#define MALLOCLIKE
+#endif
+
+/* Forced function inlining */
+/* Note: Clang also sets __GNUC__ (see other cases below) */
+#ifndef ALWAYS_INLINE
+# if defined(__GNUC__)
+# define ALWAYS_INLINE inline __attribute__((always_inline))
+# elif defined(_MSC_VER)
+# define ALWAYS_INLINE __forceinline
+# else
+# define ALWAYS_INLINE inline
+# endif
+#endif
+
+/* Used to optionally mark structures with misaligned elements or size as
+ * packed, to trade off performance for space.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_PACKED
+#define PACKED __attribute__((__packed__))
+#else
+#define PACKED
+#endif
+
+/* Attribute pure is used for functions that have no effects other than their
+ * return value. As a result, calls to it can be dead code eliminated.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_PURE
+#define ATTRIBUTE_PURE __attribute__((__pure__))
+#else
+#define ATTRIBUTE_PURE
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+#define ATTRIBUTE_RETURNS_NONNULL __attribute__((__returns_nonnull__))
+#else
+#define ATTRIBUTE_RETURNS_NONNULL
+#endif
+
+#ifndef NORETURN
+# ifdef _MSC_VER
+# define NORETURN __declspec(noreturn)
+# elif defined HAVE_FUNC_ATTRIBUTE_NORETURN
+# define NORETURN __attribute__((__noreturn__))
+# else
+# define NORETURN
+# endif
+#endif
+
+#ifdef _MSC_VER
+#define ALIGN16 __declspec(align(16))
+#else
+#define ALIGN16 __attribute__((aligned(16)))
+#endif
+
+#ifdef __cplusplus
+/**
+ * Macro function that evaluates to true if T is a trivially
+ * destructible type -- that is, if its (non-virtual) destructor
+ * performs no action and all member variables and base classes are
+ * trivially destructible themselves.
+ */
+# if (defined(__clang__) && defined(__has_feature))
+# if __has_feature(has_trivial_destructor)
+# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)
+# endif
+# elif defined(__GNUC__)
+# if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)))
+# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)
+# endif
+# elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)
+# endif
+# ifndef HAS_TRIVIAL_DESTRUCTOR
+ /* It's always safe (if inefficient) to assume that a
+ * destructor is non-trivial.
+ */
+# define HAS_TRIVIAL_DESTRUCTOR(T) (false)
+# endif
+#endif
+
+/**
+ * PUBLIC/USED macros
+ *
+ * If we build the library with gcc's -fvisibility=hidden flag, we'll
+ * use the PUBLIC macro to mark functions that are to be exported.
+ *
+ * We also need to define a USED attribute, so the optimizer doesn't
+ * inline a static function that we later use in an alias. - ajax
+ */
+#ifndef PUBLIC
+# if defined(_WIN32)
+# define PUBLIC __declspec(dllexport)
+# define USED
+# elif defined(__GNUC__)
+# define PUBLIC __attribute__((visibility("default")))
+# define USED __attribute__((used))
+# else
+# define PUBLIC
+# define USED
+# endif
+#endif
+
+/**
+ * UNUSED marks variables (or sometimes functions) that have to be defined,
+ * but are sometimes (or always) unused beyond that. A common case is for
+ * a function parameter to be used in some build configurations but not others.
+ * Another case is fallback vfuncs that don't do anything with their params.
+ *
+ * Note that this should not be used for identifiers used in `assert()`;
+ * see ASSERTED below.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_UNUSED
+#define UNUSED __attribute__((unused))
+#else
+#define UNUSED
+#endif
+
+/**
+ * Use ASSERTED to indicate that an identifier is unused outside of an `assert()`,
+ * so that assert-free builds don't get "unused variable" warnings.
+ */
+#ifdef NDEBUG
+#define ASSERTED UNUSED
+#else
+#define ASSERTED
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+#define MUST_CHECK __attribute__((warn_unused_result))
+#else
+#define MUST_CHECK
+#endif
+
+#if defined(__GNUC__)
+#define ATTRIBUTE_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER)
+#define ATTRIBUTE_NOINLINE __declspec(noinline)
+#else
+#define ATTRIBUTE_NOINLINE
+#endif
+
+/* Use as: enum name { X, Y } ENUM_PACKED; */
+#if defined(__GNUC__)
+#define ENUM_PACKED __attribute__((packed))
+#else
+#define ENUM_PACKED
+#endif
+
+
+/**
+ * Check that STRUCT::FIELD can hold MAXVAL. We use a lot of bitfields
+ * in Mesa/gallium. We have to be sure they're of sufficient size to
+ * hold the largest expected value.
+ * Note that with MSVC, enums are signed and enum bitfields need one extra
+ * high bit (always zero) to ensure the max value is handled correctly.
+ * This macro will detect that with MSVC, but not GCC.
+ */
+#define ASSERT_BITFIELD_SIZE(STRUCT, FIELD, MAXVAL) \
+ do { \
+ ASSERTED STRUCT s; \
+ s.FIELD = (MAXVAL); \
+ assert((int) s.FIELD == (MAXVAL) && "Insufficient bitfield size!"); \
+ } while (0)
+
+
+/** Compute ceiling of integer quotient of A divided by B. */
+#define DIV_ROUND_UP( A, B ) ( ((A) + (B) - 1) / (B) )
+
+/** Clamp X to [MIN,MAX]. Turn NaN into MIN, arbitrarily. */
+#define CLAMP( X, MIN, MAX ) ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )
+
+/* Syntax sugar occuring frequently in graphics code */
+#define SATURATE( X ) CLAMP(X, 0.0f, 1.0f)
+
+/** Minimum of two values: */
+#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
+
+/** Maximum of two values: */
+#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
+
+/** Minimum and maximum of three values: */
+#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
+#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))
+
+/** Align a value to a power of two */
+#define ALIGN_POT(x, pot_align) (((x) + (pot_align) - 1) & ~((pot_align) - 1))
+
+/**
+ * Macro for declaring an explicit conversion operator. Defaults to an
+ * implicit conversion if C++11 is not supported.
+ */
+#if __cplusplus >= 201103L
+#define EXPLICIT_CONVERSION explicit
+#elif defined(__cplusplus)
+#define EXPLICIT_CONVERSION
+#endif
+
+/** Set a single bit */
+#define BITFIELD_BIT(b) (1u << (b))
+/** Set all bits up to excluding bit b */
+#define BITFIELD_MASK(b) \
+ ((b) == 32 ? (~0u) : BITFIELD_BIT((b) % 32) - 1)
+/** Set count bits starting from bit b */
+#define BITFIELD_RANGE(b, count) \
+ (BITFIELD_MASK((b) + (count)) & ~BITFIELD_MASK(b))
+
+/** Set a single bit */
+#define BITFIELD64_BIT(b) (1ull << (b))
+/** Set all bits up to excluding bit b */
+#define BITFIELD64_MASK(b) \
+ ((b) == 64 ? (~0ull) : BITFIELD64_BIT(b) - 1)
+/** Set count bits starting from bit b */
+#define BITFIELD64_RANGE(b, count) \
+ (BITFIELD64_MASK((b) + (count)) & ~BITFIELD64_MASK(b))
+
/* Largest value representable in a signed two's-complement integer that is
 * 'bit_size' bits wide. */
static inline int64_t
u_intN_max(unsigned bit_size)
{
   assert(bit_size > 0 && bit_size <= 64);
   return INT64_MAX >> (64 - bit_size);
}

/* Smallest value representable in a signed two's-complement integer that is
 * 'bit_size' bits wide. */
static inline int64_t
u_intN_min(unsigned bit_size)
{
   /* Two's complement: min == -max - 1, which is also how stdint.h
    * spells out the INT*_MIN constants. */
   return -u_intN_max(bit_size) - 1;
}

/* Largest value representable in an unsigned integer that is 'bit_size'
 * bits wide. */
static inline uint64_t
u_uintN_max(unsigned bit_size)
{
   assert(bit_size > 0 && bit_size <= 64);
   return UINT64_MAX >> (64 - bit_size);
}
+
/* TODO: In future we should try to move this to u_debug.h once header
 * dependencies are reorganised to allow this.
 */
/* Debug message categories; presumably consumed by a driver debug callback
 * (mirrors gallium's pipe_debug_type) — values deliberately start at 1. */
enum pipe_debug_type
{
   PIPE_DEBUG_TYPE_OUT_OF_MEMORY = 1,
   PIPE_DEBUG_TYPE_ERROR,
   PIPE_DEBUG_TYPE_SHADER_INFO,
   PIPE_DEBUG_TYPE_PERF_INFO,
   PIPE_DEBUG_TYPE_INFO,
   PIPE_DEBUG_TYPE_FALLBACK,
   PIPE_DEBUG_TYPE_CONFORMANCE,
};
+
+#if !defined(alignof) && !defined(__cplusplus)
+#if __STDC_VERSION__ >= 201112L
+#define alignof(t) _Alignof(t)
+#elif defined(_MSC_VER)
+#define alignof(t) __alignof(t)
+#else
+#define alignof(t) __alignof__(t)
+#endif
+#endif
+
+/* Macros for static type-safety checking.
+ *
+ * https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+ */
+
+#if __has_attribute(capability)
+typedef int __attribute__((capability("mutex"))) lock_cap_t;
+
+#define guarded_by(l) __attribute__((guarded_by(l)))
+#define acquire_cap(l) __attribute((acquire_capability(l), no_thread_safety_analysis))
+#define release_cap(l) __attribute((release_capability(l), no_thread_safety_analysis))
+#define assert_cap(l) __attribute((assert_capability(l), no_thread_safety_analysis))
+#define requires_cap(l) __attribute((requires_capability(l)))
+#define disable_thread_safety_analysis __attribute((no_thread_safety_analysis))
+
+#else
+
+typedef int lock_cap_t;
+
+#define guarded_by(l)
+#define acquire_cap(l)
+#define release_cap(l)
+#define assert_cap(l)
+#define requires_cap(l)
+#define disable_thread_safety_analysis
+
+#endif
+
+#endif /* UTIL_MACROS_H */
diff --git a/src/mesa/util/os_file.c b/src/mesa/util/os_file.c
new file mode 100644
index 00000000..5f79284e
--- /dev/null
+++ b/src/mesa/util/os_file.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2019 Intel Corporation
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "os_file.h"
+#include "detect_os.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+
+#if DETECT_OS_WINDOWS
+#include <io.h>
+#define open _open
+#define fdopen _fdopen
+#define O_CREAT _O_CREAT
+#define O_EXCL _O_EXCL
+#define O_WRONLY _O_WRONLY
+#else
+#include <unistd.h>
+#ifndef F_DUPFD_CLOEXEC
+#define F_DUPFD_CLOEXEC 1030
+#endif
+#endif
+
+
+FILE *
+os_file_create_unique(const char *filename, int filemode)
+{
+ int fd = open(filename, O_CREAT | O_EXCL | O_WRONLY, filemode);
+ if (fd == -1)
+ return NULL;
+ return fdopen(fd, "w");
+}
+
+
#if DETECT_OS_WINDOWS
/* CLOEXEC is moot on Windows, where handles are not inherited unless
 * explicitly requested, so a plain dup() suffices. */
int
os_dupfd_cloexec(int fd)
{
   /*
    * On Windows child processes don't inherit handles by default:
    * https://devblogs.microsoft.com/oldnewthing/20111216-00/?p=8873
    */
   return dup(fd);
}
#else
/* Duplicate fd with close-on-exec set. Prefer the atomic
 * F_DUPFD_CLOEXEC; if the kernel doesn't know that command (EINVAL),
 * fall back to F_DUPFD plus a separate FD_CLOEXEC — a small race window,
 * but the best available. minfd = 3 keeps the duplicate clear of
 * stdin/stdout/stderr. */
int
os_dupfd_cloexec(int fd)
{
   int minfd = 3;
   int newfd = fcntl(fd, F_DUPFD_CLOEXEC, minfd);

   if (newfd >= 0)
      return newfd;

   /* Only EINVAL (command unsupported) warrants the fallback path. */
   if (errno != EINVAL)
      return -1;

   newfd = fcntl(fd, F_DUPFD, minfd);

   if (newfd < 0)
      return -1;

   /* Preserve whatever descriptor flags the duplicate already has. */
   long flags = fcntl(newfd, F_GETFD);
   if (flags == -1) {
      close(newfd);
      return -1;
   }

   if (fcntl(newfd, F_SETFD, flags | FD_CLOEXEC) == -1) {
      close(newfd);
      return -1;
   }

   return newfd;
}
#endif
+
/* NOTE(review): <fcntl.h> and <sys/stat.h> are already included at the top
 * of this file; these duplicates are harmless but could be dropped. */
#include <fcntl.h>
#include <sys/stat.h>

#if DETECT_OS_WINDOWS
typedef ptrdiff_t ssize_t;  /* POSIX type missing on Windows */
#endif

/*
 * Read up to len bytes from fd into buf, retrying on EINTR/EAGAIN.
 * Returns the number of bytes read (> 0), or a negative errno-style value
 * if nothing could be read. A short positive count means EOF (or an error)
 * was hit after some data had already arrived.
 * NOTE(review): the EAGAIN retry busy-spins on a non-blocking fd — assumed
 * callers only pass blocking descriptors; confirm.
 */
static ssize_t
readN(int fd, char *buf, size_t len)
{
   /* err was initially set to -ENODATA but in some BSD systems
    * ENODATA is not defined and ENOATTR is used instead.
    * As err is not returned by any function it can be initialized
    * to -EFAULT that exists everywhere.
    */
   int err = -EFAULT;
   size_t total = 0;
   do {
      ssize_t ret = read(fd, buf + total, len - total);

      if (ret < 0)
         ret = -errno;

      /* Transient conditions: retry the read. */
      if (ret == -EINTR || ret == -EAGAIN)
         continue;

      /* 0 is EOF, negative is a hard error: stop either way. */
      if (ret <= 0) {
         err = ret;
         break;
      }

      total += ret;
   } while (total != len);

   return total ? (ssize_t)total : err;
}

#ifndef O_BINARY
/* Unix makes no distinction between text and binary files. */
#define O_BINARY 0
#endif
+
+char *
+os_read_file(const char *filename, size_t *size)
+{
+ /* Note that this also serves as a slight margin to avoid a 2x grow when
+ * the file is just a few bytes larger when we read it than when we
+ * fstat'ed it.
+ * The string's NULL terminator is also included in here.
+ */
+ size_t len = 64;
+
+ int fd = open(filename, O_RDONLY | O_BINARY);
+ if (fd == -1) {
+ /* errno set by open() */
+ return NULL;
+ }
+
+ /* Pre-allocate a buffer at least the size of the file if we can read
+ * that information.
+ */
+ struct stat stat;
+ if (fstat(fd, &stat) == 0)
+ len += stat.st_size;
+
+ char *buf = malloc(len);
+ if (!buf) {
+ close(fd);
+ errno = -ENOMEM;
+ return NULL;
+ }
+
+ ssize_t actually_read;
+ size_t offset = 0, remaining = len - 1;
+ while ((actually_read = readN(fd, buf + offset, remaining)) == (ssize_t)remaining) {
+ char *newbuf = realloc(buf, 2 * len);
+ if (!newbuf) {
+ free(buf);
+ close(fd);
+ errno = -ENOMEM;
+ return NULL;
+ }
+
+ buf = newbuf;
+ len *= 2;
+ offset += actually_read;
+ remaining = len - offset - 1;
+ }
+
+ close(fd);
+
+ if (actually_read > 0)
+ offset += actually_read;
+
+ /* Final resize to actual size */
+ len = offset + 1;
+ char *newbuf = realloc(buf, len);
+ if (!newbuf) {
+ free(buf);
+ errno = -ENOMEM;
+ return NULL;
+ }
+ buf = newbuf;
+
+ buf[offset] = '\0';
+
+ if (size)
+ *size = offset;
+
+ return buf;
+}
+
#if DETECT_OS_LINUX

#include <sys/syscall.h>
#include <unistd.h>

/* copied from <linux/kcmp.h> */
#define KCMP_FILE 0

/* Compare two fds of this process via kcmp(2): 0 means same file
 * description, positive means different, negative means the kernel could
 * not answer (e.g. built without CONFIG_KCMP) — matching the contract
 * documented in os_file.h. */
int
os_same_file_description(int fd1, int fd2)
{
   pid_t pid = getpid();

   /* Same file descriptor trivially implies same file description */
   if (fd1 == fd2)
      return 0;

   return syscall(SYS_kcmp, pid, pid, KCMP_FILE, fd1, fd2);
}

#else

/* Non-Linux fallback: only the trivial equal-fd case is decidable. */
int
os_same_file_description(int fd1, int fd2)
{
   /* Same file descriptor trivially implies same file description */
   if (fd1 == fd2)
      return 0;

   /* Otherwise we can't tell */
   return -1;
}

#endif
diff --git a/src/mesa/util/os_file.h b/src/mesa/util/os_file.h
new file mode 100644
index 00000000..0c69eeaa
--- /dev/null
+++ b/src/mesa/util/os_file.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2019 Intel Corporation
+ * SPDX-License-Identifier: MIT
+ *
+ * File operations helpers
+ */
+
+#ifndef _OS_FILE_H_
+#define _OS_FILE_H_
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Creates a new file and opens it for writing only.
+ * If the given filename already exists, nothing is done and NULL is returned.
+ * `errno` gets set to the failure reason; if that is not EEXIST, the caller
+ * might want to do something other than trying again.
+ */
+FILE *
+os_file_create_unique(const char *filename, int filemode);
+
+/*
+ * Duplicate a file descriptor, making sure not to keep it open after an exec*()
+ */
+int
+os_dupfd_cloexec(int fd);
+
+/*
+ * Read a file.
+ * Returns a char* that the caller must free(), or NULL and sets errno.
+ * If size is not null and no error occurred it's set to the size of the
+ * file.
+ * Reads files as binary and includes a NUL terminator after the end of the
+ * returned buffer.
+ */
+char *
+os_read_file(const char *filename, size_t *size);
+
+/*
+ * Try to determine if two file descriptors reference the same file description
+ *
+ * Return values:
+ * - 0: They reference the same file description
+ * - > 0: They do not reference the same file description
+ * - < 0: Unable to determine whether they reference the same file description
+ */
+int
+os_same_file_description(int fd1, int fd2);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OS_FILE_H_ */
diff --git a/src/gallium/auxiliary/os/os_memory.h b/src/mesa/util/os_memory.h
index 46a6b6e4..ad88e70f 100644
--- a/src/gallium/auxiliary/os/os_memory.h
+++ b/src/mesa/util/os_memory.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2010 Vmware, Inc.
+ * Copyright 2010 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,12 +34,7 @@
#ifndef _OS_MEMORY_H_
#define _OS_MEMORY_H_
-
-#include "pipe/p_config.h"
-#include "pipe/p_compiler.h"
-
-
-#if defined(PIPE_SUBSYSTEM_EMBEDDED)
+#if defined(EMBEDDED_DEVICE)
#ifdef __cplusplus
extern "C" {
@@ -63,14 +58,13 @@ os_malloc_aligned(size_t size, size_t alignment);
void
os_free_aligned(void *ptr);
+void *
+os_realloc_aligned(void *ptr, size_t oldsize, size_t newsize, size_t alignemnt);
+
#ifdef __cplusplus
}
#endif
-#elif defined(PIPE_OS_WINDOWS) && defined(DEBUG) && !defined(DEBUG_MEMORY_IMPLEMENTATION)
-
-# include "os_memory_debug.h"
-
#else
# include "os_memory_stdc.h"
diff --git a/src/mesa/util/os_memory_aligned.h b/src/mesa/util/os_memory_aligned.h
new file mode 100644
index 00000000..08f12062
--- /dev/null
+++ b/src/mesa/util/os_memory_aligned.h
@@ -0,0 +1,128 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * Memory alignment wrappers.
+ */
+
+
+#ifndef _OS_MEMORY_H_
+#error "Must not be included directly. Include os_memory.h instead"
+#endif
+
+
/**
 * Add two size_t values, reporting whether the sum wrapped around.
 * TODO: leverage __builtin_add_overflow where available
 */
static inline bool
add_overflow_size_t(size_t a, size_t b, size_t *res)
{
   const size_t sum = a + b;   /* unsigned wrap-around is well defined */
   *res = sum;
   /* The addition wrapped iff the result is smaller than an operand. */
   return sum < a || sum < b;
}
+
+
#if defined(HAVE_POSIX_MEMALIGN)

/* Aligned allocation via posix_memalign(). */
static inline void *
os_malloc_aligned(size_t size, size_t alignment)
{
   void *ptr;
   /* posix_memalign() requires alignment to be a multiple of
    * sizeof(void *); round it up if needed. */
   alignment = (alignment + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
   if(posix_memalign(&ptr, alignment, size) != 0)
      return NULL;
   return ptr;
}

#define os_free_aligned(_ptr) free(_ptr)

#else

/**
 * Return memory on given byte alignment
 */
static inline void *
os_malloc_aligned(size_t size, size_t alignment)
{
   char *ptr, *buf;
   size_t alloc_size;

   /*
    * Calculate
    *
    * alloc_size = size + alignment + sizeof(void *)
    *
    * while checking for overflow.
    */
   if (add_overflow_size_t(size, alignment, &alloc_size) ||
       add_overflow_size_t(alloc_size, sizeof(void *), &alloc_size)) {
      return NULL;
   }

   ptr = (char *) os_malloc(alloc_size);
   if (!ptr)
      return NULL;

   /* Round up past the bookkeeping slot to the requested alignment
    * (assumes alignment is a power of two), then stash the raw malloc
    * pointer in the slot just below the address handed to the caller. */
   buf = (char *)(((uintptr_t)ptr + sizeof(void *) + alignment - 1) & ~((uintptr_t)(alignment - 1)));
   *(char **)(buf - sizeof(void *)) = ptr;

   return buf;
}


/**
 * Free memory returned by os_malloc_aligned().
 */
static inline void
os_free_aligned(void *ptr)
{
   if (ptr) {
      /* Recover the original malloc pointer stored just before ptr. */
      void **cubbyHole = (void **) ((char *) ptr - sizeof(void *));
      void *realAddr = *cubbyHole;
      os_free(realAddr);
   }
}

#endif
+
/**
 * Reallocate memory, with alignment.
 *
 * Unlike realloc(), this always allocates a fresh aligned block and
 * copies MIN(oldsize, newsize) bytes across.
 *
 * NOTE(review): the old block is freed even when the new allocation
 * fails, so on a NULL return the caller's data is gone — confirm callers
 * treat NULL as fatal rather than expecting realloc-like keep-on-failure
 * semantics.
 */
static inline void *
os_realloc_aligned(void *ptr, size_t oldsize, size_t newsize, size_t alignment)
{
   const size_t copySize = MIN2(oldsize, newsize);
   void *newBuf = os_malloc_aligned(newsize, alignment);
   if (newBuf && ptr && copySize > 0) {
      memcpy(newBuf, ptr, copySize);
   }

   os_free_aligned(ptr);
   return newBuf;
}
diff --git a/src/gallium/auxiliary/os/os_memory_stdc.h b/src/mesa/util/os_memory_stdc.h
index c9fde06d..bda57159 100644
--- a/src/gallium/auxiliary/os/os_memory_stdc.h
+++ b/src/mesa/util/os_memory_stdc.h
@@ -37,8 +37,6 @@
#include <stdlib.h>
-#include "pipe/p_compiler.h"
-
#define os_malloc(_size) malloc(_size)
#define os_calloc(_count, _size ) calloc(_count, _size )
@@ -47,27 +45,13 @@
#define os_realloc( _old_ptr, _old_size, _new_size) \
realloc(_old_ptr, _new_size + 0*(_old_size))
-
-#if defined(HAVE_POSIX_MEMALIGN)
-
-static inline void *
-os_malloc_aligned(size_t size, size_t alignment)
-{
- void *ptr;
- alignment = (alignment + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
- if(posix_memalign(&ptr, alignment, size) != 0)
- return NULL;
- return ptr;
-}
-
-#define os_free_aligned(_ptr) free(_ptr)
-
-#elif defined(PIPE_OS_WINDOWS)
+#if DETECT_OS_WINDOWS
#include <malloc.h>
#define os_malloc_aligned(_size, _align) _aligned_malloc(_size, _align)
#define os_free_aligned(_ptr) _aligned_free(_ptr)
+#define os_realloc_aligned(_ptr, _oldsize, _newsize, _alignment) _aligned_realloc(_ptr, _newsize, _alignment)
#else
diff --git a/src/mesa/util/os_misc.c b/src/mesa/util/os_misc.c
new file mode 100644
index 00000000..31f1c55d
--- /dev/null
+++ b/src/mesa/util/os_misc.c
@@ -0,0 +1,361 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "os_misc.h"
+#include "os_file.h"
+#include "macros.h"
+
+#include <stdarg.h>
+
+
+#if DETECT_OS_WINDOWS
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
+#endif
+#include <windows.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#else
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#endif
+
+
+#if DETECT_OS_ANDROID
+# define LOG_TAG "MESA"
+# include <unistd.h>
+# include <log/log.h>
+# include <cutils/properties.h>
+#elif DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS || DETECT_OS_HURD
+# include <unistd.h>
+#elif DETECT_OS_OPENBSD || DETECT_OS_FREEBSD
+# include <sys/resource.h>
+# include <sys/sysctl.h>
+#elif DETECT_OS_APPLE || DETECT_OS_BSD
+# include <sys/sysctl.h>
+#elif DETECT_OS_HAIKU
+# include <kernel/OS.h>
+#elif DETECT_OS_WINDOWS
+# include <windows.h>
+#else
+#error unexpected platform in os_sysinfo.c
+#endif
+
+
/*
 * Emit a log message. Messages normally go to stderr; in debug builds the
 * GALLIUM_LOG_FILE option can redirect them to a file (prefix the name
 * with '+' to append). The destination is chosen once and cached.
 * NOTE(review): the one-time init of `fout` is not guarded against
 * concurrent first calls — confirm early logging is serialized.
 */
void
os_log_message(const char *message)
{
   /* If the GALLIUM_LOG_FILE environment variable is set to a valid filename,
    * write all messages to that file.
    */
   static FILE *fout = NULL;

   if (!fout) {
#ifdef DEBUG
      /* one-time init */
      const char *filename = os_get_option("GALLIUM_LOG_FILE");
      if (filename) {
         const char *mode = "w";
         if (filename[0] == '+') {
            /* If the filename is prefixed with '+' then open the file for
             * appending instead of normal writing.
             */
            mode = "a";
            filename++; /* skip the '+' */
         }
         fout = fopen(filename, mode);
      }
#endif
      if (!fout)
         fout = stderr;
   }

#if DETECT_OS_WINDOWS
   OutputDebugStringA(message);
   /* Mirror to the console only when one exists and no debugger is
    * attached (the debugger already saw OutputDebugStringA). */
   if(GetConsoleWindow() && !IsDebuggerPresent()) {
      fflush(stdout);
      fputs(message, fout);
      fflush(fout);
   }
   else if (fout != stderr) {
      fputs(message, fout);
      fflush(fout);
   }
#else /* !DETECT_OS_WINDOWS */
   fflush(stdout);
   fputs(message, fout);
   fflush(fout);
# if DETECT_OS_ANDROID
   /* Also mirror the message into logcat. */
   LOG_PRI(ANDROID_LOG_ERROR, LOG_TAG, "%s", message);
# endif
#endif
}
+
#if DETECT_OS_ANDROID
# include <ctype.h>
# include "hash_table.h"
# include "ralloc.h"
# include "simple_mtx.h"

/* Cache of option-name -> property-value lookups (see below). */
static struct hash_table *options_tbl;

/* atexit() hook: tear down the cache (values are ralloc'd off the table). */
static void
options_tbl_fini(void)
{
   _mesa_hash_table_destroy(options_tbl, NULL);
}

/**
 * Get an option value from android's property system, as a fallback to
 * getenv() (which is generally less useful on android due to processes
 * typically being forked from the zygote).
 *
 * The option name used for getenv is translated into a property name
 * by:
 *
 * 1) convert to lowercase
 * 2) replace '_' with '.'
 * 3) if necessary, prepend "mesa."
 *
 * For example:
 * - MESA_EXTENSION_OVERRIDE -> mesa.extension.override
 * - GALLIUM_HUD -> mesa.gallium.hud
 *
 * Note that we use a hashtable for two purposes:
 * 1) Avoid re-translating the option name on subsequent lookups
 * 2) Avoid leaking memory. Because property_get() returns the
 * property value into a user allocated buffer, we cannot return
 * that directly to the caller, so we need to strdup(). With the
 * hashtable, subsequent lookups can return the existing string.
 */
static const char *
os_get_android_option(const char *name)
{
   if (!options_tbl) {
      options_tbl = _mesa_hash_table_create(NULL, _mesa_hash_string,
            _mesa_key_string_equal);
      atexit(options_tbl_fini);
   }

   /* Cached result (including a cached "not set", stored as NULL data). */
   struct hash_entry *entry = _mesa_hash_table_search(options_tbl, name);
   if (entry) {
      return entry->data;
   }

   char value[PROPERTY_VALUE_MAX];
   char key[PROPERTY_KEY_MAX];
   char *p = key, *end = key + PROPERTY_KEY_MAX;
   /* add "mesa." prefix if necessary: */
   if (strstr(name, "MESA_") != name)
      p += strlcpy(p, "mesa.", end - p);
   p += strlcpy(p, name, end - p);
   for (int i = 0; key[i]; i++) {
      if (key[i] == '_') {
         key[i] = '.';
      } else {
         key[i] = tolower(key[i]);
      }
   }

   const char *opt = NULL;
   int len = property_get(key, value, NULL);
   /* NOTE(review): values shorter than two characters are treated as
    * unset — confirm this is intentional. */
   if (len > 1) {
      opt = ralloc_strdup(options_tbl, value);
   }

   /* NOTE(review): the table keys on the caller's `name` pointer without
    * copying it — assumes callers pass string literals; verify. */
   _mesa_hash_table_insert(options_tbl, name, (void *)opt);

   return opt;
}
#endif
+
+
#if !defined(EMBEDDED_DEVICE)
/* Look up a configuration option: the environment takes precedence, with
 * Android system properties as the fallback. Returns NULL when unset. */
const char *
os_get_option(const char *name)
{
   const char *value = getenv(name);
#if DETECT_OS_ANDROID
   if (value == NULL)
      value = os_get_android_option(name);
#endif
   return value;
}
#endif /* !EMBEDDED_DEVICE */
+
/**
 * Return the size of the total physical memory.
 * \param size returns the size of the total physical memory
 * \return true for success, or false on failure
 */
bool
os_get_total_physical_memory(uint64_t *size)
{
#if DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS || DETECT_OS_HURD
   /* POSIX sysconf: physical page count times page size. */
   const long phys_pages = sysconf(_SC_PHYS_PAGES);
   const long page_size = sysconf(_SC_PAGE_SIZE);

   if (phys_pages <= 0 || page_size <= 0)
      return false;

   *size = (uint64_t)phys_pages * (uint64_t)page_size;
   return true;
#elif DETECT_OS_APPLE || DETECT_OS_BSD
   /* sysctl: each BSD flavour spells the "physical memory" key differently. */
   size_t len = sizeof(*size);
   int mib[2];

   mib[0] = CTL_HW;
#if DETECT_OS_APPLE
   mib[1] = HW_MEMSIZE;
#elif DETECT_OS_NETBSD || DETECT_OS_OPENBSD
   mib[1] = HW_PHYSMEM64;
#elif DETECT_OS_FREEBSD
   mib[1] = HW_REALMEM;
#elif DETECT_OS_DRAGONFLY
   mib[1] = HW_PHYSMEM;
#else
#error Unsupported *BSD
#endif

   return (sysctl(mib, 2, size, &len, NULL, 0) == 0);
#elif DETECT_OS_HAIKU
   system_info info;
   status_t ret;

   ret = get_system_info(&info);
   if (ret != B_OK || info.max_pages <= 0)
      return false;

   *size = (uint64_t)info.max_pages * (uint64_t)B_PAGE_SIZE;
   return true;
#elif DETECT_OS_WINDOWS
   MEMORYSTATUSEX status;
   BOOL ret;

   status.dwLength = sizeof(status);
   ret = GlobalMemoryStatusEx(&status);
   *size = status.ullTotalPhys;
   return (ret == TRUE);
#else
#error unexpected platform in os_sysinfo.c
   return false;
#endif
}
+
/*
 * Best-effort estimate of memory currently available to this process.
 * Implemented for Linux (/proc/meminfo "MemAvailable") and
 * OpenBSD/FreeBSD (user memory via sysctl, capped by RLIMIT_DATA);
 * every other platform reports failure.
 */
bool
os_get_available_system_memory(uint64_t *size)
{
#if DETECT_OS_LINUX
   char *meminfo = os_read_file("/proc/meminfo", NULL);
   if (!meminfo)
      return false;

   char *str = strstr(meminfo, "MemAvailable:");
   if (!str) {
      free(meminfo);
      return false;
   }

   /* The kernel reports the value in kB; shift converts to bytes. */
   uint64_t kb_mem_available;
   if (sscanf(str, "MemAvailable: %" PRIu64, &kb_mem_available) == 1) {
      free(meminfo);
      *size = kb_mem_available << 10;
      return true;
   }

   free(meminfo);
   return false;
#elif DETECT_OS_OPENBSD || DETECT_OS_FREEBSD
   struct rlimit rl;
#if DETECT_OS_OPENBSD
   int mib[] = { CTL_HW, HW_USERMEM64 };
#elif DETECT_OS_FREEBSD
   int mib[] = { CTL_HW, HW_USERMEM };
#endif
   int64_t mem_available;
   size_t len = sizeof(mem_available);

   /* physmem - wired */
   if (sysctl(mib, 2, &mem_available, &len, NULL, 0) == -1)
      return false;

   /* static login.conf limit */
   if (getrlimit(RLIMIT_DATA, &rl) == -1)
      return false;

   *size = MIN2(mem_available, rl.rlim_cur);
   return true;
#else
   return false;
#endif
}
+
/**
 * Return the size of a page
 * \param size returns the size of a page
 * \return true for success, or false on failure
 */
bool
os_get_page_size(uint64_t *size)
{
#if DETECT_OS_UNIX && !DETECT_OS_APPLE && !DETECT_OS_HAIKU
   /* POSIX sysconf. */
   const long page_size = sysconf(_SC_PAGE_SIZE);

   if (page_size <= 0)
      return false;

   *size = (uint64_t)page_size;
   return true;
#elif DETECT_OS_HAIKU
   /* Haiku exposes the page size as a compile-time constant. */
   *size = (uint64_t)B_PAGE_SIZE;
   return true;
#elif DETECT_OS_WINDOWS
   SYSTEM_INFO SysInfo;

   GetSystemInfo(&SysInfo);
   *size = SysInfo.dwPageSize;
   return true;
#elif DETECT_OS_APPLE
   /* macOS: query via sysctl. */
   size_t len = sizeof(*size);
   int mib[2];

   mib[0] = CTL_HW;
   mib[1] = HW_PAGESIZE;
   return (sysctl(mib, 2, size, &len, NULL, 0) == 0);
#else
#error unexpected platform in os_sysinfo.c
   return false;
#endif
}
diff --git a/src/gallium/auxiliary/os/os_misc.h b/src/mesa/util/os_misc.h
index a89256e8..432bfe1a 100644
--- a/src/gallium/auxiliary/os/os_misc.h
+++ b/src/mesa/util/os_misc.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2010 Vmware, Inc.
+ * Copyright 2010 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,26 +34,17 @@
#ifndef _OS_MISC_H_
#define _OS_MISC_H_
+#include <stdint.h>
+#include <stdbool.h>
-#include "pipe/p_compiler.h"
+#include "detect_os.h"
-#if defined(PIPE_OS_UNIX)
+#if DETECT_OS_UNIX
# include <signal.h> /* for kill() */
# include <unistd.h> /* getpid() */
#endif
-#ifdef __GNUC__
-#define UNUSED __attribute__((unused))
-#ifdef NDEBUG
-#define MAYBE_UNUSED __attribute__((unused))
-#else
-#define MAYBE_UNUSED
-#endif
-#else
-#define UNUSED
-#define MAYBE_UNUSED
-#endif
#ifdef __cplusplus
extern "C" {
@@ -67,7 +58,7 @@ extern "C" {
# define os_break() __asm("int3")
#elif defined(PIPE_CC_MSVC)
# define os_break() __debugbreak()
-#elif defined(PIPE_OS_UNIX)
+#elif DETECT_OS_UNIX
# define os_break() kill(getpid(), SIGTRAP)
#else
# define os_break() abort()
@@ -78,7 +69,7 @@ extern "C" {
* Abort the program.
*/
#if defined(DEBUG)
-# define os_abort() os_break()
+# define os_abort() do { os_break(); abort(); } while(0)
#else
# define os_abort() abort()
#endif
@@ -98,6 +89,25 @@ const char *
os_get_option(const char *name);
+/*
+ * Get the total amount of physical memory available on the system.
+ */
+bool
+os_get_total_physical_memory(uint64_t *size);
+
+/*
+ * Amount of physical memory available to a process
+ */
+bool
+os_get_available_system_memory(uint64_t *size);
+
+/*
+ * Size of a page
+ */
+bool
+os_get_page_size(uint64_t *size);
+
+
#ifdef __cplusplus
}
#endif
diff --git a/src/mesa/util/ralloc.c b/src/mesa/util/ralloc.c
new file mode 100644
index 00000000..4c2cf077
--- /dev/null
+++ b/src/mesa/util/ralloc.c
@@ -0,0 +1,936 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+
+#include "util/macros.h"
+#include "util/u_math.h"
+
+/* Some versions of MinGW are missing _vscprintf's declaration, although they
+ * still provide the symbol in the import library. */
+#ifdef __MINGW32__
+_CRTIMP int _vscprintf(const char *format, va_list argptr);
+#endif
+
+#include "ralloc.h"
+
+#ifndef va_copy
+#ifdef __va_copy
+#define va_copy(dest, src) __va_copy((dest), (src))
+#else
+#define va_copy(dest, src) (dest) = (src)
+#endif
+#endif
+
/* Magic value stamped into every header so get_header() can detect
 * pointers that were not allocated by ralloc (debug builds only). */
#define CANARY 0x5A1106

/* Align the header's size so that ralloc() allocations will return with the
 * same alignment as a libc malloc would have (8 on 32-bit GLIBC, 16 on
 * 64-bit), avoiding performance penalities on x86 and alignment faults on
 * ARM.
 */
struct
#ifdef _MSC_VER
#if _WIN64
__declspec(align(16))
#else
 __declspec(align(8))
#endif
#elif defined(__LP64__)
 __attribute__((aligned(16)))
#else
 __attribute__((aligned(8)))
#endif
 ralloc_header
{
#ifndef NDEBUG
   /* A canary value used to determine whether a pointer is ralloc'd. */
   unsigned canary;
#endif

   /* Owning context; NULL for a root block. */
   struct ralloc_header *parent;

   /* The first child (head of a linked list) */
   struct ralloc_header *child;

   /* Linked list of siblings */
   struct ralloc_header *prev;
   struct ralloc_header *next;

   /* Invoked on the user pointer just before the block is freed. */
   void (*destructor)(void *);
};

typedef struct ralloc_header ralloc_header;

static void unlink_block(ralloc_header *info);
static void unsafe_free(ralloc_header *info);
+
/* Map a user pointer back to its ralloc_header, which sits immediately
 * before the user data. In debug builds the canary catches pointers that
 * did not come from ralloc. */
static ralloc_header *
get_header(const void *ptr)
{
   ralloc_header *info = (ralloc_header *) (((char *) ptr) -
                                            sizeof(ralloc_header));
   assert(info->canary == CANARY);
   return info;
}

/* Offset from a header forward to the user-visible pointer. */
#define PTR_FROM_HEADER(info) (((char *) info) + sizeof(ralloc_header))

/* Link info as the newest child of parent (pushed onto the head of the
 * sibling list). A NULL parent leaves the block detached. */
static void
add_child(ralloc_header *parent, ralloc_header *info)
{
   if (parent != NULL) {
      info->parent = parent;
      info->next = parent->child;
      parent->child = info;

      if (info->next != NULL)
         info->next->prev = info;
   }
}
+
/* A context is simply a zero-byte ralloc allocation under ctx. */
void *
ralloc_context(const void *ctx)
{
   return ralloc_size(ctx, 0);
}
+
/* Allocate size bytes plus a hidden ralloc_header and link the block as a
 * child of ctx (NULL ctx creates an unparented root). Returns the pointer
 * just past the header, or NULL on OOM. */
void *
ralloc_size(const void *ctx, size_t size)
{
   /* Some malloc allocation doesn't always align to 16 bytes even on 64 bits
    * system, from Android bionic/tests/malloc_test.cpp:
    * - Allocations of a size that rounds up to a multiple of 16 bytes
    * must have at least 16 byte alignment.
    * - Allocations of a size that rounds up to a multiple of 8 bytes and
    * not 16 bytes, are only required to have at least 8 byte alignment.
    */
   void *block = malloc(align64(size + sizeof(ralloc_header),
                                alignof(ralloc_header)));
   ralloc_header *info;
   ralloc_header *parent;

   if (unlikely(block == NULL))
      return NULL;

   info = (ralloc_header *) block;
   /* measurements have shown that calloc is slower (because of
    * the multiplication overflow checking?), so clear things
    * manually
    */
   info->parent = NULL;
   info->child = NULL;
   info->prev = NULL;
   info->next = NULL;
   info->destructor = NULL;

   parent = ctx != NULL ? get_header(ctx) : NULL;

   add_child(parent, info);

#ifndef NDEBUG
   /* Stamp the canary so get_header() accepts this pointer from now on. */
   info->canary = CANARY;
#endif

   return PTR_FROM_HEADER(info);
}
+
/* Like ralloc_size(), but the returned memory is zero-filled. */
void *
rzalloc_size(const void *ctx, size_t size)
{
   void *mem = ralloc_size(ctx, size);
   if (likely(mem != NULL))
      memset(mem, 0, size);
   return mem;
}
+
/* helper function - assumes ptr != NULL */
/* realloc() the underlying block (header included). Since realloc may
 * move the block, every link that pointed at the old header — the
 * parent's child pointer, both sibling neighbours, and each child's
 * parent pointer — must be re-aimed at the new address. */
static void *
resize(void *ptr, size_t size)
{
   ralloc_header *child, *old, *info;

   old = get_header(ptr);
   info = realloc(old, align64(size + sizeof(ralloc_header),
                               alignof(ralloc_header)));

   if (info == NULL)
      return NULL;

   /* Update parent and sibling's links to the reallocated node. */
   if (info != old && info->parent != NULL) {
      if (info->parent->child == old)
         info->parent->child = info;

      if (info->prev != NULL)
         info->prev->next = info;

      if (info->next != NULL)
         info->next->prev = info;
   }

   /* Update child->parent links for all children */
   for (child = info->child; child != NULL; child = child->next)
      child->parent = info;

   return PTR_FROM_HEADER(info);
}
+
+void *
+reralloc_size(const void *ctx, void *ptr, size_t size)
+{
+ if (unlikely(ptr == NULL))
+ return ralloc_size(ctx, size);
+
+ assert(ralloc_parent(ptr) == ctx);
+ return resize(ptr, size);
+}
+
/* Zeroing variant of reralloc_size(): any bytes beyond old_size are
 * cleared. Returns NULL if the underlying resize fails. */
void *
rerzalloc_size(const void *ctx, void *ptr, size_t old_size, size_t new_size)
{
   if (unlikely(ptr == NULL))
      return rzalloc_size(ctx, new_size);

   assert(ralloc_parent(ptr) == ctx);
   ptr = resize(ptr, new_size);

   /* resize() can fail and return NULL; memset on NULL + old_size would
    * be undefined behavior. */
   if (ptr != NULL && new_size > old_size)
      memset((char *)ptr + old_size, 0, new_size - old_size);

   return ptr;
}
+
/* Array-allocation wrappers: multiply size * count with an overflow check
 * before delegating. A zero element size previously made the
 * "count > SIZE_MAX / size" guard divide by zero (undefined behavior);
 * size == 0 is now treated as a trivially safe zero-byte request. */
void *
ralloc_array_size(const void *ctx, size_t size, unsigned count)
{
   if (size != 0 && count > SIZE_MAX/size)
      return NULL;

   return ralloc_size(ctx, size * count);
}

/* Zeroing variant of ralloc_array_size(). */
void *
rzalloc_array_size(const void *ctx, size_t size, unsigned count)
{
   if (size != 0 && count > SIZE_MAX/size)
      return NULL;

   return rzalloc_size(ctx, size * count);
}

/* Resizing variant of ralloc_array_size(). */
void *
reralloc_array_size(const void *ctx, void *ptr, size_t size, unsigned count)
{
   if (size != 0 && count > SIZE_MAX/size)
      return NULL;

   return reralloc_size(ctx, ptr, size * count);
}

/* Resizing + zeroing variant. */
void *
rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
                     unsigned old_count, unsigned new_count)
{
   /* NOTE(review): only new_count is range-checked, matching the original;
    * old_count is assumed to have been validated when the buffer was first
    * allocated. */
   if (size != 0 && new_count > SIZE_MAX/size)
      return NULL;

   return rerzalloc_size(ctx, ptr, size * old_count, size * new_count);
}
+
+void
+ralloc_free(void *ptr)
+{
+ ralloc_header *info;
+
+ if (ptr == NULL)
+ return;
+
+ info = get_header(ptr);
+ unlink_block(info);
+ unsafe_free(info);
+}
+
/* Detach info from its parent and siblings without touching its children;
 * the block becomes a stand-alone root afterwards. */
static void
unlink_block(ralloc_header *info)
{
   /* Unlink from parent & siblings */
   if (info->parent != NULL) {
      if (info->parent->child == info)
         info->parent->child = info->next;

      if (info->prev != NULL)
         info->prev->next = info->next;

      if (info->next != NULL)
         info->next->prev = info->prev;
   }
   info->parent = NULL;
   info->prev = NULL;
   info->next = NULL;
}
+
/* Free info and its whole subtree. "Unsafe" because it assumes the block
 * has already been unlinked from any parent. Each block's destructor (if
 * set) runs before its memory is released.
 * NOTE(review): recursion depth equals the nesting depth of the
 * allocation tree — pathologically deep contexts could grow the stack. */
static void
unsafe_free(ralloc_header *info)
{
   /* Recursively free any children...don't waste time unlinking them. */
   ralloc_header *temp;
   while (info->child != NULL) {
      temp = info->child;
      info->child = temp->next;
      unsafe_free(temp);
   }

   /* Free the block itself. Call the destructor first, if any. */
   if (info->destructor != NULL)
      info->destructor(PTR_FROM_HEADER(info));

   free(info);
}
+
+void
+ralloc_steal(const void *new_ctx, void *ptr)
+{
+ ralloc_header *info, *parent;
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ info = get_header(ptr);
+ parent = new_ctx ? get_header(new_ctx) : NULL;
+
+ unlink_block(info);
+
+ add_child(parent, info);
+}
+
/* Move every child of old_ctx under new_ctx in one operation: re-parent
 * each child, then splice old_ctx's child list onto the front of
 * new_ctx's, leaving old_ctx childless. */
void
ralloc_adopt(const void *new_ctx, void *old_ctx)
{
   ralloc_header *new_info, *old_info, *child;

   if (unlikely(old_ctx == NULL))
      return;

   old_info = get_header(old_ctx);
   new_info = get_header(new_ctx);

   /* If there are no children, bail. */
   if (unlikely(old_info->child == NULL))
      return;

   /* Set all the children's parent to new_ctx; get a pointer to the last child. */
   for (child = old_info->child; child->next != NULL; child = child->next) {
      child->parent = new_info;
   }
   /* The loop stops at the last child without re-parenting it; do it here. */
   child->parent = new_info;

   /* Connect the two lists together; parent them to new_ctx; make old_ctx empty. */
   child->next = new_info->child;
   if (child->next)
      child->next->prev = child;
   new_info->child = old_info->child;
   old_info->child = NULL;
}
+
+void *
+ralloc_parent(const void *ptr)
+{
+ ralloc_header *info;
+
+ if (unlikely(ptr == NULL))
+ return NULL;
+
+ info = get_header(ptr);
+ return info->parent ? PTR_FROM_HEADER(info->parent) : NULL;
+}
+
/* Register destructor to be invoked on ptr just before ralloc_free()
 * releases it. Only one destructor is stored per block; a later call
 * replaces the earlier one. ptr must be a ralloc'd pointer. */
void
ralloc_set_destructor(const void *ptr, void(*destructor)(void *))
{
   ralloc_header *info = get_header(ptr);
   info->destructor = destructor;
}
+
+/* Duplicate a NUL-terminated string into ralloc storage owned by ctx.
+ * Returns NULL if str is NULL or if allocation fails. */
+char *
+ralloc_strdup(const void *ctx, const char *str)
+{
+   size_t n;
+   char *ptr;
+
+   if (unlikely(str == NULL))
+      return NULL;
+
+   n = strlen(str);
+   ptr = ralloc_array(ctx, char, n + 1);
+   /* Fix: the allocation was previously used unchecked, so an OOM from
+    * ralloc_array would make memcpy write through NULL.  linear_strdup
+    * below already performs this check. */
+   if (unlikely(ptr == NULL))
+      return NULL;
+
+   memcpy(ptr, str, n);
+   ptr[n] = '\0';
+   return ptr;
+}
+
+/* Duplicate at most max characters of str into ralloc storage owned by
+ * ctx; the result is always NUL-terminated.  str need not be terminated
+ * if it is at least max characters long (strnlen bounds the scan).
+ * Returns NULL if str is NULL or if allocation fails. */
+char *
+ralloc_strndup(const void *ctx, const char *str, size_t max)
+{
+   size_t n;
+   char *ptr;
+
+   if (unlikely(str == NULL))
+      return NULL;
+
+   n = strnlen(str, max);
+   ptr = ralloc_array(ctx, char, n + 1);
+   /* Fix: previously unchecked -- memcpy would dereference NULL on OOM. */
+   if (unlikely(ptr == NULL))
+      return NULL;
+
+   memcpy(ptr, str, n);
+   ptr[n] = '\0';
+   return ptr;
+}
+
+/* helper routine for strcat/strncat - n is the exact amount to copy */
+/* Grows *dest via resize and appends exactly n bytes of str, then
+ * NUL-terminates.  On allocation failure returns false and leaves *dest
+ * untouched (and still valid). */
+static bool
+cat(char **dest, const char *str, size_t n)
+{
+   char *both;
+   size_t existing_length;
+   assert(dest != NULL && *dest != NULL);
+
+   existing_length = strlen(*dest);
+   both = resize(*dest, existing_length + n + 1);
+   if (unlikely(both == NULL))
+      return false;
+
+   memcpy(both + existing_length, str, n);
+   both[existing_length + n] = '\0';
+
+   *dest = both;
+   return true;
+}
+
+
+/* Append all of str to *dest, growing *dest as needed.  Returns false on
+ * allocation failure (see cat above). */
+bool
+ralloc_strcat(char **dest, const char *str)
+{
+   return cat(dest, str, strlen(str));
+}
+
+/* Append at most n bytes of str to *dest.  strnlen bounds the scan, so
+ * str need not be NUL-terminated when it is longer than n. */
+bool
+ralloc_strncat(char **dest, const char *str, size_t n)
+{
+   return cat(dest, str, strnlen(str, n));
+}
+
+/* Append str_size bytes of str to *dest, where the caller supplies the
+ * current length of *dest (existing_length) to avoid a strlen call.
+ * NOTE: existing_length is trusted, not verified against *dest. */
+bool
+ralloc_str_append(char **dest, const char *str,
+                  size_t existing_length, size_t str_size)
+{
+   char *both;
+   assert(dest != NULL && *dest != NULL);
+
+   both = resize(*dest, existing_length + str_size + 1);
+   if (unlikely(both == NULL))
+      return false;
+
+   memcpy(both + existing_length, str, str_size);
+   both[existing_length + str_size] = '\0';
+
+   *dest = both;
+
+   return true;
+}
+
+/* printf into a freshly ralloc'd string owned by ctx; variadic wrapper
+ * around ralloc_vasprintf.  Returns NULL on allocation failure. */
+char *
+ralloc_asprintf(const void *ctx, const char *fmt, ...)
+{
+   char *ptr;
+   va_list args;
+   va_start(args, fmt);
+   ptr = ralloc_vasprintf(ctx, fmt, args);
+   va_end(args);
+   return ptr;
+}
+
+/* Return the length of the string that would be generated by a printf-style
+ * format and argument list, not including the \0 byte.
+ */
+static size_t
+printf_length(const char *fmt, va_list untouched_args)
+{
+   int size;
+   char junk;
+
+   /* Make a copy of the va_list so the original caller can still use it */
+   va_list args;
+   va_copy(args, untouched_args);
+
+#ifdef _WIN32
+   /* We need to use _vscprintf to calculate the size as vsnprintf returns -1
+    * if the number of characters to write is greater than count.
+    */
+   size = _vscprintf(fmt, args);
+   (void)junk;
+#else
+   /* junk gives vsnprintf a valid 1-byte buffer; the return value is the
+    * full length the output would have needed. */
+   size = vsnprintf(&junk, 1, fmt, args);
+#endif
+   assert(size >= 0);
+
+   va_end(args);
+
+   return size;
+}
+
+/* vprintf into a freshly ralloc'd string owned by ctx.  Two passes over
+ * the format: one to size the buffer (printf_length copies the va_list),
+ * one to fill it.  Returns NULL on allocation failure. */
+char *
+ralloc_vasprintf(const void *ctx, const char *fmt, va_list args)
+{
+   size_t size = printf_length(fmt, args) + 1;
+
+   char *ptr = ralloc_size(ctx, size);
+   if (ptr != NULL)
+      vsnprintf(ptr, size, fmt, args);
+
+   return ptr;
+}
+
+/* Append formatted text to *str (variadic wrapper).  Returns false on
+ * allocation failure, leaving *str valid. */
+bool
+ralloc_asprintf_append(char **str, const char *fmt, ...)
+{
+   bool success;
+   va_list args;
+   va_start(args, fmt);
+   success = ralloc_vasprintf_append(str, fmt, args);
+   va_end(args);
+   return success;
+}
+
+/* Append formatted text to *str, computing the current length with
+ * strlen (a NULL *str counts as empty) and delegating to the
+ * rewrite_tail variant. */
+bool
+ralloc_vasprintf_append(char **str, const char *fmt, va_list args)
+{
+   size_t existing_length;
+   assert(str != NULL);
+   existing_length = *str ? strlen(*str) : 0;
+   return ralloc_vasprintf_rewrite_tail(str, &existing_length, fmt, args);
+}
+
+/* Overwrite *str from index *start with formatted text (variadic
+ * wrapper); *start is advanced past the new text on success. */
+bool
+ralloc_asprintf_rewrite_tail(char **str, size_t *start, const char *fmt, ...)
+{
+   bool success;
+   va_list args;
+   va_start(args, fmt);
+   success = ralloc_vasprintf_rewrite_tail(str, start, fmt, args);
+   va_end(args);
+   return success;
+}
+
+/* Overwrite *str from index *start with newly formatted text, growing the
+ * allocation as needed; *start is advanced past the new text.  A NULL
+ * *str is treated as empty and freshly allocated.  Returns false on
+ * allocation failure, leaving *str/*start untouched. */
+bool
+ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
+                              va_list args)
+{
+   size_t new_length;
+   char *ptr;
+
+   assert(str != NULL);
+
+   if (unlikely(*str == NULL)) {
+      // Assuming a NULL context is probably bad, but it's expected behavior.
+      *str = ralloc_vasprintf(NULL, fmt, args);
+      /* Fix: ralloc_vasprintf returns NULL on allocation failure; the old
+       * code then called strlen(NULL), which is undefined behavior. */
+      if (unlikely(*str == NULL))
+         return false;
+      *start = strlen(*str);
+      return true;
+   }
+
+   new_length = printf_length(fmt, args);
+
+   ptr = resize(*str, *start + new_length + 1);
+   if (unlikely(ptr == NULL))
+      return false;
+
+   vsnprintf(ptr + *start, new_length + 1, fmt, args);
+   *str = ptr;
+   *start += new_length;
+   return true;
+}
+
+/***************************************************************************
+ * Linear allocator for short-lived allocations.
+ ***************************************************************************
+ *
+ * The allocator consists of a parent node (2K buffer), which requires
+ * a ralloc parent, and child nodes (allocations). Child nodes can't be freed
+ * directly, because the parent doesn't track them. You have to release
+ * the parent node in order to release all its children.
+ *
+ * The allocator uses a fixed-sized buffer with a monotonically increasing
+ * offset after each allocation. If the buffer is all used, another buffer
+ * is allocated, sharing the same ralloc parent, so all buffers are at
+ * the same level in the ralloc hierarchy.
+ *
+ * The linear parent node is always the first buffer and keeps track of all
+ * other buffers.
+ */
+
+#define MIN_LINEAR_BUFSIZE 2048
+#define SUBALLOC_ALIGNMENT 8
+#define LMAGIC 0x87b9c7d3
+
+/* Header of one linear buffer.  Over-aligned so that the first
+ * suballocation (header + linear_size_chunk) lands on a suitably aligned
+ * address.  NOTE: the magic field exists only in debug builds, so the
+ * struct size differs between NDEBUG and non-NDEBUG builds. */
+struct
+#ifdef _MSC_VER
+ __declspec(align(8))
+#elif defined(__LP64__)
+ __attribute__((aligned(16)))
+#else
+ __attribute__((aligned(8)))
+#endif
+   linear_header {
+#ifndef NDEBUG
+   unsigned magic;   /* for debugging */
+#endif
+   unsigned offset;  /* points to the first unused byte in the buffer */
+   unsigned size;    /* size of the buffer */
+   void *ralloc_parent;          /* new buffers will use this */
+   struct linear_header *next;   /* next buffer if we have more */
+   struct linear_header *latest; /* the only buffer that has free space */
+
+   /* After this structure, the buffer begins.
+    * Each suballocation consists of linear_size_chunk as its header followed
+    * by the suballocation, so it goes:
+    *
+    * - linear_size_chunk
+    * - allocated space
+    * - linear_size_chunk
+    * - allocated space
+    * etc.
+    *
+    * linear_size_chunk is only needed by linear_realloc.
+    */
+};
+
+/* Per-suballocation header; records the size so linear_realloc knows how
+ * much to copy.  _padding keeps suballocations 8-byte aligned. */
+struct linear_size_chunk {
+   unsigned size; /* for realloc */
+   unsigned _padding;
+};
+
+typedef struct linear_header linear_header;
+typedef struct linear_size_chunk linear_size_chunk;
+
+/* Map a linear "parent" handle (which is really the first suballocation's
+ * user pointer) back to the linear_header of its buffer. */
+#define LINEAR_PARENT_TO_HEADER(parent) \
+   (linear_header*) \
+   ((char*)(parent) - sizeof(linear_size_chunk) - sizeof(linear_header))
+
+/* Allocate the linear buffer with its header. */
+/* The buffer is ralloc'd under ralloc_ctx.  min_size is grown to cover
+ * one linear_size_chunk and then clamped up to MIN_LINEAR_BUFSIZE, so
+ * small requests share a 2K buffer.  Returns NULL on OOM. */
+static linear_header *
+create_linear_node(void *ralloc_ctx, unsigned min_size)
+{
+   linear_header *node;
+
+   min_size += sizeof(linear_size_chunk);
+
+   if (likely(min_size < MIN_LINEAR_BUFSIZE))
+      min_size = MIN_LINEAR_BUFSIZE;
+
+   node = ralloc_size(ralloc_ctx, sizeof(linear_header) + min_size);
+   if (unlikely(!node))
+      return NULL;
+
+#ifndef NDEBUG
+   node->magic = LMAGIC;
+#endif
+   node->offset = 0;
+   node->size = min_size;
+   node->ralloc_parent = ralloc_ctx;
+   node->next = NULL;
+   node->latest = node;
+   return node;
+}
+
+/* Bump-pointer allocation from the linear parent's current buffer.  The
+ * request is rounded up to SUBALLOC_ALIGNMENT; when the current buffer
+ * cannot fit it, a new buffer is chained on and becomes "latest".
+ * Returns NULL only if a new buffer was needed and ralloc failed. */
+void *
+linear_alloc_child(void *parent, unsigned size)
+{
+   linear_header *first = LINEAR_PARENT_TO_HEADER(parent);
+   linear_header *latest = first->latest;
+   linear_header *new_node;
+   linear_size_chunk *ptr;
+   unsigned full_size;
+
+   assert(first->magic == LMAGIC);
+   assert(!latest->next);
+
+   size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);
+   full_size = sizeof(linear_size_chunk) + size;
+
+   if (unlikely(latest->offset + full_size > latest->size)) {
+      /* allocate a new node */
+      new_node = create_linear_node(latest->ralloc_parent, size);
+      if (unlikely(!new_node))
+         return NULL;
+
+      first->latest = new_node;
+      latest->latest = new_node;
+      latest->next = new_node;
+      latest = new_node;
+   }
+
+   /* Carve the suballocation out at the current offset: a size chunk
+    * followed by the user data. */
+   ptr = (linear_size_chunk *)((char*)&latest[1] + latest->offset);
+   ptr->size = size;
+   latest->offset += full_size;
+
+   assert((uintptr_t)&ptr[1] % SUBALLOC_ALIGNMENT == 0);
+   return &ptr[1];
+}
+
+/* Create a linear parent node under ralloc_ctx and make its first
+ * suballocation.  The returned pointer is both the first allocation and
+ * the handle passed to all subsequent linear_* calls. */
+void *
+linear_alloc_parent(void *ralloc_ctx, unsigned size)
+{
+   linear_header *node;
+
+   if (unlikely(!ralloc_ctx))
+      return NULL;
+
+   size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);
+
+   node = create_linear_node(ralloc_ctx, size);
+   if (unlikely(!node))
+      return NULL;
+
+   /* The "parent handle" is the address where the first suballocation's
+    * user data will live; see LINEAR_PARENT_TO_HEADER. */
+   return linear_alloc_child((char*)node +
+                             sizeof(linear_header) +
+                             sizeof(linear_size_chunk), size);
+}
+
+/* Same as linear_alloc_child, but zero-fills the allocation. */
+void *
+linear_zalloc_child(void *parent, unsigned size)
+{
+   void *ptr = linear_alloc_child(parent, size);
+
+   if (likely(ptr))
+      memset(ptr, 0, size);
+   return ptr;
+}
+
+/* Same as linear_alloc_parent, but zero-fills the allocation.  (The
+ * parameter is a ralloc context despite the name "parent"; the header
+ * declaration calls it ralloc_ctx.) */
+void *
+linear_zalloc_parent(void *parent, unsigned size)
+{
+   void *ptr = linear_alloc_parent(parent, size);
+
+   if (likely(ptr))
+      memset(ptr, 0, size);
+   return ptr;
+}
+
+/* Free the linear parent and every chained buffer (and therefore all
+ * child suballocations).  NULL is a no-op. */
+void
+linear_free_parent(void *ptr)
+{
+   linear_header *node;
+
+   if (unlikely(!ptr))
+      return;
+
+   node = LINEAR_PARENT_TO_HEADER(ptr);
+   assert(node->magic == LMAGIC);
+
+   while (node) {
+      /* NOTE(review): this local shadows the function parameter; it holds
+       * the buffer about to be freed while we advance to the next one. */
+      void *ptr = node;
+
+      node = node->next;
+      ralloc_free(ptr);
+   }
+}
+
+/* Reparent every buffer of the linear parent onto new_ralloc_ctx, keeping
+ * all buffers at the same level of the ralloc hierarchy. */
+void
+ralloc_steal_linear_parent(void *new_ralloc_ctx, void *ptr)
+{
+   linear_header *node;
+
+   if (unlikely(!ptr))
+      return;
+
+   node = LINEAR_PARENT_TO_HEADER(ptr);
+   assert(node->magic == LMAGIC);
+
+   while (node) {
+      ralloc_steal(new_ralloc_ctx, node);
+      node->ralloc_parent = new_ralloc_ctx;
+      node = node->next;
+   }
+}
+
+/* Return the ralloc context that owns the linear parent node.
+ * NOTE: ptr is not NULL-checked, unlike the other linear_* entry points. */
+void *
+ralloc_parent_of_linear_parent(void *ptr)
+{
+   linear_header *node = LINEAR_PARENT_TO_HEADER(ptr);
+   assert(node->magic == LMAGIC);
+   return node->ralloc_parent;
+}
+
+/* Linear-allocator "realloc": allocates a fresh child node and copies the
+ * old contents over.  The old block is NOT freed -- the linear allocator
+ * cannot free individual children -- so this is pure duplication.
+ *
+ * \param parent   linear parent node handle
+ * \param old      previous allocation from the same parent, or NULL
+ * \param new_size requested size in bytes
+ */
+void *
+linear_realloc(void *parent, void *old, unsigned new_size)
+{
+   unsigned old_size = 0;
+   /* Fix: this was declared "ralloc_header *", which only compiled via
+    * implicit void* conversions and misrepresented what it points to. */
+   void *new_ptr;
+
+   new_ptr = linear_alloc_child(parent, new_size);
+
+   if (unlikely(!old))
+      return new_ptr;
+
+   /* The suballocation size is stashed in the linear_size_chunk header
+    * immediately preceding the user pointer. */
+   old_size = ((linear_size_chunk*)old)[-1].size;
+
+   if (likely(new_ptr && old_size))
+      memcpy(new_ptr, old, MIN2(old_size, new_size));
+
+   return new_ptr;
+}
+
+/* All code below is pretty much copied from ralloc and only the alloc
+ * calls are different.
+ */
+
+/* Duplicate a NUL-terminated string into a linear child allocation.
+ * Returns NULL if str is NULL or if allocation fails. */
+char *
+linear_strdup(void *parent, const char *str)
+{
+   unsigned n;
+   char *ptr;
+
+   if (unlikely(!str))
+      return NULL;
+
+   n = strlen(str);
+   ptr = linear_alloc_child(parent, n + 1);
+   if (unlikely(!ptr))
+      return NULL;
+
+   memcpy(ptr, str, n);
+   ptr[n] = '\0';
+   return ptr;
+}
+
+/* printf into a fresh linear child allocation (variadic wrapper around
+ * linear_vasprintf). */
+char *
+linear_asprintf(void *parent, const char *fmt, ...)
+{
+   char *ptr;
+   va_list args;
+   va_start(args, fmt);
+   ptr = linear_vasprintf(parent, fmt, args);
+   va_end(args);
+   return ptr;
+}
+
+/* vprintf into a fresh linear child allocation, sized via printf_length.
+ * Returns NULL on allocation failure. */
+char *
+linear_vasprintf(void *parent, const char *fmt, va_list args)
+{
+   unsigned size = printf_length(fmt, args) + 1;
+
+   char *ptr = linear_alloc_child(parent, size);
+   if (ptr != NULL)
+      vsnprintf(ptr, size, fmt, args);
+
+   return ptr;
+}
+
+/* Append formatted text to *str within the linear allocator (variadic
+ * wrapper).  Returns false on allocation failure. */
+bool
+linear_asprintf_append(void *parent, char **str, const char *fmt, ...)
+{
+   bool success;
+   va_list args;
+   va_start(args, fmt);
+   success = linear_vasprintf_append(parent, str, fmt, args);
+   va_end(args);
+   return success;
+}
+
+/* Append formatted text to *str, treating a NULL *str as empty; delegates
+ * to the rewrite_tail variant with the current strlen. */
+bool
+linear_vasprintf_append(void *parent, char **str, const char *fmt, va_list args)
+{
+   size_t existing_length;
+   assert(str != NULL);
+   existing_length = *str ? strlen(*str) : 0;
+   return linear_vasprintf_rewrite_tail(parent, str, &existing_length, fmt, args);
+}
+
+/* Overwrite *str from index *start with formatted text (variadic
+ * wrapper); *start is advanced past the new text on success. */
+bool
+linear_asprintf_rewrite_tail(void *parent, char **str, size_t *start,
+                             const char *fmt, ...)
+{
+   bool success;
+   va_list args;
+   va_start(args, fmt);
+   success = linear_vasprintf_rewrite_tail(parent, str, start, fmt, args);
+   va_end(args);
+   return success;
+}
+
+/* Overwrite *str from index *start with newly formatted text, using
+ * linear_realloc to grow (the old block is duplicated, not freed);
+ * *start is advanced past the new text.  A NULL *str is treated as empty
+ * and freshly allocated.  Returns false on allocation failure. */
+bool
+linear_vasprintf_rewrite_tail(void *parent, char **str, size_t *start,
+                              const char *fmt, va_list args)
+{
+   size_t new_length;
+   char *ptr;
+
+   assert(str != NULL);
+
+   if (unlikely(*str == NULL)) {
+      *str = linear_vasprintf(parent, fmt, args);
+      /* Fix: linear_vasprintf returns NULL on allocation failure; the old
+       * code then called strlen(NULL), which is undefined behavior. */
+      if (unlikely(*str == NULL))
+         return false;
+      *start = strlen(*str);
+      return true;
+   }
+
+   new_length = printf_length(fmt, args);
+
+   ptr = linear_realloc(parent, *str, *start + new_length + 1);
+   if (unlikely(ptr == NULL))
+      return false;
+
+   vsnprintf(ptr + *start, new_length + 1, fmt, args);
+   *str = ptr;
+   *start += new_length;
+   return true;
+}
+
+/* helper routine for strcat/strncat - n is the exact amount to copy */
+/* Like cat() above but backed by linear_realloc, so the previous *dest
+ * block is duplicated rather than freed.  Returns false and leaves *dest
+ * untouched on allocation failure. */
+static bool
+linear_cat(void *parent, char **dest, const char *str, unsigned n)
+{
+   char *both;
+   unsigned existing_length;
+   assert(dest != NULL && *dest != NULL);
+
+   existing_length = strlen(*dest);
+   both = linear_realloc(parent, *dest, existing_length + n + 1);
+   if (unlikely(both == NULL))
+      return false;
+
+   memcpy(both + existing_length, str, n);
+   both[existing_length + n] = '\0';
+
+   *dest = both;
+   return true;
+}
+
+/* Append all of str to *dest within the linear allocator; returns false
+ * on allocation failure (see linear_cat). */
+bool
+linear_strcat(void *parent, char **dest, const char *str)
+{
+   return linear_cat(parent, dest, str, strlen(str));
+}
diff --git a/src/mesa/util/ralloc.h b/src/mesa/util/ralloc.h
new file mode 100644
index 00000000..857ca5f7
--- /dev/null
+++ b/src/mesa/util/ralloc.h
@@ -0,0 +1,604 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ralloc.h
+ *
+ * ralloc: a recursive memory allocator
+ *
+ * The ralloc memory allocator creates a hierarchy of allocated
+ * objects. Every allocation is in reference to some parent, and
+ * every allocated object can in turn be used as the parent of a
+ * subsequent allocation. This allows for extremely convenient
+ * discarding of an entire tree/sub-tree of allocations by calling
+ * ralloc_free on any particular object to free it and all of its
+ * children.
+ *
+ * The conceptual working of ralloc was directly inspired by Andrew
+ * Tridgell's talloc, but ralloc is an independent implementation
+ * released under the MIT license and tuned for Mesa.
+ *
+ * talloc is more sophisticated than ralloc in that it includes reference
+ * counting and useful debugging features. However, it is released under
+ * a non-permissive open source license.
+ */
+
+#ifndef RALLOC_H
+#define RALLOC_H
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <stdbool.h>
+
+#include "macros.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \def ralloc(ctx, type)
+ * Allocate a new object chained off of the given context.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) ralloc_size(ctx, sizeof(type))
+ * \endcode
+ */
+#define ralloc(ctx, type) ((type *) ralloc_size(ctx, sizeof(type)))
+
+/**
+ * \def rzalloc(ctx, type)
+ * Allocate a new object out of the given context and initialize it to zero.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) rzalloc_size(ctx, sizeof(type))
+ * \endcode
+ */
+#define rzalloc(ctx, type) ((type *) rzalloc_size(ctx, sizeof(type)))
+
+/**
+ * Allocate a new ralloc context.
+ *
+ * While any ralloc'd pointer can be used as a context, sometimes it is useful
+ * to simply allocate a context with no associated memory.
+ *
+ * It is equivalent to:
+ * \code
+ * ((type *) ralloc_size(ctx, 0)
+ * \endcode
+ */
+void *ralloc_context(const void *ctx);
+
+/**
+ * Allocate memory chained off of the given context.
+ *
+ * This is the core allocation routine which is used by all others. It
+ * simply allocates storage for \p size bytes and returns the pointer,
+ * similar to \c malloc.
+ */
+void *ralloc_size(const void *ctx, size_t size) MALLOCLIKE;
+
+/**
+ * Allocate zero-initialized memory chained off of the given context.
+ *
+ * This is similar to \c calloc with a size of 1.
+ */
+void *rzalloc_size(const void *ctx, size_t size) MALLOCLIKE;
+
+/**
+ * Resize a piece of ralloc-managed memory, preserving data.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the memory to be resized. May be NULL.
+ * \param size The amount of memory to allocate, in bytes.
+ */
+void *reralloc_size(const void *ctx, void *ptr, size_t size);
+
+/**
+ * Resize a ralloc-managed array, preserving data and initializing any newly
+ * allocated data to zero.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the memory to be resized. May be NULL.
+ * \param old_size The amount of memory in the previous allocation, in bytes.
+ * \param new_size The amount of memory to allocate, in bytes.
+ */
+void *rerzalloc_size(const void *ctx, void *ptr,
+ size_t old_size, size_t new_size);
+
+/// \defgroup array Array Allocators @{
+
+/**
+ * \def ralloc_array(ctx, type, count)
+ * Allocate an array of objects chained off the given context.
+ *
+ * Similar to \c calloc, but does not initialize the memory to zero.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) ralloc_array_size(ctx, sizeof(type), count)
+ * \endcode
+ */
+#define ralloc_array(ctx, type, count) \
+ ((type *) ralloc_array_size(ctx, sizeof(type), count))
+
+/**
+ * \def rzalloc_array(ctx, type, count)
+ * Allocate a zero-initialized array chained off the given context.
+ *
+ * Similar to \c calloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) rzalloc_array_size(ctx, sizeof(type), count)
+ * \endcode
+ */
+#define rzalloc_array(ctx, type, count) \
+ ((type *) rzalloc_array_size(ctx, sizeof(type), count))
+
+/**
+ * \def reralloc(ctx, ptr, type, count)
+ * Resize a ralloc-managed array, preserving data.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param type The element type.
+ * \param count The number of elements to allocate.
+ */
+#define reralloc(ctx, ptr, type, count) \
+ ((type *) reralloc_array_size(ctx, ptr, sizeof(type), count))
+
+/**
+ * \def rerzalloc(ctx, ptr, type, count)
+ * Resize a ralloc-managed array, preserving data and initializing any newly
+ * allocated data to zero.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param type The element type.
+ * \param old_count The number of elements in the previous allocation.
+ * \param new_count The number of elements to allocate.
+ */
+#define rerzalloc(ctx, ptr, type, old_count, new_count) \
+ ((type *) rerzalloc_array_size(ctx, ptr, sizeof(type), old_count, new_count))
+
+/**
+ * Allocate memory for an array chained off the given context.
+ *
+ * Similar to \c calloc, but does not initialize the memory to zero.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \p size and \p count. This is necessary for security.
+ */
+void *ralloc_array_size(const void *ctx, size_t size, unsigned count) MALLOCLIKE;
+
+/**
+ * Allocate a zero-initialized array chained off the given context.
+ *
+ * Similar to \c calloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \p size and \p count. This is necessary for security.
+ */
+void *rzalloc_array_size(const void *ctx, size_t size, unsigned count) MALLOCLIKE;
+
+/**
+ * Resize a ralloc-managed array, preserving data.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param size The size of an individual element.
+ * \param count The number of elements to allocate.
+ *
+ * \return True unless allocation failed.
+ */
+void *reralloc_array_size(const void *ctx, void *ptr, size_t size,
+ unsigned count);
+
+/**
+ * Resize a ralloc-managed array, preserving data and initializing any newly
+ * allocated data to zero.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param size The size of an individual element.
+ * \param old_count The number of elements in the previous allocation.
+ * \param new_count The number of elements to allocate.
+ *
+ * \return True unless allocation failed.
+ */
+void *rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
+ unsigned old_count, unsigned new_count);
+/// @}
+
+/**
+ * Free a piece of ralloc-managed memory.
+ *
+ * This will also free the memory of any children allocated this context.
+ */
+void ralloc_free(void *ptr);
+
+/**
+ * "Steal" memory from one context, changing it to another.
+ *
+ * This changes \p ptr's context to \p new_ctx. This is quite useful if
+ * memory is allocated out of a temporary context.
+ */
+void ralloc_steal(const void *new_ctx, void *ptr);
+
+/**
+ * Reparent all children from one context to another.
+ *
+ * This effectively calls ralloc_steal(new_ctx, child) for all children of \p old_ctx.
+ */
+void ralloc_adopt(const void *new_ctx, void *old_ctx);
+
+/**
+ * Return the given pointer's ralloc context.
+ */
+void *ralloc_parent(const void *ptr);
+
+/**
+ * Set a callback to occur just before an object is freed.
+ */
+void ralloc_set_destructor(const void *ptr, void(*destructor)(void *));
+
+/// \defgroup string String Functions @{
+/**
+ * Duplicate a string, allocating the memory from the given context.
+ */
+char *ralloc_strdup(const void *ctx, const char *str) MALLOCLIKE;
+
+/**
+ * Duplicate a string, allocating the memory from the given context.
+ *
+ * Like \c strndup, at most \p n characters are copied. If \p str is longer
+ * than \p n characters, \p n are copied, and a terminating \c '\0' byte is added.
+ */
+char *ralloc_strndup(const void *ctx, const char *str, size_t n) MALLOCLIKE;
+
+/**
+ * Concatenate two strings, allocating the necessary space.
+ *
+ * This appends \p str to \p *dest, similar to \c strcat, using ralloc_resize
+ * to expand \p *dest to the appropriate size. \p dest will be updated to the
+ * new pointer unless allocation fails.
+ *
+ * The result will always be null-terminated.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_strcat(char **dest, const char *str);
+
+/**
+ * Concatenate two strings, allocating the necessary space.
+ *
+ * This appends at most \p n bytes of \p str to \p *dest, using ralloc_resize
+ * to expand \p *dest to the appropriate size. \p dest will be updated to the
+ * new pointer unless allocation fails.
+ *
+ * The result will always be null-terminated; \p str does not need to be null
+ * terminated if it is longer than \p n.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_strncat(char **dest, const char *str, size_t n);
+
+/**
+ * Concatenate two strings, allocating the necessary space.
+ *
+ * This appends \p n bytes of \p str to \p *dest, using ralloc_resize
+ * to expand \p *dest to the appropriate size. \p dest will be updated to the
+ * new pointer unless allocation fails.
+ *
+ * The result will always be null-terminated.
+ *
+ * This function differs from ralloc_strcat() and ralloc_strncat() in that it
+ * does not do any strlen() calls which can become costly on large strings.
+ *
+ * \return True unless allocation failed.
+ */
+bool
+ralloc_str_append(char **dest, const char *str,
+ size_t existing_length, size_t str_size);
+
+/**
+ * Print to a string.
+ *
+ * This is analogous to \c sprintf, but allocates enough space (using \p ctx
+ * as the context) for the resulting string.
+ *
+ * \return The newly allocated string.
+ */
+char *ralloc_asprintf (const void *ctx, const char *fmt, ...) PRINTFLIKE(2, 3) MALLOCLIKE;
+
+/**
+ * Print to a string, given a va_list.
+ *
+ * This is analogous to \c vsprintf, but allocates enough space (using \p ctx
+ * as the context) for the resulting string.
+ *
+ * \return The newly allocated string.
+ */
+char *ralloc_vasprintf(const void *ctx, const char *fmt, va_list args) MALLOCLIKE;
+
+/**
+ * Rewrite the tail of an existing string, starting at a given index.
+ *
+ * Overwrites the contents of *str starting at \p start with newly formatted
+ * text, including a new null-terminator. Allocates more memory as necessary.
+ *
+ * This can be used to append formatted text when the length of the existing
+ * string is already known, saving a strlen() call.
+ *
+ * \sa ralloc_asprintf_append
+ *
+ * \param str The string to be updated.
+ * \param start The index to start appending new data at.
+ * \param fmt A printf-style formatting string
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ * \p start will be increased by the length of the newly formatted text.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_asprintf_rewrite_tail(char **str, size_t *start,
+ const char *fmt, ...)
+ PRINTFLIKE(3, 4);
+
+/**
+ * Rewrite the tail of an existing string, starting at a given index.
+ *
+ * Overwrites the contents of *str starting at \p start with newly formatted
+ * text, including a new null-terminator. Allocates more memory as necessary.
+ *
+ * This can be used to append formatted text when the length of the existing
+ * string is already known, saving a strlen() call.
+ *
+ * \sa ralloc_vasprintf_append
+ *
+ * \param str The string to be updated.
+ * \param start The index to start appending new data at.
+ * \param fmt A printf-style formatting string
+ * \param args A va_list containing the data to be formatted
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ * \p start will be increased by the length of the newly formatted text.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
+ va_list args);
+
+/**
+ * Append formatted text to the supplied string.
+ *
+ * This is equivalent to
+ * \code
+ * ralloc_asprintf_rewrite_tail(str, strlen(*str), fmt, ...)
+ * \endcode
+ *
+ * \sa ralloc_asprintf
+ * \sa ralloc_asprintf_rewrite_tail
+ * \sa ralloc_strcat
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_asprintf_append (char **str, const char *fmt, ...)
+ PRINTFLIKE(2, 3);
+
+/**
+ * Append formatted text to the supplied string, given a va_list.
+ *
+ * This is equivalent to
+ * \code
+ * ralloc_vasprintf_rewrite_tail(str, strlen(*str), fmt, args)
+ * \endcode
+ *
+ * \sa ralloc_vasprintf
+ * \sa ralloc_vasprintf_rewrite_tail
+ * \sa ralloc_strcat
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_vasprintf_append(char **str, const char *fmt, va_list args);
+/// @}
+
+/**
+ * Declare C++ new and delete operators which use ralloc.
+ *
+ * Placing this macro in the body of a class makes it possible to do:
+ *
+ *    TYPE *var = new(mem_ctx) TYPE(...);
+ *    delete var;
+ *
+ * which is more idiomatic in C++ than calling ralloc directly.  The delete
+ * operator releases the object with ralloc_free; for the LINEAR_* variants
+ * below, note that linear child allocations cannot be freed individually,
+ * so deleting such objects individually is not supported.
+ */
+#define DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(TYPE, ALLOC_FUNC) \
+private: \
+ static void _ralloc_destructor(void *p) \
+ { \
+ reinterpret_cast<TYPE *>(p)->TYPE::~TYPE(); \
+ } \
+public: \
+ static void* operator new(size_t size, void *mem_ctx) \
+ { \
+ void *p = ALLOC_FUNC(mem_ctx, size); \
+ assert(p != NULL); \
+ if (!HAS_TRIVIAL_DESTRUCTOR(TYPE)) \
+ ralloc_set_destructor(p, _ralloc_destructor); \
+ return p; \
+ } \
+ \
+ static void operator delete(void *p) \
+ { \
+ /* The object's destructor is guaranteed to have already been \
+ * called by the delete operator at this point -- Make sure it's \
+ * not called again. \
+ */ \
+ if (!HAS_TRIVIAL_DESTRUCTOR(TYPE)) \
+ ralloc_set_destructor(p, NULL); \
+ ralloc_free(p); \
+ }
+
+#define DECLARE_RALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, ralloc_size)
+
+#define DECLARE_RZALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, rzalloc_size)
+
+#define DECLARE_LINEAR_ALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_alloc_child)
+
+#define DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_zalloc_child)
+
+
+/**
+ * Do a fast allocation from the linear buffer, also known as the child node
+ * from the allocator's point of view. It can't be freed directly. You have
+ * to free the parent or the ralloc parent.
+ *
+ * \param parent parent node of the linear allocator
+ * \param size size to allocate (max 32 bits)
+ */
+void *linear_alloc_child(void *parent, unsigned size);
+
+/**
+ * Allocate a parent node that will hold linear buffers. The returned
+ * allocation is actually the first child node, but it's also the handle
+ * of the parent node. Use it for all child node allocations.
+ *
+ * \param ralloc_ctx ralloc context, must not be NULL
+ * \param size size to allocate (max 32 bits)
+ */
+void *linear_alloc_parent(void *ralloc_ctx, unsigned size);
+
+/**
+ * Same as linear_alloc_child, but also clears memory.
+ */
+void *linear_zalloc_child(void *parent, unsigned size);
+
+/**
+ * Same as linear_alloc_parent, but also clears memory.
+ */
+void *linear_zalloc_parent(void *ralloc_ctx, unsigned size);
+
+/**
+ * Free the linear parent node. This will free all child nodes too.
+ * Freeing the ralloc parent will also free this.
+ */
+void linear_free_parent(void *ptr);
+
+/**
+ * Same as ralloc_steal, but steals the linear parent node.
+ */
+void ralloc_steal_linear_parent(void *new_ralloc_ctx, void *ptr);
+
+/**
+ * Return the ralloc parent of the linear parent node.
+ */
+void *ralloc_parent_of_linear_parent(void *ptr);
+
+/**
+ * Same as realloc except that the linear allocator doesn't free child nodes,
+ * so it's reduced to memory duplication. It's used in places where
+ * reallocation is required. Don't use it often. It's much slower than
+ * realloc.
+ */
+void *linear_realloc(void *parent, void *old, unsigned new_size);
+
+/* The functions below have the same semantics as their ralloc counterparts,
+ * except that they always allocate a linear child node.
+ */
+char *linear_strdup(void *parent, const char *str);
+char *linear_asprintf(void *parent, const char *fmt, ...);
+char *linear_vasprintf(void *parent, const char *fmt, va_list args);
+bool linear_asprintf_append(void *parent, char **str, const char *fmt, ...);
+bool linear_vasprintf_append(void *parent, char **str, const char *fmt,
+ va_list args);
+bool linear_asprintf_rewrite_tail(void *parent, char **str, size_t *start,
+ const char *fmt, ...);
+bool linear_vasprintf_rewrite_tail(void *parent, char **str, size_t *start,
+ const char *fmt, va_list args);
+bool linear_strcat(void *parent, char **dest, const char *str);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif
diff --git a/src/mesa/util/simple_mtx.h b/src/mesa/util/simple_mtx.h
new file mode 100644
index 00000000..1bd57ac8
--- /dev/null
+++ b/src/mesa/util/simple_mtx.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2015 Intel
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _SIMPLE_MTX_H
+#define _SIMPLE_MTX_H
+
+#include "util/futex.h"
+#include "util/macros.h"
+
+#include "c11/threads.h"
+
+#if UTIL_FUTEX_SUPPORTED
+
+/* mtx_t - Fast, simple mutex
+ *
+ * While modern pthread mutexes are very fast (implemented using futex), they
+ * still incur a call to an external DSO and overhead of the generality and
+ * features of pthread mutexes. Most mutexes in mesa only needs lock/unlock,
+ * and the idea here is that we can inline the atomic operation and make the
+ * fast case just two intructions. Mutexes are subtle and finicky to
+ * implement, so we carefully copy the implementation from Ulrich Dreppers
+ * well-written and well-reviewed paper:
+ *
+ * "Futexes Are Tricky"
+ * http://www.akkadia.org/drepper/futex.pdf
+ *
+ * We implement "mutex3", which gives us a mutex that has no syscalls on
+ * uncontended lock or unlock. Further, the uncontended case boils down to a
+ * locked cmpxchg and an untaken branch, the uncontended unlock is just a
+ * locked decr and an untaken branch. We use __builtin_expect() to indicate
+ * that contention is unlikely so that gcc will put the contention code out of
+ * the main code flow.
+ *
+ * A fast mutex only supports lock/unlock, can't be recursive or used with
+ * condition variables.
+ */
+
+typedef struct {
+ uint32_t val;
+} simple_mtx_t;
+
+#define _SIMPLE_MTX_INITIALIZER_NP { 0 }
+
+#define _SIMPLE_MTX_INVALID_VALUE 0xd0d0d0d0
+
+static inline void
+simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
+{
+ assert(type == mtx_plain);
+
+ mtx->val = 0;
+}
+
+static inline void
+simple_mtx_destroy(ASSERTED simple_mtx_t *mtx)
+{
+#ifndef NDEBUG
+ mtx->val = _SIMPLE_MTX_INVALID_VALUE;
+#endif
+}
+
+static inline void
+simple_mtx_lock(simple_mtx_t *mtx)
+{
+ uint32_t c;
+
+ c = __sync_val_compare_and_swap(&mtx->val, 0, 1);
+
+ assert(c != _SIMPLE_MTX_INVALID_VALUE);
+
+ if (__builtin_expect(c != 0, 0)) {
+ if (c != 2)
+ c = __sync_lock_test_and_set(&mtx->val, 2);
+ while (c != 0) {
+ futex_wait(&mtx->val, 2, NULL);
+ c = __sync_lock_test_and_set(&mtx->val, 2);
+ }
+ }
+}
+
+static inline void
+simple_mtx_unlock(simple_mtx_t *mtx)
+{
+ uint32_t c;
+
+ c = __sync_fetch_and_sub(&mtx->val, 1);
+
+ assert(c != _SIMPLE_MTX_INVALID_VALUE);
+
+ if (__builtin_expect(c != 1, 0)) {
+ mtx->val = 0;
+ futex_wake(&mtx->val, 1);
+ }
+}
+
+static inline void
+simple_mtx_assert_locked(simple_mtx_t *mtx)
+{
+ assert(mtx->val);
+}
+
+#else
+
+typedef mtx_t simple_mtx_t;
+
+#define _SIMPLE_MTX_INITIALIZER_NP _MTX_INITIALIZER_NP
+
+static inline void
+simple_mtx_init(simple_mtx_t *mtx, int type)
+{
+ mtx_init(mtx, type);
+}
+
+static inline void
+simple_mtx_destroy(simple_mtx_t *mtx)
+{
+ mtx_destroy(mtx);
+}
+
+static inline void
+simple_mtx_lock(simple_mtx_t *mtx)
+{
+ mtx_lock(mtx);
+}
+
+static inline void
+simple_mtx_unlock(simple_mtx_t *mtx)
+{
+ mtx_unlock(mtx);
+}
+
+static inline void
+simple_mtx_assert_locked(simple_mtx_t *mtx)
+{
+#ifdef DEBUG
+ /* NOTE: this would not work for recursive mutexes, but
+ * mtx_t doesn't support those
+ */
+ int ret = mtx_trylock(mtx);
+ assert(ret == thrd_busy);
+ if (ret == thrd_success)
+ mtx_unlock(mtx);
+#else
+ (void)mtx;
+#endif
+}
+
+#endif
+
+#endif
diff --git a/src/mesa/util/u_atomic.h b/src/mesa/util/u_atomic.h
new file mode 100644
index 00000000..0bd6a4aa
--- /dev/null
+++ b/src/mesa/util/u_atomic.h
@@ -0,0 +1,272 @@
+/**
+ * Many similar implementations exist. See for example libwsbm
+ * or the linux kernel include/atomic.h
+ *
+ * No copyright claimed on this file.
+ *
+ */
+
+#include "no_extern_c.h"
+
+#ifndef U_ATOMIC_H
+#define U_ATOMIC_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Favor OS-provided implementations.
+ *
+ * Where no OS-provided implementation is available, fall back to
+ * locally coded assembly, compiler intrinsic or ultimately a
+ * mutex-based implementation.
+ */
+#if defined(__sun)
+#define PIPE_ATOMIC_OS_SOLARIS
+#elif defined(_MSC_VER)
+#define PIPE_ATOMIC_MSVC_INTRINSIC
+#elif defined(__GNUC__)
+#define PIPE_ATOMIC_GCC_INTRINSIC
+#else
+#error "Unsupported platform"
+#endif
+
+
+/* Implementation using GCC-provided synchronization intrinsics
+ */
+#if defined(PIPE_ATOMIC_GCC_INTRINSIC)
+
+#define PIPE_ATOMIC "GCC Sync Intrinsics"
+
+#if defined(USE_GCC_ATOMIC_BUILTINS)
+
+/* The builtins with explicit memory model are available since GCC 4.7. */
+#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
+#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
+#define p_atomic_read_relaxed(_v) __atomic_load_n((_v), __ATOMIC_RELAXED)
+#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
+#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
+#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_add_return(v, i) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
+#define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
+#define PIPE_NATIVE_ATOMIC_XCHG
+
+#else
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+#define p_atomic_read_relaxed(_v) (*(_v))
+#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
+#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
+#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
+#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
+#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
+#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
+#define p_atomic_add_return(v, i) __sync_add_and_fetch((v), (i))
+
+#endif
+
+/* There is no __atomic_* compare and exchange that returns the current value.
+ * Also, GCC 5.4 seems unable to optimize a compound statement expression that
+ * uses an additional stack variable with __atomic_compare_exchange[_n].
+ */
+#define p_atomic_cmpxchg(v, old, _new) \
+ __sync_val_compare_and_swap((v), (old), (_new))
+
+#endif
+
+
+
+/* Unlocked version for single threaded environments, such as some
+ * windows kernel modules.
+ */
+#if defined(PIPE_ATOMIC_OS_UNLOCKED)
+
+#define PIPE_ATOMIC "Unlocked"
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+#define p_atomic_read_relaxed(_v) (*(_v))
+#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
+#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
+#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
+#define p_atomic_add(_v, _i) ((void) p_atomic_add_return((_v), (_i)))
+#define p_atomic_inc_return(_v) (++(*(_v)))
+#define p_atomic_dec_return(_v) (--(*(_v)))
+#define p_atomic_add_return(_v, _i) (*(_v) = *(_v) + (_i))
+#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))
+
+#endif
+
+
+#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
+
+#define PIPE_ATOMIC "MSVC Intrinsics"
+
+/* We use the Windows header's Interlocked*64 functions instead of the
+ * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
+ * with target CPU, whereas Windows headers take care of all portability
+ * issues: using intrinsics where available, falling back to library
+ * implementations where not.
+ */
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+#include <windows.h>
+#include <intrin.h>
+#include <assert.h>
+
+/* MSVC supports decltype keyword, but it's only supported on C++ and doesn't
+ * quite work here; and if a C++-only solution is worthwhile, then it would be
+ * better to use templates / function overloading, instead of decltype magic.
+ * Therefore, we rely on implicit casting to LONGLONG for the functions that return
+ */
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+#define p_atomic_read_relaxed(_v) (*(_v))
+
+#define p_atomic_dec_zero(_v) \
+ (p_atomic_dec_return(_v) == 0)
+
+#define p_atomic_inc(_v) \
+ ((void) p_atomic_inc_return(_v))
+
+#define p_atomic_inc_return(_v) (\
+ sizeof *(_v) == sizeof(short) ? _InterlockedIncrement16((short *) (_v)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedIncrement ((long *) (_v)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_dec(_v) \
+ ((void) p_atomic_dec_return(_v))
+
+#define p_atomic_dec_return(_v) (\
+ sizeof *(_v) == sizeof(short) ? _InterlockedDecrement16((short *) (_v)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedDecrement ((long *) (_v)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_add(_v, _i) \
+ ((void) p_atomic_add_return((_v), (_i)))
+
+#define p_atomic_add_return(_v, _i) (\
+ sizeof *(_v) == sizeof(char) ? _InterlockedExchangeAdd8 ((char *) (_v), (_i)) : \
+ sizeof *(_v) == sizeof(short) ? _InterlockedExchangeAdd16((short *) (_v), (_i)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedExchangeAdd ((long *) (_v), (_i)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_cmpxchg(_v, _old, _new) (\
+ sizeof *(_v) == sizeof(char) ? _InterlockedCompareExchange8 ((char *) (_v), (char) (_new), (char) (_old)) : \
+ sizeof *(_v) == sizeof(short) ? _InterlockedCompareExchange16((short *) (_v), (short) (_new), (short) (_old)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedCompareExchange ((long *) (_v), (long) (_new), (long) (_old)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
+ (assert(!"should not get here"), 0))
+
+#endif
+
+#if defined(PIPE_ATOMIC_OS_SOLARIS)
+
+#define PIPE_ATOMIC "Solaris OS atomic functions"
+
+#include <atomic.h>
+#include <assert.h>
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+
+#define p_atomic_dec_zero(v) (\
+ sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) == 0 : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_inc(v) (void) (\
+ sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8 ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_inc_return(v) (__typeof(*v))( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8_nv ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_dec(v) (void) ( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8 ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_dec_return(v) (__typeof(*v))( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_add(v, i) (void) ( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_add_8 ((uint8_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_add_return(v, i) (__typeof(*v)) ( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_add_8_nv ((uint8_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_cmpxchg(v, old, _new) (__typeof(*v))( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_cas_8 ((uint8_t *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
+ (assert(!"should not get here"), 0))
+
+#endif
+
+#ifndef PIPE_ATOMIC
+#error "No pipe_atomic implementation selected"
+#endif
+
+#ifndef PIPE_NATIVE_ATOMIC_XCHG
+static inline uint32_t p_atomic_xchg_32(uint32_t *v, uint32_t i)
+{
+ uint32_t actual = p_atomic_read(v);
+ uint32_t expected;
+ do {
+ expected = actual;
+ actual = p_atomic_cmpxchg(v, expected, i);
+ } while (expected != actual);
+ return actual;
+}
+
+static inline uint64_t p_atomic_xchg_64(uint64_t *v, uint64_t i)
+{
+ uint64_t actual = p_atomic_read(v);
+ uint64_t expected;
+ do {
+ expected = actual;
+ actual = p_atomic_cmpxchg(v, expected, i);
+ } while (expected != actual);
+ return actual;
+}
+
+#define p_atomic_xchg(v, i) (__typeof(*(v)))( \
+ sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
+ sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
+ (assert(!"should not get here"), 0))
+#endif
+
+#endif /* U_ATOMIC_H */
diff --git a/src/mesa/util/u_cpu_detect.c b/src/mesa/util/u_cpu_detect.c
new file mode 100644
index 00000000..d6e51a11
--- /dev/null
+++ b/src/mesa/util/u_cpu_detect.c
@@ -0,0 +1,868 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Dennis Smit
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * CPU feature detection.
+ *
+ * @author Dennis Smit
+ * @author Based on the work of Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#include "pipe/p_config.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_debug.h"
+#include "u_cpu_detect.h"
+#include "u_math.h"
+#include "c11/threads.h"
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#if defined(PIPE_ARCH_PPC)
+#if defined(PIPE_OS_APPLE)
+#include <sys/sysctl.h>
+#else
+#include <signal.h>
+#include <setjmp.h>
+#endif
+#endif
+
+#if defined(PIPE_OS_BSD)
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <machine/cpu.h>
+#endif
+
+#if defined(PIPE_OS_FREEBSD)
+#if __has_include(<sys/auxv.h>)
+#include <sys/auxv.h>
+#define HAVE_ELF_AUX_INFO
+#endif
+#endif
+
+#if defined(PIPE_OS_LINUX)
+#include <signal.h>
+#include <fcntl.h>
+#include <elf.h>
+#endif
+
+#ifdef PIPE_OS_UNIX
+#include <unistd.h>
+#endif
+
+#if defined(HAS_ANDROID_CPUFEATURES)
+#include <cpu-features.h>
+#endif
+
+#if defined(PIPE_OS_WINDOWS)
+#include <windows.h>
+#if defined(PIPE_CC_MSVC)
+#include <intrin.h>
+#endif
+#endif
+
+#if defined(HAS_SCHED_H)
+#include <sched.h>
+#endif
+
+DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu, "GALLIUM_DUMP_CPU", false)
+
+
+struct util_cpu_caps_t util_cpu_caps;
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+static int has_cpuid(void);
+#endif
+
+
+#if defined(PIPE_ARCH_PPC) && !defined(PIPE_OS_APPLE) && !defined(PIPE_OS_BSD) && !defined(PIPE_OS_LINUX)
+static jmp_buf __lv_powerpc_jmpbuf;
+static volatile sig_atomic_t __lv_powerpc_canjump = 0;
+
+static void
+sigill_handler(int sig)
+{
+ if (!__lv_powerpc_canjump) {
+ signal (sig, SIG_DFL);
+ raise (sig);
+ }
+
+ __lv_powerpc_canjump = 0;
+ longjmp(__lv_powerpc_jmpbuf, 1);
+}
+#endif
+
+#if defined(PIPE_ARCH_PPC)
+static void
+check_os_altivec_support(void)
+{
+#if defined(__ALTIVEC__)
+ util_cpu_caps.has_altivec = 1;
+#endif
+#if defined(__VSX__)
+ util_cpu_caps.has_vsx = 1;
+#endif
+#if defined(__ALTIVEC__) && defined(__VSX__)
+/* Do nothing */
+#elif defined(PIPE_OS_APPLE) || defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD)
+#ifdef HW_VECTORUNIT
+ int sels[2] = {CTL_HW, HW_VECTORUNIT};
+#else
+ int sels[2] = {CTL_MACHDEP, CPU_ALTIVEC};
+#endif
+ int has_vu = 0;
+ int len = sizeof (has_vu);
+ int err;
+
+ err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
+
+ if (err == 0) {
+ if (has_vu != 0) {
+ util_cpu_caps.has_altivec = 1;
+ }
+ }
+#elif defined(PIPE_OS_FREEBSD) /* !PIPE_OS_APPLE && !PIPE_OS_NETBSD && !PIPE_OS_OPENBSD */
+ unsigned long hwcap = 0;
+#ifdef HAVE_ELF_AUX_INFO
+ elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
+#else
+ size_t len = sizeof(hwcap);
+ sysctlbyname("hw.cpu_features", &hwcap, &len, NULL, 0);
+#endif
+ if (hwcap & PPC_FEATURE_HAS_ALTIVEC)
+ util_cpu_caps.has_altivec = 1;
+ if (hwcap & PPC_FEATURE_HAS_VSX)
+ util_cpu_caps.has_vsx = 1;
+#elif defined(PIPE_OS_LINUX) /* !PIPE_OS_FREEBSD */
+#if defined(PIPE_ARCH_PPC_64)
+ Elf64_auxv_t aux;
+#else
+ Elf32_auxv_t aux;
+#endif
+ int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
+ if (fd >= 0) {
+ while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+ if (aux.a_type == AT_HWCAP) {
+ char *env_vsx = getenv("GALLIVM_VSX");
+ uint64_t hwcap = aux.a_un.a_val;
+ util_cpu_caps.has_altivec = (hwcap >> 28) & 1;
+ if (!env_vsx || env_vsx[0] != '0') {
+ util_cpu_caps.has_vsx = (hwcap >> 7) & 1;
+ }
+ break;
+ }
+ }
+ close(fd);
+ }
+#else /* !PIPE_OS_APPLE && !PIPE_OS_BSD && !PIPE_OS_LINUX */
+ /* not on Apple/Darwin or Linux, do it the brute-force way */
+ /* this is borrowed from the libmpeg2 library */
+ signal(SIGILL, sigill_handler);
+ if (setjmp(__lv_powerpc_jmpbuf)) {
+ signal(SIGILL, SIG_DFL);
+ } else {
+ boolean enable_altivec = TRUE; /* Default: enable if available, and if not overridden */
+ boolean enable_vsx = TRUE;
+#ifdef DEBUG
+ /* Disabling Altivec code generation is not the same as disabling VSX code generation,
+ * which can be done simply by passing -mattr=-vsx to the LLVM compiler; cf.
+ * lp_build_create_jit_compiler_for_module().
+ * If you want to disable Altivec code generation, the best place to do it is here.
+ */
+ char *env_control = getenv("GALLIVM_ALTIVEC"); /* 1=enable (default); 0=disable */
+ if (env_control && env_control[0] == '0') {
+ enable_altivec = FALSE;
+ }
+#endif
+ /* VSX instructions can be explicitly enabled/disabled via GALLIVM_VSX=1 or 0 */
+ char *env_vsx = getenv("GALLIVM_VSX");
+ if (env_vsx && env_vsx[0] == '0') {
+ enable_vsx = FALSE;
+ }
+ if (enable_altivec) {
+ __lv_powerpc_canjump = 1;
+
+ __asm __volatile
+ ("mtspr 256, %0\n\t"
+ "vand %%v0, %%v0, %%v0"
+ :
+ : "r" (-1));
+
+ util_cpu_caps.has_altivec = 1;
+
+ if (enable_vsx) {
+ __asm __volatile("xxland %vs0, %vs0, %vs0");
+ util_cpu_caps.has_vsx = 1;
+ }
+ signal(SIGILL, SIG_DFL);
+ } else {
+ util_cpu_caps.has_altivec = 0;
+ }
+ }
+#endif /* !PIPE_OS_APPLE && !PIPE_OS_LINUX */
+}
+#endif /* PIPE_ARCH_PPC */
+
+
+#if defined(PIPE_ARCH_X86) || defined (PIPE_ARCH_X86_64)
+static int has_cpuid(void)
+{
+#if defined(PIPE_ARCH_X86)
+#if defined(PIPE_OS_GCC)
+ int a, c;
+
+ __asm __volatile
+ ("pushf\n"
+ "popl %0\n"
+ "movl %0, %1\n"
+ "xorl $0x200000, %0\n"
+ "push %0\n"
+ "popf\n"
+ "pushf\n"
+ "popl %0\n"
+ : "=a" (a), "=c" (c)
+ :
+ : "cc");
+
+ return a != c;
+#else
+ /* FIXME */
+ return 1;
+#endif
+#elif defined(PIPE_ARCH_X86_64)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+
+/**
+ * @sa cpuid.h included in gcc-4.3 onwards.
+ * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
+ */
+static inline void
+cpuid(uint32_t ax, uint32_t *p)
+{
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
+ __asm __volatile (
+ "xchgl %%ebx, %1\n\t"
+ "cpuid\n\t"
+ "xchgl %%ebx, %1"
+ : "=a" (p[0]),
+ "=S" (p[1]),
+ "=c" (p[2]),
+ "=d" (p[3])
+ : "0" (ax)
+ );
+#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
+ __asm __volatile (
+ "cpuid\n\t"
+ : "=a" (p[0]),
+ "=b" (p[1]),
+ "=c" (p[2]),
+ "=d" (p[3])
+ : "0" (ax)
+ );
+#elif defined(PIPE_CC_MSVC)
+ __cpuid(p, ax);
+#else
+ p[0] = 0;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+#endif
+}
+
+/**
+ * @sa cpuid.h included in gcc-4.4 onwards.
+ * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
+ */
+static inline void
+cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
+{
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
+ __asm __volatile (
+ "xchgl %%ebx, %1\n\t"
+ "cpuid\n\t"
+ "xchgl %%ebx, %1"
+ : "=a" (p[0]),
+ "=S" (p[1]),
+ "=c" (p[2]),
+ "=d" (p[3])
+ : "0" (ax), "2" (cx)
+ );
+#elif defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64)
+ __asm __volatile (
+ "cpuid\n\t"
+ : "=a" (p[0]),
+ "=b" (p[1]),
+ "=c" (p[2]),
+ "=d" (p[3])
+ : "0" (ax), "2" (cx)
+ );
+#elif defined(PIPE_CC_MSVC)
+ __cpuidex(p, ax, cx);
+#else
+ p[0] = 0;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+#endif
+}
+
+
+static inline uint64_t xgetbv(void)
+{
+#if defined(PIPE_CC_GCC)
+ uint32_t eax, edx;
+
+ __asm __volatile (
+ ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
+ : "=a"(eax),
+ "=d"(edx)
+ : "c"(0)
+ );
+
+ return ((uint64_t)edx << 32) | eax;
+#elif defined(PIPE_CC_MSVC) && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
+ return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+#else
+ return 0;
+#endif
+}
+
+
+#if defined(PIPE_ARCH_X86)
+PIPE_ALIGN_STACK static inline boolean sse2_has_daz(void)
+{
+ struct {
+ uint32_t pad1[7];
+ uint32_t mxcsr_mask;
+ uint32_t pad2[128-8];
+ } PIPE_ALIGN_VAR(16) fxarea;
+
+ fxarea.mxcsr_mask = 0;
+#if defined(PIPE_CC_GCC)
+ __asm __volatile ("fxsave %0" : "+m" (fxarea));
+#elif defined(PIPE_CC_MSVC) || defined(PIPE_CC_ICL)
+ _fxsave(&fxarea);
+#else
+ fxarea.mxcsr_mask = 0;
+#endif
+ return !!(fxarea.mxcsr_mask & (1 << 6));
+}
+#endif
+
+#endif /* X86 or X86_64 */
+
+#if defined(PIPE_ARCH_ARM)
+static void
+check_os_arm_support(void)
+{
+ /*
+ * On Android, the cpufeatures library is preferred way of checking
+ * CPU capabilities. However, it is not available for standalone Mesa
+ * builds, i.e. when Android build system (Android.mk-based) is not
+ * used. Because of this we cannot use PIPE_OS_ANDROID here, but rather
+ * have a separate macro that only gets enabled from respective Android.mk.
+ */
+#if defined(__ARM_NEON) || defined(__ARM_NEON__)
+ util_cpu_caps.has_neon = 1;
+#elif defined(PIPE_OS_FREEBSD) && defined(HAVE_ELF_AUX_INFO)
+ unsigned long hwcap = 0;
+ elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
+ if (hwcap & HWCAP_NEON)
+ util_cpu_caps.has_neon = 1;
+#elif defined(HAS_ANDROID_CPUFEATURES)
+ AndroidCpuFamily cpu_family = android_getCpuFamily();
+ uint64_t cpu_features = android_getCpuFeatures();
+
+ if (cpu_family == ANDROID_CPU_FAMILY_ARM) {
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)
+ util_cpu_caps.has_neon = 1;
+ }
+#elif defined(PIPE_OS_LINUX)
+ Elf32_auxv_t aux;
+ int fd;
+
+ fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
+ if (fd >= 0) {
+ while (read(fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) {
+ if (aux.a_type == AT_HWCAP) {
+ uint32_t hwcap = aux.a_un.a_val;
+
+ util_cpu_caps.has_neon = (hwcap >> 12) & 1;
+ break;
+ }
+ }
+ close (fd);
+ }
+#endif /* PIPE_OS_LINUX */
+}
+
+#elif defined(PIPE_ARCH_AARCH64)
+static void
+check_os_arm_support(void)
+{
+ util_cpu_caps.has_neon = true;
+}
+#endif /* PIPE_ARCH_ARM || PIPE_ARCH_AARCH64 */
+
+#if defined(PIPE_ARCH_MIPS64)
+static void
+check_os_mips64_support(void)
+{
+ Elf64_auxv_t aux;
+ int fd;
+
+ fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
+ if (fd >= 0) {
+ while (read(fd, &aux, sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
+ if (aux.a_type == AT_HWCAP) {
+ uint64_t hwcap = aux.a_un.a_val;
+
+ util_cpu_caps.has_msa = (hwcap >> 1) & 1;
+ break;
+ }
+ }
+ close (fd);
+ }
+}
+#endif /* PIPE_ARCH_MIPS64 */
+
+
+static void
+get_cpu_topology(void)
+{
+ /* Default. This is OK if L3 is not present or there is only one. */
+ util_cpu_caps.num_L3_caches = 1;
+
+ memset(util_cpu_caps.cpu_to_L3, 0xff, sizeof(util_cpu_caps.cpu_to_L3));
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ /* AMD Zen */
+ if (util_cpu_caps.family >= CPU_AMD_ZEN1_ZEN2 &&
+ util_cpu_caps.family < CPU_AMD_LAST) {
+ uint32_t regs[4];
+
+ uint32_t saved_mask[UTIL_MAX_CPUS / 32] = {0};
+ uint32_t mask[UTIL_MAX_CPUS / 32] = {0};
+ bool saved = false;
+
+ uint32_t L3_found[UTIL_MAX_CPUS] = {0};
+ uint32_t num_L3_caches = 0;
+ util_affinity_mask *L3_affinity_masks = NULL;
+
+ /* Query APIC IDs from each CPU core.
+ *
+ * An APIC ID is a logical ID of the CPU with respect to the cache
+ * hierarchy, meaning that consecutive APIC IDs are neighbours in
+ * the hierarchy, e.g. sharing the same cache.
+ *
+ * For example, CPU 0 can have APIC ID 0 and CPU 12 can have APIC ID 1,
+ * which means that both CPU 0 and 12 are next to each other.
+ * (e.g. they are 2 threads belonging to 1 SMT2 core)
+ *
+ * We need to find out which CPUs share the same L3 cache and they can
+ * be all over the place.
+ *
+ * Querying the APIC ID can only be done by pinning the current thread
+ * to each core. The original affinity mask is saved.
+ *
+ * Loop over all possible CPUs even though some may be offline.
+ */
+ for (int16_t i = 0; i < util_cpu_caps.max_cpus && i < UTIL_MAX_CPUS; i++) {
+ uint32_t cpu_bit = 1u << (i % 32);
+
+ mask[i / 32] = cpu_bit;
+
+ /* The assumption is that trying to bind the thread to a CPU that is
+ * offline will fail.
+ */
+ if (util_set_current_thread_affinity(mask,
+ !saved ? saved_mask : NULL,
+ util_cpu_caps.num_cpu_mask_bits)) {
+ saved = true;
+
+ /* Query the APIC ID of the current core. */
+ cpuid(0x00000001, regs);
+ unsigned apic_id = regs[1] >> 24;
+
+ /* Query the total core count for the CPU */
+ uint32_t core_count = 1;
+ if (regs[3] & (1 << 28))
+ core_count = (regs[1] >> 16) & 0xff;
+
+ core_count = util_next_power_of_two(core_count);
+
+ /* Query the L3 cache count. */
+ cpuid_count(0x8000001D, 3, regs);
+ unsigned cache_level = (regs[0] >> 5) & 0x7;
+ unsigned cores_per_L3 = ((regs[0] >> 14) & 0xfff) + 1;
+
+ if (cache_level != 3)
+ continue;
+
+ unsigned local_core_id = apic_id & (core_count - 1);
+ unsigned phys_id = (apic_id & ~(core_count - 1)) >> util_logbase2(core_count);
+ unsigned local_l3_cache_index = local_core_id / util_next_power_of_two(cores_per_L3);
+#define L3_ID(p, i) (p << 16 | i << 1 | 1);
+
+ unsigned l3_id = L3_ID(phys_id, local_l3_cache_index);
+ int idx = -1;
+ for (unsigned c = 0; c < num_L3_caches; c++) {
+ if (L3_found[c] == l3_id) {
+ idx = c;
+ break;
+ }
+ }
+ if (idx == -1) {
+ idx = num_L3_caches;
+ L3_found[num_L3_caches++] = l3_id;
+ L3_affinity_masks = realloc(L3_affinity_masks, sizeof(util_affinity_mask) * num_L3_caches);
+ if (!L3_affinity_masks)
+ return;
+ memset(&L3_affinity_masks[num_L3_caches - 1], 0, sizeof(util_affinity_mask));
+ }
+ util_cpu_caps.cpu_to_L3[i] = idx;
+ L3_affinity_masks[idx][i / 32] |= cpu_bit;
+
+ }
+ mask[i / 32] = 0;
+ }
+
+ util_cpu_caps.num_L3_caches = num_L3_caches;
+ util_cpu_caps.L3_affinity_mask = L3_affinity_masks;
+
+ if (saved) {
+ if (debug_get_option_dump_cpu()) {
+ fprintf(stderr, "CPU <-> L3 cache mapping:\n");
+ for (unsigned i = 0; i < util_cpu_caps.num_L3_caches; i++) {
+ fprintf(stderr, " - L3 %u mask = ", i);
+ for (int j = util_cpu_caps.max_cpus - 1; j >= 0; j -= 32)
+ fprintf(stderr, "%08x ", util_cpu_caps.L3_affinity_mask[i][j / 32]);
+ fprintf(stderr, "\n");
+ }
+ }
+
+ /* Restore the original affinity mask. */
+ util_set_current_thread_affinity(saved_mask, NULL,
+ util_cpu_caps.num_cpu_mask_bits);
+ } else {
+ if (debug_get_option_dump_cpu())
+ fprintf(stderr, "Cannot set thread affinity for any thread.\n");
+ }
+ }
+#endif
+}
+
+static void
+util_cpu_detect_once(void)
+{
+ int available_cpus = 0;
+ int total_cpus = 0;
+
+ memset(&util_cpu_caps, 0, sizeof util_cpu_caps);
+
+ /* Count the number of CPUs in system */
+#if defined(PIPE_OS_WINDOWS)
+ {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ available_cpus = MAX2(1, system_info.dwNumberOfProcessors);
+ }
+#elif defined(PIPE_OS_UNIX)
+# if defined(HAS_SCHED_GETAFFINITY)
+ {
+ /* sched_setaffinity() can be used to further restrict the number of
+ * CPUs on which the process can run. Use sched_getaffinity() to
+ * determine the true number of available CPUs.
+ *
+ * FIXME: The Linux manual page for sched_getaffinity describes how this
+ * simple implementation will fail with > 1024 CPUs, and we'll fall back
+ * to the _SC_NPROCESSORS_ONLN path. Support for > 1024 CPUs can be
+ * added to this path once someone has such a system for testing.
+ */
+ cpu_set_t affin;
+ if (sched_getaffinity(getpid(), sizeof(affin), &affin) == 0)
+ available_cpus = CPU_COUNT(&affin);
+ }
+# endif
+
+ /* Linux, FreeBSD, DragonFly, and Mac OS X should have
+ * _SC_NOPROCESSORS_ONLN. NetBSD and OpenBSD should have HW_NCPUONLINE.
+ * This is what FFmpeg uses on those platforms.
+ */
+# if defined(PIPE_OS_BSD) && defined(HW_NCPUONLINE)
+ if (available_cpus == 0) {
+ const int mib[] = { CTL_HW, HW_NCPUONLINE };
+ int ncpu;
+ int len = sizeof(ncpu);
+
+ sysctl(mib, 2, &ncpu, &len, NULL, 0);
+ available_cpus = ncpu;
+ }
+# elif defined(_SC_NPROCESSORS_ONLN)
+ if (available_cpus == 0) {
+ available_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (available_cpus == ~0)
+ available_cpus = 1;
+ }
+# elif defined(PIPE_OS_BSD)
+ if (available_cpus == 0) {
+ const int mib[] = { CTL_HW, HW_NCPU };
+ int ncpu;
+ int len = sizeof(ncpu);
+
+ sysctl(mib, 2, &ncpu, &len, NULL, 0);
+ available_cpus = ncpu;
+ }
+# endif /* defined(PIPE_OS_BSD) */
+
+ /* Determine the maximum number of CPUs configured in the system. This is
+ * used to properly set num_cpu_mask_bits below. On BSDs that don't have
+ * HW_NCPUONLINE, it was not clear whether HW_NCPU is the number of
+ * configured or the number of online CPUs. For that reason, prefer the
+ * _SC_NPROCESSORS_CONF path on all BSDs.
+ */
+# if defined(_SC_NPROCESSORS_CONF)
+ total_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (total_cpus == ~0)
+ total_cpus = 1;
+# elif defined(PIPE_OS_BSD)
+ {
+ const int mib[] = { CTL_HW, HW_NCPU };
+ int ncpu;
+ int len = sizeof(ncpu);
+
+ sysctl(mib, 2, &ncpu, &len, NULL, 0);
+ total_cpus = ncpu;
+ }
+# endif /* defined(PIPE_OS_BSD) */
+#endif /* defined(PIPE_OS_UNIX) */
+
+ util_cpu_caps.nr_cpus = MAX2(1, available_cpus);
+ total_cpus = MAX2(total_cpus, util_cpu_caps.nr_cpus);
+
+ util_cpu_caps.max_cpus = total_cpus;
+ util_cpu_caps.num_cpu_mask_bits = align(total_cpus, 32);
+
+ /* Make the fallback cacheline size nonzero so that it can be
+ * safely passed to align().
+ */
+ util_cpu_caps.cacheline = sizeof(void *);
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if (has_cpuid()) {
+ uint32_t regs[4];
+
+ util_cpu_caps.cacheline = 32;
+
+ /* Get max cpuid level */
+ cpuid(0x00000000, regs);
+
+ if (regs[0] >= 0x00000001) {
+ unsigned int cacheline;
+ uint32_t regs2[4];
+
+ cpuid (0x00000001, regs2);
+
+ util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf;
+ /* Add "extended family". */
+ if (util_cpu_caps.x86_cpu_type == 0xf)
+ util_cpu_caps.x86_cpu_type += ((regs2[0] >> 20) & 0xff);
+
+ switch (util_cpu_caps.x86_cpu_type) {
+ case 0x17:
+ util_cpu_caps.family = CPU_AMD_ZEN1_ZEN2;
+ break;
+ case 0x18:
+ util_cpu_caps.family = CPU_AMD_ZEN_HYGON;
+ break;
+ case 0x19:
+ util_cpu_caps.family = CPU_AMD_ZEN3;
+ break;
+ default:
+ if (util_cpu_caps.x86_cpu_type > 0x19)
+ util_cpu_caps.family = CPU_AMD_ZEN_NEXT;
+ }
+
+ /* general feature flags */
+ util_cpu_caps.has_tsc = (regs2[3] >> 4) & 1; /* 0x0000010 */
+ util_cpu_caps.has_mmx = (regs2[3] >> 23) & 1; /* 0x0800000 */
+ util_cpu_caps.has_sse = (regs2[3] >> 25) & 1; /* 0x2000000 */
+ util_cpu_caps.has_sse2 = (regs2[3] >> 26) & 1; /* 0x4000000 */
+ util_cpu_caps.has_sse3 = (regs2[2] >> 0) & 1; /* 0x0000001 */
+ util_cpu_caps.has_ssse3 = (regs2[2] >> 9) & 1; /* 0x0000200 */
+ util_cpu_caps.has_sse4_1 = (regs2[2] >> 19) & 1;
+ util_cpu_caps.has_sse4_2 = (regs2[2] >> 20) & 1;
+ util_cpu_caps.has_popcnt = (regs2[2] >> 23) & 1;
+ util_cpu_caps.has_avx = ((regs2[2] >> 28) & 1) && // AVX
+ ((regs2[2] >> 27) & 1) && // OSXSAVE
+ ((xgetbv() & 6) == 6); // XMM & YMM
+ util_cpu_caps.has_f16c = ((regs2[2] >> 29) & 1) && util_cpu_caps.has_avx;
+ util_cpu_caps.has_fma = ((regs2[2] >> 12) & 1) && util_cpu_caps.has_avx;
+ util_cpu_caps.has_mmx2 = util_cpu_caps.has_sse; /* SSE CPUs support mmxext too */
+#if defined(PIPE_ARCH_X86_64)
+ util_cpu_caps.has_daz = 1;
+#else
+ util_cpu_caps.has_daz = util_cpu_caps.has_sse3 ||
+ (util_cpu_caps.has_sse2 && sse2_has_daz());
+#endif
+
+ cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
+ if (cacheline > 0)
+ util_cpu_caps.cacheline = cacheline;
+
+ // check for avx512
+ if (((regs2[2] >> 27) & 1) && // OSXSAVE
+ (xgetbv() & (0x7 << 5)) && // OPMASK: upper-256 enabled by OS
+ ((xgetbv() & 6) == 6)) { // XMM/YMM enabled by OS
+ uint32_t regs3[4];
+ cpuid_count(0x00000007, 0x00000000, regs3);
+ util_cpu_caps.has_avx512f = (regs3[1] >> 16) & 1;
+ util_cpu_caps.has_avx512dq = (regs3[1] >> 17) & 1;
+ util_cpu_caps.has_avx512ifma = (regs3[1] >> 21) & 1;
+ util_cpu_caps.has_avx512pf = (regs3[1] >> 26) & 1;
+ util_cpu_caps.has_avx512er = (regs3[1] >> 27) & 1;
+ util_cpu_caps.has_avx512cd = (regs3[1] >> 28) & 1;
+ util_cpu_caps.has_avx512bw = (regs3[1] >> 30) & 1;
+ util_cpu_caps.has_avx512vl = (regs3[1] >> 31) & 1;
+ util_cpu_caps.has_avx512vbmi = (regs3[2] >> 1) & 1;
+ }
+
+ }
+ if (util_cpu_caps.has_avx && regs[0] >= 0x00000007) {
+ uint32_t regs7[4];
+ cpuid_count(0x00000007, 0x00000000, regs7);
+ util_cpu_caps.has_avx2 = (regs7[1] >> 5) & 1;
+ }
+
+
+ if (regs[1] == 0x756e6547 && regs[2] == 0x6c65746e && regs[3] == 0x49656e69) {
+ /* GenuineIntel */
+ util_cpu_caps.has_intel = 1;
+ }
+
+ cpuid(0x80000000, regs);
+
+ if (regs[0] >= 0x80000001) {
+ uint32_t regs2[4];
+ cpuid(0x80000001, regs2);
+
+ util_cpu_caps.has_mmx |= (regs2[3] >> 23) & 1;
+ util_cpu_caps.has_mmx2 |= (regs2[3] >> 22) & 1;
+ util_cpu_caps.has_3dnow = (regs2[3] >> 31) & 1;
+ util_cpu_caps.has_3dnow_ext = (regs2[3] >> 30) & 1;
+
+ util_cpu_caps.has_xop = util_cpu_caps.has_avx &&
+ ((regs2[2] >> 11) & 1);
+ }
+
+ if (regs[0] >= 0x80000006) {
+ uint32_t regs2[4];
+ /* should we really do this if the clflush size above worked? */
+ unsigned int cacheline;
+ cpuid(0x80000006, regs2);
+ cacheline = regs2[2] & 0xFF;
+ if (cacheline > 0)
+ util_cpu_caps.cacheline = cacheline;
+ }
+
+ if (!util_cpu_caps.has_sse) {
+ util_cpu_caps.has_sse2 = 0;
+ util_cpu_caps.has_sse3 = 0;
+ util_cpu_caps.has_ssse3 = 0;
+ util_cpu_caps.has_sse4_1 = 0;
+ }
+ }
+#endif /* PIPE_ARCH_X86 || PIPE_ARCH_X86_64 */
+
+#if defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
+ check_os_arm_support();
+#endif
+
+#if defined(PIPE_ARCH_PPC)
+ check_os_altivec_support();
+#endif /* PIPE_ARCH_PPC */
+
+#if defined(PIPE_ARCH_MIPS64)
+ check_os_mips64_support();
+#endif /* PIPE_ARCH_MIPS64 */
+
+ get_cpu_topology();
+
+ if (debug_get_option_dump_cpu()) {
+ printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus);
+
+ printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type);
+ printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline);
+
+ printf("util_cpu_caps.has_tsc = %u\n", util_cpu_caps.has_tsc);
+ printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx);
+ printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2);
+ printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse);
+ printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2);
+ printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3);
+ printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3);
+ printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1);
+ printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps.has_sse4_2);
+ printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps.has_avx);
+ printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps.has_avx2);
+ printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps.has_f16c);
+ printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps.has_popcnt);
+ printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow);
+ printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext);
+ printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps.has_xop);
+ printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec);
+ printf("util_cpu_caps.has_vsx = %u\n", util_cpu_caps.has_vsx);
+ printf("util_cpu_caps.has_neon = %u\n", util_cpu_caps.has_neon);
+ printf("util_cpu_caps.has_msa = %u\n", util_cpu_caps.has_msa);
+ printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps.has_daz);
+ printf("util_cpu_caps.has_avx512f = %u\n", util_cpu_caps.has_avx512f);
+ printf("util_cpu_caps.has_avx512dq = %u\n", util_cpu_caps.has_avx512dq);
+ printf("util_cpu_caps.has_avx512ifma = %u\n", util_cpu_caps.has_avx512ifma);
+ printf("util_cpu_caps.has_avx512pf = %u\n", util_cpu_caps.has_avx512pf);
+ printf("util_cpu_caps.has_avx512er = %u\n", util_cpu_caps.has_avx512er);
+ printf("util_cpu_caps.has_avx512cd = %u\n", util_cpu_caps.has_avx512cd);
+ printf("util_cpu_caps.has_avx512bw = %u\n", util_cpu_caps.has_avx512bw);
+ printf("util_cpu_caps.has_avx512vl = %u\n", util_cpu_caps.has_avx512vl);
+ printf("util_cpu_caps.has_avx512vbmi = %u\n", util_cpu_caps.has_avx512vbmi);
+ printf("util_cpu_caps.num_L3_caches = %u\n", util_cpu_caps.num_L3_caches);
+ printf("util_cpu_caps.num_cpu_mask_bits = %u\n", util_cpu_caps.num_cpu_mask_bits);
+ }
+}
+
+static once_flag cpu_once_flag = ONCE_FLAG_INIT;
+
+void
+util_cpu_detect(void)
+{
+ call_once(&cpu_once_flag, util_cpu_detect_once);
+}
diff --git a/src/gallium/auxiliary/util/u_cpu_detect.h b/src/mesa/util/u_cpu_detect.h
index 01f38963..59dd2304 100644
--- a/src/gallium/auxiliary/util/u_cpu_detect.h
+++ b/src/mesa/util/u_cpu_detect.h
@@ -36,17 +36,45 @@
#define _UTIL_CPU_DETECT_H
-#include "pipe/p_compiler.h"
#include "pipe/p_config.h"
+#include "util/u_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
+enum cpu_family {
+ CPU_UNKNOWN,
-struct util_cpu_caps {
- int nr_cpus;
+ CPU_AMD_ZEN1_ZEN2,
+ CPU_AMD_ZEN_HYGON,
+ CPU_AMD_ZEN3,
+ CPU_AMD_ZEN_NEXT,
+ CPU_AMD_LAST,
+};
+
+typedef uint32_t util_affinity_mask[UTIL_MAX_CPUS / 32];
+
+struct util_cpu_caps_t {
+ /**
+ * Number of CPUs available to the process.
+ *
+ * This will be less than or equal to \c max_cpus. This is the number of
+ * CPUs that are online and available to the process.
+ */
+ int16_t nr_cpus;
+
+ /**
+ * Maximum number of CPUs that can be online in the system.
+ *
+ * This will be greater than or equal to \c nr_cpus. This is the number of
+ * CPUs installed in the system. \c nr_cpus will be less if some CPUs are
+ * offline.
+ */
+ int16_t max_cpus;
+
+ enum cpu_family family;
/* Feature flags */
int x86_cpu_type;
@@ -66,15 +94,48 @@ struct util_cpu_caps {
unsigned has_avx:1;
unsigned has_avx2:1;
unsigned has_f16c:1;
+ unsigned has_fma:1;
unsigned has_3dnow:1;
unsigned has_3dnow_ext:1;
unsigned has_xop:1;
unsigned has_altivec:1;
+ unsigned has_vsx:1;
unsigned has_daz:1;
+ unsigned has_neon:1;
+ unsigned has_msa:1;
+
+ unsigned has_avx512f:1;
+ unsigned has_avx512dq:1;
+ unsigned has_avx512ifma:1;
+ unsigned has_avx512pf:1;
+ unsigned has_avx512er:1;
+ unsigned has_avx512cd:1;
+ unsigned has_avx512bw:1;
+ unsigned has_avx512vl:1;
+ unsigned has_avx512vbmi:1;
+
+ unsigned num_L3_caches;
+ unsigned num_cpu_mask_bits;
+
+ uint16_t cpu_to_L3[UTIL_MAX_CPUS];
+ /* Affinity masks for each L3 cache. */
+ util_affinity_mask *L3_affinity_mask;
};
-extern struct util_cpu_caps
-util_cpu_caps;
+#define U_CPU_INVALID_L3 0xffff
+
+static inline const struct util_cpu_caps_t *
+util_get_cpu_caps(void)
+{
+ extern struct util_cpu_caps_t util_cpu_caps;
+
+ /* If you hit this assert, it means that something is using the
+ * cpu-caps without having first called util_cpu_detect()
+ */
+ assert(util_cpu_caps.nr_cpus >= 1);
+
+ return &util_cpu_caps;
+}
void util_cpu_detect(void);
diff --git a/src/gallium/auxiliary/util/u_debug.c b/src/mesa/util/u_debug.c
index f861168b..d8f17ddb 100644
--- a/src/gallium/auxiliary/util/u_debug.c
+++ b/src/mesa/util/u_debug.c
@@ -1,9 +1,9 @@
/**************************************************************************
- *
+ *
* Copyright 2008 VMware, Inc.
* Copyright (c) 2008 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -11,11 +11,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -23,23 +23,17 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
-#include "pipe/p_config.h"
-
+/* (virglrenderer) removed includes for p_format.h and p_state.h */
#include "pipe/p_compiler.h"
-#include "util/u_debug.h"
-#include "pipe/p_format.h"
-#include "pipe/p_state.h"
-#include "util/u_inlines.h"
-#include "util/u_format.h"
-#include "util/u_memory.h"
-#include "util/u_string.h"
-#include "util/u_math.h"
-#include "util/u_prim.h"
-#include "util/u_surface.h"
+#include "pipe/p_config.h"
+
+#include "util/u_debug.h"
+#include "util/u_string.h"
+#include <inttypes.h>
#include <stdio.h>
#include <limits.h> /* CHAR_BIT */
@@ -51,24 +45,28 @@
#endif
-void _debug_vprintf(const char *format, va_list ap)
+void
+_debug_vprintf(const char *format, va_list ap)
{
static char buf[4096] = {'\0'};
-#if defined(PIPE_OS_WINDOWS) || defined(PIPE_SUBSYSTEM_EMBEDDED)
+#if DETECT_OS_WINDOWS || defined(EMBEDDED_DEVICE)
/* We buffer until we find a newline. */
size_t len = strlen(buf);
- int ret = util_vsnprintf(buf + len, sizeof(buf) - len, format, ap);
- if(ret > (int)(sizeof(buf) - len - 1) || util_strchr(buf + len, '\n')) {
+ int ret = vsnprintf(buf + len, sizeof(buf) - len, format, ap);
+ if (ret > (int)(sizeof(buf) - len - 1) || strchr(buf + len, '\n')) {
os_log_message(buf);
buf[0] = '\0';
}
#else
- util_vsnprintf(buf, sizeof(buf), format, ap);
+ vsnprintf(buf, sizeof(buf), format, ap);
os_log_message(buf);
#endif
}
+/* (virglrenderer) removed _pipe_debug_message */
+
+
void
debug_disable_error_message_boxes(void)
{
@@ -96,9 +94,8 @@ debug_disable_error_message_boxes(void)
#ifdef DEBUG
-void debug_print_blob( const char *name,
- const void *blob,
- unsigned size )
+void
+debug_print_blob(const char *name, const void *blob, unsigned size)
{
const unsigned *ublob = (const unsigned *)blob;
unsigned i;
@@ -113,11 +110,11 @@ void debug_print_blob( const char *name,
#endif
-static boolean
+static bool
debug_get_option_should_print(void)
{
- static boolean first = TRUE;
- static boolean value = FALSE;
+ static bool first = true;
+ static bool value = false;
if (!first)
return value;
@@ -125,55 +122,59 @@ debug_get_option_should_print(void)
/* Oh hey this will call into this function,
* but its cool since we set first to false
*/
- first = FALSE;
- value = debug_get_bool_option("GALLIUM_PRINT_OPTIONS", FALSE);
+ first = false;
+ value = debug_get_bool_option("GALLIUM_PRINT_OPTIONS", false);
/* XXX should we print this option? Currently it wont */
return value;
}
+
const char *
debug_get_option(const char *name, const char *dfault)
{
const char *result;
result = os_get_option(name);
- if(!result)
+ if (!result)
result = dfault;
if (debug_get_option_should_print())
- debug_printf("%s: %s = %s\n", __FUNCTION__, name, result ? result : "(null)");
-
+ debug_printf("%s: %s = %s\n", __FUNCTION__, name,
+ result ? result : "(null)");
+
return result;
}
-boolean
-debug_get_bool_option(const char *name, boolean dfault)
+
+bool
+debug_get_bool_option(const char *name, bool dfault)
{
const char *str = os_get_option(name);
- boolean result;
-
- if(str == NULL)
+ bool result;
+
+ if (str == NULL)
result = dfault;
- else if(!util_strcmp(str, "n"))
- result = FALSE;
- else if(!util_strcmp(str, "no"))
- result = FALSE;
- else if(!util_strcmp(str, "0"))
- result = FALSE;
- else if(!util_strcmp(str, "f"))
- result = FALSE;
- else if(!util_strcmp(str, "F"))
- result = FALSE;
- else if(!util_strcmp(str, "false"))
- result = FALSE;
- else if(!util_strcmp(str, "FALSE"))
- result = FALSE;
+ else if (!strcmp(str, "n"))
+ result = false;
+ else if (!strcmp(str, "no"))
+ result = false;
+ else if (!strcmp(str, "0"))
+ result = false;
+ else if (!strcmp(str, "f"))
+ result = false;
+ else if (!strcmp(str, "F"))
+ result = false;
+ else if (!strcmp(str, "false"))
+ result = false;
+ else if (!strcmp(str, "FALSE"))
+ result = false;
else
- result = TRUE;
+ result = true;
if (debug_get_option_should_print())
- debug_printf("%s: %s = %s\n", __FUNCTION__, name, result ? "TRUE" : "FALSE");
-
+ debug_printf("%s: %s = %s\n", __FUNCTION__, name,
+ result ? "TRUE" : "FALSE");
+
return result;
}
@@ -183,27 +184,18 @@ debug_get_num_option(const char *name, long dfault)
{
long result;
const char *str;
-
+
str = os_get_option(name);
- if(!str)
+ if (!str) {
result = dfault;
- else {
- long sign;
- char c;
- c = *str++;
- if(c == '-') {
- sign = -1;
- c = *str++;
- }
- else {
- sign = 1;
- }
- result = 0;
- while('0' <= c && c <= '9') {
- result = result*10 + (c - '0');
- c = *str++;
+ } else {
+ char *endptr;
+
+ result = strtol(str, &endptr, 0);
+ if (str == endptr) {
+ /* Restore the default value when no digits were found. */
+ result = dfault;
}
- result *= sign;
}
if (debug_get_option_should_print())
@@ -212,16 +204,42 @@ debug_get_num_option(const char *name, long dfault)
return result;
}
-static boolean str_has_option(const char *str, const char *name)
+void
+debug_get_version_option(const char *name, unsigned *major, unsigned *minor)
+{
+ const char *str;
+
+ str = os_get_option(name);
+ if (str) {
+ unsigned v_maj, v_min;
+ int n;
+
+ n = sscanf(str, "%u.%u", &v_maj, &v_min);
+ if (n != 2) {
+ debug_printf("Illegal version specified for %s : %s\n", name, str);
+ return;
+ }
+ *major = v_maj;
+ *minor = v_min;
+ }
+
+ if (debug_get_option_should_print())
+ debug_printf("%s: %s = %u.%u\n", __FUNCTION__, name, *major, *minor);
+
+ return;
+}
+
+static bool
+str_has_option(const char *str, const char *name)
{
/* Empty string. */
if (!*str) {
- return FALSE;
+ return false;
}
/* OPTION=all */
- if (!util_strcmp(str, "all")) {
- return TRUE;
+ if (!strcmp(str, "all")) {
+ return true;
}
/* Find 'name' in 'str' surrounded by non-alphanumeric characters. */
@@ -238,11 +256,11 @@ static boolean str_has_option(const char *str, const char *name)
if (!*str || !(isalnum(*str) || *str == '_')) {
if (str-start == name_len &&
!memcmp(start, name, name_len)) {
- return TRUE;
+ return true;
}
if (!*str) {
- return FALSE;
+ return false;
}
start = str+1;
@@ -252,35 +270,36 @@ static boolean str_has_option(const char *str, const char *name)
}
}
- return FALSE;
+ return false;
}
-unsigned long
-debug_get_flags_option(const char *name,
+
+uint64_t
+debug_get_flags_option(const char *name,
const struct debug_named_value *flags,
- unsigned long dfault)
+ uint64_t dfault)
{
- unsigned long result;
+ uint64_t result;
const char *str;
const struct debug_named_value *orig = flags;
unsigned namealign = 0;
-
+
str = os_get_option(name);
- if(!str)
+ if (!str)
result = dfault;
- else if (!util_strcmp(str, "help")) {
+ else if (!strcmp(str, "help")) {
result = dfault;
_debug_printf("%s: help for %s:\n", __FUNCTION__, name);
for (; flags->name; ++flags)
namealign = MAX2(namealign, strlen(flags->name));
for (flags = orig; flags->name; ++flags)
- _debug_printf("| %*s [0x%0*lx]%s%s\n", namealign, flags->name,
- (int)sizeof(unsigned long)*CHAR_BIT/4, flags->value,
+ _debug_printf("| %*s [0x%0*"PRIx64"]%s%s\n", namealign, flags->name,
+ (int)sizeof(uint64_t)*CHAR_BIT/4, flags->value,
flags->desc ? " " : "", flags->desc ? flags->desc : "");
}
else {
result = 0;
- while( flags->name ) {
+ while (flags->name) {
if (str_has_option(str, flags->name))
result |= flags->value;
++flags;
@@ -289,9 +308,10 @@ debug_get_flags_option(const char *name,
if (debug_get_option_should_print()) {
if (str) {
- debug_printf("%s: %s = 0x%lx (%s)\n", __FUNCTION__, name, result, str);
+ debug_printf("%s: %s = 0x%"PRIx64" (%s)\n",
+ __FUNCTION__, name, result, str);
} else {
- debug_printf("%s: %s = 0x%lx\n", __FUNCTION__, name, result);
+ debug_printf("%s: %s = 0x%"PRIx64"\n", __FUNCTION__, name, result);
}
}
@@ -299,42 +319,42 @@ debug_get_flags_option(const char *name,
}
-void _debug_assert_fail(const char *expr,
- const char *file,
- unsigned line,
- const char *function)
+void
+_debug_assert_fail(const char *expr, const char *file, unsigned line,
+ const char *function)
{
- _debug_printf("%s:%u:%s: Assertion `%s' failed.\n", file, line, function, expr);
+ _debug_printf("%s:%u:%s: Assertion `%s' failed.\n",
+ file, line, function, expr);
os_abort();
}
const char *
-debug_dump_enum(const struct debug_named_value *names,
+debug_dump_enum(const struct debug_named_value *names,
unsigned long value)
{
static char rest[64];
-
- while(names->name) {
- if(names->value == value)
+
+ while (names->name) {
+ if (names->value == value)
return names->name;
++names;
}
- util_snprintf(rest, sizeof(rest), "0x%08lx", value);
+ snprintf(rest, sizeof(rest), "0x%08lx", value);
return rest;
}
const char *
-debug_dump_enum_noprefix(const struct debug_named_value *names,
+debug_dump_enum_noprefix(const struct debug_named_value *names,
const char *prefix,
unsigned long value)
{
static char rest[64];
-
- while(names->name) {
- if(names->value == value) {
+
+ while (names->name) {
+ if (names->value == value) {
const char *name = names->name;
while (*name == *prefix) {
name++;
@@ -345,16 +365,13 @@ debug_dump_enum_noprefix(const struct debug_named_value *names,
++names;
}
-
-
- util_snprintf(rest, sizeof(rest), "0x%08lx", value);
+ snprintf(rest, sizeof(rest), "0x%08lx", value);
return rest;
}
const char *
-debug_dump_flags(const struct debug_named_value *names,
- unsigned long value)
+debug_dump_flags(const struct debug_named_value *names, unsigned long value)
{
static char output[4096];
static char rest[256];
@@ -362,70 +379,34 @@ debug_dump_flags(const struct debug_named_value *names,
output[0] = '\0';
- while(names->name) {
- if((names->value & value) == names->value) {
+ while (names->name) {
+ if ((names->value & value) == names->value) {
if (!first)
- util_strncat(output, "|", sizeof(output) - strlen(output) - 1);
+ strncat(output, "|", sizeof(output) - strlen(output) - 1);
else
first = 0;
- util_strncat(output, names->name, sizeof(output) - strlen(output) - 1);
+ strncat(output, names->name, sizeof(output) - strlen(output) - 1);
output[sizeof(output) - 1] = '\0';
value &= ~names->value;
}
++names;
}
-
+
if (value) {
if (!first)
- util_strncat(output, "|", sizeof(output) - strlen(output) - 1);
+ strncat(output, "|", sizeof(output) - strlen(output) - 1);
else
first = 0;
-
- util_snprintf(rest, sizeof(rest), "0x%08lx", value);
- util_strncat(output, rest, sizeof(output) - strlen(output) - 1);
+
+ snprintf(rest, sizeof(rest), "0x%08lx", value);
+ strncat(output, rest, sizeof(output) - strlen(output) - 1);
output[sizeof(output) - 1] = '\0';
}
-
- if(first)
- return "0";
-
- return output;
-}
-
-
-#ifdef DEBUG
-void debug_print_format(const char *msg, unsigned fmt )
-{
- debug_printf("%s: %s\n", msg, util_format_name(fmt));
-}
-#endif
-
-
-
-static const struct debug_named_value pipe_prim_names[] = {
-#ifdef DEBUG
- DEBUG_NAMED_VALUE(PIPE_PRIM_POINTS),
- DEBUG_NAMED_VALUE(PIPE_PRIM_LINES),
- DEBUG_NAMED_VALUE(PIPE_PRIM_LINE_LOOP),
- DEBUG_NAMED_VALUE(PIPE_PRIM_LINE_STRIP),
- DEBUG_NAMED_VALUE(PIPE_PRIM_TRIANGLES),
- DEBUG_NAMED_VALUE(PIPE_PRIM_TRIANGLE_STRIP),
- DEBUG_NAMED_VALUE(PIPE_PRIM_TRIANGLE_FAN),
- DEBUG_NAMED_VALUE(PIPE_PRIM_QUADS),
- DEBUG_NAMED_VALUE(PIPE_PRIM_QUAD_STRIP),
- DEBUG_NAMED_VALUE(PIPE_PRIM_POLYGON),
- DEBUG_NAMED_VALUE(PIPE_PRIM_LINES_ADJACENCY),
- DEBUG_NAMED_VALUE(PIPE_PRIM_LINE_STRIP_ADJACENCY),
- DEBUG_NAMED_VALUE(PIPE_PRIM_TRIANGLES_ADJACENCY),
- DEBUG_NAMED_VALUE(PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY),
-#endif
- DEBUG_NAMED_VALUE_END
-};
+ if (first)
+ return "0";
-const char *u_prim_name( unsigned prim )
-{
- return debug_dump_enum(pipe_prim_names, prim);
+ return output;
}
@@ -469,48 +450,3 @@ debug_funclog_enter_exit(const char* f, UNUSED const int line,
debug_printf("%s\n", f);
}
#endif
-
-
-
-#ifdef DEBUG
-/**
- * Print PIPE_TRANSFER_x flags with a message.
- */
-void
-debug_print_transfer_flags(const char *msg, unsigned usage)
-{
-#define FLAG(x) { x, #x }
- static const struct {
- unsigned bit;
- const char *name;
- } flags[] = {
- FLAG(PIPE_TRANSFER_READ),
- FLAG(PIPE_TRANSFER_WRITE),
- FLAG(PIPE_TRANSFER_MAP_DIRECTLY),
- FLAG(PIPE_TRANSFER_DISCARD_RANGE),
- FLAG(PIPE_TRANSFER_DONTBLOCK),
- FLAG(PIPE_TRANSFER_UNSYNCHRONIZED),
- FLAG(PIPE_TRANSFER_FLUSH_EXPLICIT),
- FLAG(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
- };
- unsigned i;
-
- debug_printf("%s ", msg);
-
- for (i = 0; i < ARRAY_SIZE(flags); i++) {
- if (usage & flags[i].bit) {
- debug_printf("%s", flags[i].name);
- usage &= ~flags[i].bit;
- if (usage) {
- debug_printf(" | ");
- }
- }
- }
-
- debug_printf("\n");
-#undef FLAG
-}
-
-
-
-#endif
diff --git a/src/gallium/auxiliary/util/u_debug.h b/src/mesa/util/u_debug.h
index 26fb6bb5..5172b940 100644
--- a/src/gallium/auxiliary/util/u_debug.h
+++ b/src/mesa/util/u_debug.h
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright 2008 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,11 +10,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -22,41 +22,47 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
/**
* @file
* Cross-platform debugging helpers.
- *
- * For now it just has assert and printf replacements, but it might be extended
- * with stack trace reports and more advanced logging in the near future.
- *
+ *
+ * For now it just has assert and printf replacements, but it might be extended
+ * with stack trace reports and more advanced logging in the near future.
+ *
* @author Jose Fonseca <jfonseca@vmware.com>
*/
#ifndef U_DEBUG_H_
#define U_DEBUG_H_
+#include <stdarg.h>
+#include <string.h>
+#if !defined(_WIN32)
+#include <sys/types.h>
+#include <unistd.h>
+#endif
-#include "os/os_misc.h"
-
-#include "pipe/p_format.h"
+#include "util/os_misc.h"
+#include "util/detect_os.h"
+#include "util/macros.h"
+#if DETECT_OS_HAIKU
+/* Haiku provides debug_printf in libroot with OS.h */
+#include <OS.h>
+#endif
#ifdef __cplusplus
extern "C" {
#endif
-#if defined(__GNUC__)
-#define _util_printf_format(fmt, list) __attribute__ ((format (printf, fmt, list)))
-#else
-#define _util_printf_format(fmt, list)
-#endif
+#define _util_printf_format(fmt, list) PRINTFLIKE(fmt, list)
void _debug_vprintf(const char *format, va_list ap);
-
+
static inline void
_debug_printf(const char *format, ...)
@@ -77,7 +83,7 @@ _debug_printf(const char *format, ...)
* - avoid outputing large strings (512 bytes is the current maximum length
* that is guaranteed to be printed in all platforms)
*/
-#if !defined(PIPE_OS_HAIKU)
+#if !DETECT_OS_HAIKU
static inline void
debug_printf(const char *format, ...) _util_printf_format(1,2);
@@ -93,9 +99,6 @@ debug_printf(const char *format, ...)
(void) format; /* silence warning */
#endif
}
-#else /* is Haiku */
-/* Haiku provides debug_printf in libroot with OS.h */
-#include <OS.h>
#endif
@@ -107,9 +110,9 @@ debug_printf(const char *format, ...)
*/
#define debug_printf_once(args) \
do { \
- static boolean once = TRUE; \
+ static bool once = true; \
if (once) { \
- once = FALSE; \
+ once = false; \
debug_printf args; \
} \
} while (0)
@@ -128,13 +131,8 @@ debug_printf(const char *format, ...)
* messages.
*/
void debug_print_blob( const char *name, const void *blob, unsigned size );
-
-/* Print a message along with a prettified format string
- */
-void debug_print_format(const char *msg, unsigned fmt );
#else
#define debug_print_blob(_name, _blob, _size) ((void)0)
-#define debug_print_format(_msg, _fmt) ((void)0)
#endif
@@ -157,12 +155,18 @@ debug_disable_error_message_boxes(void);
#endif /* !DEBUG */
+long
+debug_get_num_option(const char *name, long dfault);
+
+void
+debug_get_version_option(const char *name, unsigned *major, unsigned *minor);
+
#ifdef _MSC_VER
__declspec(noreturn)
#endif
-void _debug_assert_fail(const char *expr,
- const char *file,
- unsigned line,
+void _debug_assert_fail(const char *expr,
+ const char *file,
+ unsigned line,
const char *function)
#if defined(__GNUC__) && !defined(DEBUG)
__attribute__((noreturn))
@@ -170,16 +174,16 @@ void _debug_assert_fail(const char *expr,
;
-/**
+/**
* Assert macro
- *
- * Do not expect that the assert call terminates -- errors must be handled
+ *
+ * Do not expect that the assert call terminates -- errors must be handled
* regardless of assert behavior.
*
* For non debug builds the assert macro will expand to a no-op, so do not
* call functions with side effects in the assert expression.
*/
-#ifdef DEBUG
+#ifndef NDEBUG
#define debug_assert(expr) ((expr) ? (void)0 : _debug_assert_fail(#expr, __FILE__, __LINE__, __FUNCTION__))
#else
#define debug_assert(expr) (void)(0 && (expr))
@@ -201,7 +205,7 @@ void _debug_assert_fail(const char *expr,
_debug_printf("%s\n", __FUNCTION__)
#else
#define debug_checkpoint() \
- ((void)0)
+ ((void)0)
#endif
@@ -213,7 +217,7 @@ void _debug_assert_fail(const char *expr,
_debug_printf("%s:%u:%s\n", __FILE__, __LINE__, __FUNCTION__)
#else
#define debug_checkpoint_full() \
- ((void)0)
+ ((void)0)
#endif
@@ -225,7 +229,7 @@ void _debug_assert_fail(const char *expr,
_debug_printf("%s:%u:%s: warning: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
#else
#define debug_warning(__msg) \
- ((void)0)
+ ((void)0)
#endif
@@ -235,16 +239,16 @@ void _debug_assert_fail(const char *expr,
#ifdef DEBUG
#define debug_warn_once(__msg) \
do { \
- static bool warned = FALSE; \
+ static bool warned = false; \
if (!warned) { \
_debug_printf("%s:%u:%s: one time warning: %s\n", \
__FILE__, __LINE__, __FUNCTION__, __msg); \
- warned = TRUE; \
+ warned = true; \
} \
} while (0)
#else
#define debug_warn_once(__msg) \
- ((void)0)
+ ((void)0)
#endif
@@ -253,12 +257,17 @@ void _debug_assert_fail(const char *expr,
*/
#ifdef DEBUG
#define debug_error(__msg) \
- _debug_printf("%s:%u:%s: error: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
+ _debug_printf("%s:%u:%s: error: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
#else
#define debug_error(__msg) \
_debug_printf("error: %s\n", __msg)
#endif
+/**
+ * Output a debug log message to the debug info callback.
+ * (virglrenderer) Removed.
+ */
+
/**
* Used by debug_dump_enum and debug_dump_flags to describe symbols.
@@ -266,14 +275,14 @@ void _debug_assert_fail(const char *expr,
struct debug_named_value
{
const char *name;
- unsigned long value;
+ uint64_t value;
const char *desc;
};
/**
* Some C pre-processor magic to simplify creating named values.
- *
+ *
* Example:
* @code
* static const debug_named_value my_names[] = {
@@ -282,9 +291,9 @@ struct debug_named_value
* DEBUG_NAMED_VALUE(MY_ENUM_VALUE_Z),
* DEBUG_NAMED_VALUE_END
* };
- *
+ *
* ...
- * debug_printf("%s = %s\n",
+ * debug_printf("%s = %s\n",
* name,
* debug_dump_enum(my_names, my_value));
* ...
@@ -299,11 +308,11 @@ struct debug_named_value
* Convert a enum value to a string.
*/
const char *
-debug_dump_enum(const struct debug_named_value *names,
+debug_dump_enum(const struct debug_named_value *names,
unsigned long value);
const char *
-debug_dump_enum_noprefix(const struct debug_named_value *names,
+debug_dump_enum_noprefix(const struct debug_named_value *names,
const char *prefix,
unsigned long value);
@@ -312,7 +321,7 @@ debug_dump_enum_noprefix(const struct debug_named_value *names,
* Convert binary flags value to a string.
*/
const char *
-debug_dump_flags(const struct debug_named_value *names,
+debug_dump_flags(const struct debug_named_value *names,
unsigned long value);
@@ -355,38 +364,84 @@ void debug_funclog_enter_exit(const char* f, const int line, const char* file);
/**
* Get option.
- *
- * It is an alias for getenv on Linux.
- *
- * On Windows it reads C:\gallium.cfg, which is a text file with CR+LF line
+ *
+ * It is an alias for getenv on Linux.
+ *
+ * On Windows it reads C:\gallium.cfg, which is a text file with CR+LF line
* endings with one option per line as
- *
+ *
* NAME=value
- *
+ *
* This file must be terminated with an extra empty line.
*/
const char *
debug_get_option(const char *name, const char *dfault);
-boolean
-debug_get_bool_option(const char *name, boolean dfault);
+bool
+debug_get_bool_option(const char *name, bool dfault);
long
debug_get_num_option(const char *name, long dfault);
-unsigned long
-debug_get_flags_option(const char *name,
+uint64_t
+debug_get_flags_option(const char *name,
const struct debug_named_value *flags,
- unsigned long dfault);
+ uint64_t dfault);
+
+#define DEBUG_GET_ONCE_OPTION(suffix, name, dfault) \
+static const char * \
+debug_get_option_ ## suffix (void) \
+{ \
+ static bool first = true; \
+ static const char * value; \
+ if (first) { \
+ first = false; \
+ value = debug_get_option(name, dfault); \
+ } \
+ return value; \
+}
+
+static inline bool
+__check_suid(void)
+{
+#if !defined(_WIN32)
+ if (geteuid() != getuid())
+ return true;
+#endif
+ return false;
+}
+
+/**
+ * Define a getter for a debug option which specifies a 'FILE *'
+ * to open, with additional checks for suid executables. Note
+ * that if the return is not NULL, the caller owns the 'FILE *'
+ * reference.
+ */
+#define DEBUG_GET_ONCE_FILE_OPTION(suffix, name, dfault, mode) \
+static FILE * \
+debug_get_option_ ## suffix (void) \
+{ \
+ static bool first = true; \
+ static const char * value; \
+ if (__check_suid()) \
+ return NULL; \
+ if (first) { \
+ first = false; \
+ value = debug_get_option(name, dfault); \
+ } \
+ if (!value) \
+ return NULL; \
+ return fopen(value, mode); \
+}
#define DEBUG_GET_ONCE_BOOL_OPTION(sufix, name, dfault) \
-static boolean \
+static bool \
debug_get_option_ ## sufix (void) \
{ \
- static boolean first = TRUE; \
- static boolean value; \
+ static bool first = true; \
+ static bool value; \
if (first) { \
- first = FALSE; \
+ first = false; \
value = debug_get_bool_option(name, dfault); \
} \
return value; \
@@ -396,10 +451,10 @@ debug_get_option_ ## sufix (void) \
static long \
debug_get_option_ ## sufix (void) \
{ \
- static boolean first = TRUE; \
+ static bool first = true; \
static long value; \
if (first) { \
- first = FALSE; \
+ first = false; \
value = debug_get_num_option(name, dfault); \
} \
return value; \
@@ -409,50 +464,16 @@ debug_get_option_ ## sufix (void) \
static unsigned long \
debug_get_option_ ## sufix (void) \
{ \
- static boolean first = TRUE; \
+ static bool first = true; \
static unsigned long value; \
if (first) { \
- first = FALSE; \
+ first = false; \
value = debug_get_flags_option(name, flags, dfault); \
} \
return value; \
}
-unsigned long
-debug_memory_begin(void);
-
-void
-debug_memory_end(unsigned long beginning);
-
-
-#ifdef DEBUG
-struct pipe_context;
-struct pipe_surface;
-struct pipe_transfer;
-struct pipe_resource;
-
-void debug_dump_image(const char *prefix,
- enum pipe_format format, unsigned cpp,
- unsigned width, unsigned height,
- unsigned stride,
- const void *data);
-void debug_dump_surface(struct pipe_context *pipe,
- const char *prefix,
- struct pipe_surface *surface);
-void debug_dump_texture(struct pipe_context *pipe,
- const char *prefix,
- struct pipe_resource *texture);
-#else
-#define debug_dump_image(prefix, format, cpp, width, height, stride, data) ((void)0)
-#define debug_dump_surface(pipe, prefix, surface) ((void)0)
-#endif
-
-
-void
-debug_print_transfer_flags(const char *msg, unsigned usage);
-
-
#ifdef __cplusplus
}
#endif
diff --git a/src/gallium/auxiliary/os/os_memory_aligned.h b/src/mesa/util/u_endian.h
index 90a5609d..cb309545 100644
--- a/src/gallium/auxiliary/os/os_memory_aligned.h
+++ b/src/mesa/util/u_endian.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2008-2010 VMware, Inc.
+ * Copyright 2007-2008 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,49 +24,15 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#ifndef U_ENDIAN_H
+#define U_ENDIAN_H
+/* (virglrenderer) Detected by meson */
-/*
- * Memory alignment wrappers.
- */
-
-
-#ifndef _OS_MEMORY_H_
-#error "Must not be included directly. Include os_memory.h instead"
+#if !defined(UTIL_ARCH_LITTLE_ENDIAN) || !defined(UTIL_ARCH_BIG_ENDIAN)
+# error "UTIL_ARCH_LITTLE_ENDIAN and/or UTIL_ARCH_BIG_ENDIAN were unset."
+#elif UTIL_ARCH_LITTLE_ENDIAN == UTIL_ARCH_BIG_ENDIAN
+# error "UTIL_ARCH_LITTLE_ENDIAN and UTIL_ARCH_BIG_ENDIAN must not both be 1 or 0."
#endif
-
-#include "pipe/p_compiler.h"
-
-
-/**
- * Return memory on given byte alignment
- */
-static inline void *
-os_malloc_aligned(size_t size, size_t alignment)
-{
- char *ptr, *buf;
-
- ptr = (char *) os_malloc(size + alignment + sizeof(void *));
- if (!ptr)
- return NULL;
-
- buf = (char *)(((uintptr_t)ptr + sizeof(void *) + alignment - 1) & ~((uintptr_t)(alignment - 1)));
- *(char **)(buf - sizeof(void *)) = ptr;
-
- return buf;
-}
-
-
-/**
- * Free memory returned by align_malloc().
- */
-static inline void
-os_free_aligned(void *ptr)
-{
- if (ptr) {
- void **cubbyHole = (void **) ((char *) ptr - sizeof(void *));
- void *realAddr = *cubbyHole;
- os_free(realAddr);
- }
-}
+#endif
diff --git a/src/mesa/util/u_math.c b/src/mesa/util/u_math.c
new file mode 100644
index 00000000..7913285a
--- /dev/null
+++ b/src/mesa/util/u_math.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#include "pipe/p_config.h"
+#include "util/u_math.h"
+#include "util/u_cpu_detect.h"
+
+#if defined(PIPE_ARCH_SSE)
+#include <xmmintrin.h>
+/* This is defined in pmmintrin.h, but it can only be included when -msse3 is
+ * used, so just define it here to avoid further #include requirements. */
+#ifndef _MM_DENORMALS_ZERO_MASK
+#define _MM_DENORMALS_ZERO_MASK 0x0040
+#endif
+#endif
+
+
+/** log2(x), for x in [1.0, 2.0) */
+float log2_table[LOG2_TABLE_SIZE];
+
+
+static void
+init_log2_table(void)
+{
+ unsigned i;
+ for (i = 0; i < LOG2_TABLE_SIZE; i++)
+ log2_table[i] = (float) log2(1.0 + i * (1.0 / LOG2_TABLE_SCALE));
+}
+
+
+/**
+ * One time init for math utilities.
+ */
+void
+util_init_math(void)
+{
+ static bool initialized = false;
+ if (!initialized) {
+ init_log2_table();
+ initialized = true;
+ }
+}
+
+/**
+ * Fetches the contents of the fpstate (mxcsr on x86) register.
+ *
+ * On platforms without support for it just returns 0.
+ */
+unsigned
+util_fpstate_get(void)
+{
+ unsigned mxcsr = 0;
+
+#if defined(PIPE_ARCH_SSE)
+ if (util_get_cpu_caps()->has_sse) {
+ mxcsr = _mm_getcsr();
+ }
+#endif
+
+ return mxcsr;
+}
+
+/**
+ * Make sure that the fp treats the denormalized floating
+ * point numbers as zero.
+ *
+ * This is the behavior required by D3D10. OpenGL doesn't care.
+ */
+unsigned
+util_fpstate_set_denorms_to_zero(unsigned current_mxcsr)
+{
+#if defined(PIPE_ARCH_SSE)
+ if (util_get_cpu_caps()->has_sse) {
+ /* Enable flush to zero mode */
+ current_mxcsr |= _MM_FLUSH_ZERO_MASK;
+ if (util_get_cpu_caps()->has_daz) {
+ /* Enable denormals are zero mode */
+ current_mxcsr |= _MM_DENORMALS_ZERO_MASK;
+ }
+ util_fpstate_set(current_mxcsr);
+ }
+#endif
+ return current_mxcsr;
+}
+
+/**
+ * Set the state of the fpstate (mxcsr on x86) register.
+ *
+ * On platforms without support for it, this is a no-op.
+ */
+void
+util_fpstate_set(unsigned mxcsr)
+{
+#if defined(PIPE_ARCH_SSE)
+ if (util_get_cpu_caps()->has_sse) {
+ _mm_setcsr(mxcsr);
+ }
+#endif
+}
+
+/**
+ * Compute inverse of 4x4 matrix.
+ *
+ * \return false if the source matrix is singular.
+ *
+ * \author
+ * Code contributed by Jacques Leroy jle@star.be
+ *
+ * Calculates the inverse matrix by performing the gaussian matrix reduction
+ * with partial pivoting followed by back/substitution with the loops manually
+ * unrolled.
+ */
+bool
+util_invert_mat4x4(float *out, const float *m)
+{
+ float wtmp[4][8];
+ float m0, m1, m2, m3, s;
+ float *r0, *r1, *r2, *r3;
+
+#define MAT(m, r, c) (m)[(c)*4 + (r)]
+#define SWAP_ROWS(a, b) \
+ { \
+ float *_tmp = a; \
+ (a) = (b); \
+ (b) = _tmp; \
+ }
+
+ r0 = wtmp[0], r1 = wtmp[1], r2 = wtmp[2], r3 = wtmp[3];
+
+ r0[0] = MAT(m, 0, 0), r0[1] = MAT(m, 0, 1), r0[2] = MAT(m, 0, 2), r0[3] = MAT(m, 0, 3),
+ r0[4] = 1.0, r0[5] = r0[6] = r0[7] = 0.0,
+
+ r1[0] = MAT(m, 1, 0), r1[1] = MAT(m, 1, 1), r1[2] = MAT(m, 1, 2), r1[3] = MAT(m, 1, 3),
+ r1[5] = 1.0, r1[4] = r1[6] = r1[7] = 0.0,
+
+ r2[0] = MAT(m, 2, 0), r2[1] = MAT(m, 2, 1), r2[2] = MAT(m, 2, 2), r2[3] = MAT(m, 2, 3),
+ r2[6] = 1.0, r2[4] = r2[5] = r2[7] = 0.0,
+
+ r3[0] = MAT(m, 3, 0), r3[1] = MAT(m, 3, 1), r3[2] = MAT(m, 3, 2), r3[3] = MAT(m, 3, 3),
+ r3[7] = 1.0, r3[4] = r3[5] = r3[6] = 0.0;
+
+ /* choose pivot - or die */
+ if (fabsf(r3[0]) > fabsf(r2[0]))
+ SWAP_ROWS(r3, r2);
+ if (fabsf(r2[0]) > fabsf(r1[0]))
+ SWAP_ROWS(r2, r1);
+ if (fabsf(r1[0]) > fabsf(r0[0]))
+ SWAP_ROWS(r1, r0);
+ if (0.0F == r0[0])
+ return false;
+
+ /* eliminate first variable */
+ m1 = r1[0] / r0[0];
+ m2 = r2[0] / r0[0];
+ m3 = r3[0] / r0[0];
+ s = r0[1];
+ r1[1] -= m1 * s;
+ r2[1] -= m2 * s;
+ r3[1] -= m3 * s;
+ s = r0[2];
+ r1[2] -= m1 * s;
+ r2[2] -= m2 * s;
+ r3[2] -= m3 * s;
+ s = r0[3];
+ r1[3] -= m1 * s;
+ r2[3] -= m2 * s;
+ r3[3] -= m3 * s;
+ s = r0[4];
+ if (s != 0.0F) {
+ r1[4] -= m1 * s;
+ r2[4] -= m2 * s;
+ r3[4] -= m3 * s;
+ }
+ s = r0[5];
+ if (s != 0.0F) {
+ r1[5] -= m1 * s;
+ r2[5] -= m2 * s;
+ r3[5] -= m3 * s;
+ }
+ s = r0[6];
+ if (s != 0.0F) {
+ r1[6] -= m1 * s;
+ r2[6] -= m2 * s;
+ r3[6] -= m3 * s;
+ }
+ s = r0[7];
+ if (s != 0.0F) {
+ r1[7] -= m1 * s;
+ r2[7] -= m2 * s;
+ r3[7] -= m3 * s;
+ }
+
+ /* choose pivot - or die */
+ if (fabsf(r3[1]) > fabsf(r2[1]))
+ SWAP_ROWS(r3, r2);
+ if (fabsf(r2[1]) > fabsf(r1[1]))
+ SWAP_ROWS(r2, r1);
+ if (0.0F == r1[1])
+ return false;
+
+ /* eliminate second variable */
+ m2 = r2[1] / r1[1];
+ m3 = r3[1] / r1[1];
+ r2[2] -= m2 * r1[2];
+ r3[2] -= m3 * r1[2];
+ r2[3] -= m2 * r1[3];
+ r3[3] -= m3 * r1[3];
+ s = r1[4];
+ if (0.0F != s) {
+ r2[4] -= m2 * s;
+ r3[4] -= m3 * s;
+ }
+ s = r1[5];
+ if (0.0F != s) {
+ r2[5] -= m2 * s;
+ r3[5] -= m3 * s;
+ }
+ s = r1[6];
+ if (0.0F != s) {
+ r2[6] -= m2 * s;
+ r3[6] -= m3 * s;
+ }
+ s = r1[7];
+ if (0.0F != s) {
+ r2[7] -= m2 * s;
+ r3[7] -= m3 * s;
+ }
+
+ /* choose pivot - or die */
+ if (fabsf(r3[2]) > fabsf(r2[2]))
+ SWAP_ROWS(r3, r2);
+ if (0.0F == r2[2])
+ return false;
+
+ /* eliminate third variable */
+ m3 = r3[2] / r2[2];
+ r3[3] -= m3 * r2[3], r3[4] -= m3 * r2[4], r3[5] -= m3 * r2[5], r3[6] -= m3 * r2[6],
+ r3[7] -= m3 * r2[7];
+
+ /* last check */
+ if (0.0F == r3[3])
+ return false;
+
+ s = 1.0F / r3[3]; /* now back substitute row 3 */
+ r3[4] *= s;
+ r3[5] *= s;
+ r3[6] *= s;
+ r3[7] *= s;
+
+ m2 = r2[3]; /* now back substitute row 2 */
+ s = 1.0F / r2[2];
+ r2[4] = s * (r2[4] - r3[4] * m2), r2[5] = s * (r2[5] - r3[5] * m2),
+ r2[6] = s * (r2[6] - r3[6] * m2), r2[7] = s * (r2[7] - r3[7] * m2);
+ m1 = r1[3];
+ r1[4] -= r3[4] * m1, r1[5] -= r3[5] * m1, r1[6] -= r3[6] * m1, r1[7] -= r3[7] * m1;
+ m0 = r0[3];
+ r0[4] -= r3[4] * m0, r0[5] -= r3[5] * m0, r0[6] -= r3[6] * m0, r0[7] -= r3[7] * m0;
+
+ m1 = r1[2]; /* now back substitute row 1 */
+ s = 1.0F / r1[1];
+ r1[4] = s * (r1[4] - r2[4] * m1), r1[5] = s * (r1[5] - r2[5] * m1),
+ r1[6] = s * (r1[6] - r2[6] * m1), r1[7] = s * (r1[7] - r2[7] * m1);
+ m0 = r0[2];
+ r0[4] -= r2[4] * m0, r0[5] -= r2[5] * m0, r0[6] -= r2[6] * m0, r0[7] -= r2[7] * m0;
+
+ m0 = r0[1]; /* now back substitute row 0 */
+ s = 1.0F / r0[0];
+ r0[4] = s * (r0[4] - r1[4] * m0), r0[5] = s * (r0[5] - r1[5] * m0),
+ r0[6] = s * (r0[6] - r1[6] * m0), r0[7] = s * (r0[7] - r1[7] * m0);
+
+ MAT(out, 0, 0) = r0[4];
+ MAT(out, 0, 1) = r0[5], MAT(out, 0, 2) = r0[6];
+ MAT(out, 0, 3) = r0[7], MAT(out, 1, 0) = r1[4];
+ MAT(out, 1, 1) = r1[5], MAT(out, 1, 2) = r1[6];
+ MAT(out, 1, 3) = r1[7], MAT(out, 2, 0) = r2[4];
+ MAT(out, 2, 1) = r2[5], MAT(out, 2, 2) = r2[6];
+ MAT(out, 2, 3) = r2[7], MAT(out, 3, 0) = r3[4];
+ MAT(out, 3, 1) = r3[5], MAT(out, 3, 2) = r3[6];
+ MAT(out, 3, 3) = r3[7];
+
+#undef MAT
+#undef SWAP_ROWS
+
+ return true;
+}
diff --git a/src/gallium/auxiliary/util/u_math.h b/src/mesa/util/u_math.h
index dd8e5497..7c989a39 100644
--- a/src/gallium/auxiliary/util/u_math.h
+++ b/src/mesa/util/u_math.h
@@ -39,177 +39,24 @@
#define U_MATH_H
-#include "pipe/p_compiler.h"
+#include "c99_math.h"
+#include <assert.h>
+#include <float.h>
+#include <stdarg.h>
+#include "bitscan.h"
+#include "u_endian.h" /* for UTIL_ARCH_BIG_ENDIAN */
#ifdef __cplusplus
extern "C" {
#endif
-#include <math.h>
-#include <float.h>
-#include <stdarg.h>
-
-#ifdef PIPE_OS_UNIX
-#include <strings.h> /* for ffs */
-#endif
-
-
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif
-#if defined(_MSC_VER)
-
-#if _MSC_VER < 1400 && !defined(__cplusplus)
-
-static inline float cosf( float f )
-{
- return (float) cos( (double) f );
-}
-
-static inline float sinf( float f )
-{
- return (float) sin( (double) f );
-}
-
-static inline float ceilf( float f )
-{
- return (float) ceil( (double) f );
-}
-
-static inline float floorf( float f )
-{
- return (float) floor( (double) f );
-}
-
-static inline float powf( float f, float g )
-{
- return (float) pow( (double) f, (double) g );
-}
-
-static inline float sqrtf( float f )
-{
- return (float) sqrt( (double) f );
-}
-
-static inline float fabsf( float f )
-{
- return (float) fabs( (double) f );
-}
-
-static inline float logf( float f )
-{
- return (float) log( (double) f );
-}
-
-#else
-/* Work-around an extra semi-colon in VS 2005 logf definition */
-#ifdef logf
-#undef logf
-#define logf(x) ((float)log((double)(x)))
-#endif /* logf */
-
-#if _MSC_VER < 1800
-#define isfinite(x) _finite((double)(x))
-#define isnan(x) _isnan((double)(x))
-#endif /* _MSC_VER < 1800 */
-#endif /* _MSC_VER < 1400 && !defined(__cplusplus) */
-
-#if _MSC_VER < 1800
-static inline double log2( double x )
-{
- const double invln2 = 1.442695041;
- return log( x ) * invln2;
-}
-
-static inline double
-round(double x)
-{
- return x >= 0.0 ? floor(x + 0.5) : ceil(x - 0.5);
-}
-
-static inline float
-roundf(float x)
-{
- return x >= 0.0f ? floorf(x + 0.5f) : ceilf(x - 0.5f);
-}
-#endif
-
-#ifndef INFINITY
-#define INFINITY (DBL_MAX + DBL_MAX)
-#endif
-
-#ifndef NAN
-#define NAN (INFINITY - INFINITY)
-#endif
-
-#endif /* _MSC_VER */
-
-
-#if __STDC_VERSION__ < 199901L && (!defined(__cplusplus) || defined(_MSC_VER))
-static inline long int
-lrint(double d)
-{
- long int rounded = (long int)(d + 0.5);
-
- if (d - floor(d) == 0.5) {
- if (rounded % 2 != 0)
- rounded += (d > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline long int
-lrintf(float f)
-{
- long int rounded = (long int)(f + 0.5f);
-
- if (f - floorf(f) == 0.5f) {
- if (rounded % 2 != 0)
- rounded += (f > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline long long int
-llrint(double d)
-{
- long long int rounded = (long long int)(d + 0.5);
-
- if (d - floor(d) == 0.5) {
- if (rounded % 2 != 0)
- rounded += (d > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline long long int
-llrintf(float f)
-{
- long long int rounded = (long long int)(f + 0.5f);
-
- if (f - floorf(f) == 0.5f) {
- if (rounded % 2 != 0)
- rounded += (f > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-#endif /* C99 */
-
-#define POW2_TABLE_SIZE_LOG2 9
-#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
-#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
-#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
-extern float pow2_table[POW2_TABLE_SIZE];
-
-
/**
* Initialize math module. This should be called before using any
* other functions in this module.
@@ -236,7 +83,8 @@ union di {
* Extract the IEEE float32 exponent.
*/
static inline signed
-util_get_float32_exponent(float x) {
+util_get_float32_exponent(float x)
+{
union fi f;
f.f = x;
@@ -245,57 +93,7 @@ util_get_float32_exponent(float x) {
}
-/**
- * Fast version of 2^x
- * Identity: exp2(a + b) = exp2(a) * exp2(b)
- * Let ipart = int(x)
- * Let fpart = x - ipart;
- * So, exp2(x) = exp2(ipart) * exp2(fpart)
- * Compute exp2(ipart) with i << ipart
- * Compute exp2(fpart) with lookup table.
- */
-static inline float
-util_fast_exp2(float x)
-{
- int32_t ipart;
- float fpart, mpart;
- union fi epart;
-
- if(x > 129.00000f)
- return 3.402823466e+38f;
-
- if (x < -126.99999f)
- return 0.0f;
-
- ipart = (int32_t) x;
- fpart = x - (float) ipart;
-
- /* same as
- * epart.f = (float) (1 << ipart)
- * but faster and without integer overflow for ipart > 31
- */
- epart.i = (ipart + 127 ) << 23;
-
- mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];
-
- return epart.f * mpart;
-}
-
-
-/**
- * Fast approximation to exp(x).
- */
-static inline float
-util_fast_exp(float x)
-{
- const float k = 1.44269f; /* = log2(e) */
- return util_fast_exp2(k * x);
-}
-
-
-#if 0
-
-#define LOG2_TABLE_SIZE_LOG2 16
+#define LOG2_TABLE_SIZE_LOG2 8
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];
@@ -318,29 +116,28 @@ util_fast_log2(float x)
/**
- * Fast approximation to x^y.
- */
-static inline float
-util_fast_pow(float x, float y)
-{
- return util_fast_exp2(util_fast_log2(x) * y);
-}
-#endif
-/* Note that this counts zero as a power of two.
- */
-static inline boolean
-util_is_power_of_two( unsigned v )
-{
- return (v & (v-1)) == 0;
-}
-
-
-/**
* Floor(x), returned as int.
*/
static inline int
util_ifloor(float f)
{
+#if defined(USE_X86_ASM) && defined(__GNUC__) && defined(__i386__)
+ /*
+ * IEEE floor for computers that round to nearest or even.
+ * 'f' must be between -4194304 and 4194303.
+ * This floor operation is done by "(iround(f + .5) + iround(f - .5)) >> 1",
+ * but uses some IEEE specific tricks for better speed.
+ * Contributed by Josh Vanderhoof
+ */
+ int ai, bi;
+ double af, bf;
+ af = (3 << 22) + 0.5 + (double)f;
+ bf = (3 << 22) + 0.5 - (double)f;
+ /* GCC generates an extra fstp/fld without this. */
+ __asm__ ("fstps %0" : "=m" (ai) : "t" (af) : "st");
+ __asm__ ("fstps %0" : "=m" (bi) : "t" (bf) : "st");
+ return (ai - bi) >> 1;
+#else
int ai, bi;
double af, bf;
union fi u;
@@ -349,6 +146,7 @@ util_ifloor(float f)
u.f = (float) af; ai = u.i;
u.f = (float) bf; bi = u.i;
return (ai - bi) >> 1;
+#endif
}
@@ -381,10 +179,10 @@ util_iround(float f)
/**
* Approximate floating point comparison
*/
-static inline boolean
+static inline bool
util_is_approx(float a, float b, float tol)
{
- return fabs(b - a) <= tol;
+ return fabsf(b - a) <= tol;
}
@@ -400,7 +198,7 @@ util_is_approx(float a, float b, float tol)
/**
* Single-float
*/
-static inline boolean
+static inline bool
util_is_inf_or_nan(float x)
{
union fi tmp;
@@ -409,7 +207,7 @@ util_is_inf_or_nan(float x)
}
-static inline boolean
+static inline bool
util_is_nan(float x)
{
union fi tmp;
@@ -434,7 +232,7 @@ util_inf_sign(float x)
/**
* Double-float
*/
-static inline boolean
+static inline bool
util_is_double_inf_or_nan(double x)
{
union di tmp;
@@ -443,7 +241,7 @@ util_is_double_inf_or_nan(double x)
}
-static inline boolean
+static inline bool
util_is_double_nan(double x)
{
union di tmp;
@@ -468,14 +266,14 @@ util_double_inf_sign(double x)
/**
* Half-float
*/
-static inline boolean
+static inline bool
util_is_half_inf_or_nan(int16_t x)
{
return (x & 0x7c00) == 0x7c00;
}
-static inline boolean
+static inline bool
util_is_half_nan(int16_t x)
{
return (x & 0x7fff) > 0x7c00;
@@ -494,163 +292,84 @@ util_half_inf_sign(int16_t x)
/**
- * Find first bit set in word. Least significant bit is 1.
- * Return 0 if no bits set.
+ * Return float bits.
*/
-#ifndef FFS_DEFINED
-#define FFS_DEFINED 1
-
-#if defined(_MSC_VER) && _MSC_VER >= 1300 && (_M_IX86 || _M_AMD64 || _M_IA64)
-unsigned char _BitScanForward(unsigned long* Index, unsigned long Mask);
-#pragma intrinsic(_BitScanForward)
-static inline
-unsigned long ffs( unsigned long u )
-{
- unsigned long i;
- if (_BitScanForward(&i, u))
- return i + 1;
- else
- return 0;
-}
-#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
-static inline
-unsigned ffs( unsigned u )
+static inline unsigned
+fui( float f )
{
- unsigned i;
-
- if (u == 0) {
- return 0;
- }
-
- __asm bsf eax, [u]
- __asm inc eax
- __asm mov [i], eax
-
- return i;
+ union fi fi;
+ fi.f = f;
+ return fi.ui;
}
-#elif defined(__MINGW32__) || defined(PIPE_OS_ANDROID)
-#define ffs __builtin_ffs
-#endif
-
-#endif /* FFS_DEFINED */
-/**
- * Find last bit set in a word. The least significant bit is 1.
- * Return 0 if no bits are set.
- */
-static inline unsigned util_last_bit(unsigned u)
+static inline float
+uif(uint32_t ui)
{
-#if defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304)
- return u == 0 ? 0 : 32 - __builtin_clz(u);
-#else
- unsigned r = 0;
- while (u) {
- r++;
- u >>= 1;
- }
- return r;
-#endif
+ union fi fi;
+ fi.ui = ui;
+ return fi.f;
}
-/**
- * Find last bit in a word that does not match the sign bit. The least
- * significant bit is 1.
- * Return 0 if no bits are set.
- */
-static inline unsigned util_last_bit_signed(int i)
-{
-#if defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)
- return 31 - __builtin_clrsb(i);
-#else
- if (i >= 0)
- return util_last_bit(i);
- else
- return util_last_bit(~(unsigned)i);
-#endif
-}
-/* Destructively loop over all of the bits in a mask as in:
- *
- * while (mymask) {
- * int i = u_bit_scan(&mymask);
- * ... process element i
- * }
- *
+/**
+ * Convert uint8_t to float in [0, 1].
*/
-static inline int u_bit_scan(unsigned *mask)
+static inline float
+ubyte_to_float(uint8_t ub)
{
- int i = ffs(*mask) - 1;
- *mask &= ~(1 << i);
- return i;
+ return (float) ub * (1.0f / 255.0f);
}
-/* For looping over a bitmask when you want to loop over consecutive bits
- * manually, for example:
- *
- * while (mask) {
- * int start, count, i;
- *
- * u_bit_scan_consecutive_range(&mask, &start, &count);
- *
- * for (i = 0; i < count; i++)
- * ... process element (start+i)
- * }
- */
-static inline void
-u_bit_scan_consecutive_range(unsigned *mask, int *start, int *count)
-{
- if (*mask == 0xffffffff) {
- *start = 0;
- *count = 32;
- *mask = 0;
- return;
- }
- *start = ffs(*mask) - 1;
- *count = ffs(~(*mask >> *start)) - 1;
- *mask &= ~(((1u << *count) - 1) << *start);
-}
/**
- * Return float bits.
+ * Convert float in [0,1] to uint8_t in [0,255] with clamping.
*/
-static inline unsigned
-fui( float f )
+static inline uint8_t
+float_to_ubyte(float f)
{
- union fi fi;
- fi.f = f;
- return fi.ui;
+ /* return 0 for NaN too */
+ if (!(f > 0.0f)) {
+ return (uint8_t) 0;
+ }
+ else if (f >= 1.0f) {
+ return (uint8_t) 255;
+ }
+ else {
+ union fi tmp;
+ tmp.f = f;
+ tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
+ return (uint8_t) tmp.i;
+ }
}
-
/**
- * Convert ubyte to float in [0, 1].
- * XXX a 256-entry lookup table would be slightly faster.
+ * Convert uint16_t to float in [0, 1].
*/
static inline float
-ubyte_to_float(ubyte ub)
+ushort_to_float(uint16_t us)
{
- return (float) ub * (1.0f / 255.0f);
+ return (float) us * (1.0f / 65535.0f);
}
/**
- * Convert float in [0,1] to ubyte in [0,255] with clamping.
+ * Convert float in [0,1] to uint16_t in [0,65535] with clamping.
*/
-static inline ubyte
-float_to_ubyte(float f)
+static inline uint16_t
+float_to_ushort(float f)
{
- union fi tmp;
-
- tmp.f = f;
- if (tmp.i < 0) {
- return (ubyte) 0;
+ /* return 0 for NaN too */
+ if (!(f > 0.0f)) {
+ return (uint16_t) 0;
}
- else if (tmp.i >= 0x3f800000 /* 1.0f */) {
- return (ubyte) 255;
+ else if (f >= 1.0f) {
+ return (uint16_t) 65535;
}
else {
- tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
- return (ubyte) tmp.i;
+ union fi tmp;
+ tmp.f = f;
+ tmp.f = tmp.f * (65535.0f/65536.0f) + 128.0f;
+ return (uint16_t) tmp.i;
}
}
@@ -672,7 +391,7 @@ float_to_byte_tex(float f)
static inline unsigned
util_logbase2(unsigned n)
{
-#if defined(PIPE_CC_GCC) && (PIPE_CC_GCC_VERSION >= 304)
+#if defined(HAVE___BUILTIN_CLZ)
return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
unsigned pos = 0;
@@ -685,6 +404,44 @@ util_logbase2(unsigned n)
#endif
}
+static inline uint64_t
+util_logbase2_64(uint64_t n)
+{
+#if defined(HAVE___BUILTIN_CLZLL)
+ return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
+#else
+ uint64_t pos = 0ull;
+ if (n >= 1ull<<32) { n >>= 32; pos += 32; }
+ if (n >= 1ull<<16) { n >>= 16; pos += 16; }
+ if (n >= 1ull<< 8) { n >>= 8; pos += 8; }
+ if (n >= 1ull<< 4) { n >>= 4; pos += 4; }
+ if (n >= 1ull<< 2) { n >>= 2; pos += 2; }
+ if (n >= 1ull<< 1) { pos += 1; }
+ return pos;
+#endif
+}
+
+/**
+ * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
+ * returns the smallest x such that n <= 2**x.
+ */
+static inline unsigned
+util_logbase2_ceil(unsigned n)
+{
+ if (n <= 1)
+ return 0;
+
+ return 1 + util_logbase2(n - 1);
+}
+
+static inline uint64_t
+util_logbase2_ceil64(uint64_t n)
+{
+ if (n <= 1)
+ return 0;
+
+ return 1ull + util_logbase2_64(n - 1);
+}
/**
* Returns the smallest power of two >= x
@@ -692,7 +449,7 @@ util_logbase2(unsigned n)
static inline unsigned
util_next_power_of_two(unsigned x)
{
-#if defined(PIPE_CC_GCC) && (PIPE_CC_GCC_VERSION >= 304)
+#if defined(HAVE___BUILTIN_CLZ)
if (x <= 1)
return 1;
@@ -703,7 +460,7 @@ util_next_power_of_two(unsigned x)
if (x <= 1)
return 1;
- if (util_is_power_of_two(x))
+ if (util_is_power_of_two_or_zero(x))
return x;
val--;
@@ -717,27 +474,32 @@ util_next_power_of_two(unsigned x)
#endif
}
-
-/**
- * Return number of bits set in n.
- */
-static inline unsigned
-util_bitcount(unsigned n)
+static inline uint64_t
+util_next_power_of_two64(uint64_t x)
{
-#if defined(PIPE_CC_GCC) && (PIPE_CC_GCC_VERSION >= 304)
- return __builtin_popcount(n);
+#if defined(HAVE___BUILTIN_CLZLL)
+ if (x <= 1)
+ return 1;
+
+ return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
- /* K&R classic bitcount.
- *
- * For each iteration, clear the LSB from the bitfield.
- * Requires only one iteration per set bit, instead of
- * one iteration per bit less than highest set bit.
- */
- unsigned bits = 0;
- for (bits; n; bits++) {
- n &= n - 1;
- }
- return bits;
+ uint64_t val = x;
+
+ if (x <= 1)
+ return 1;
+
+ if (util_is_power_of_two_or_zero64(x))
+ return x;
+
+ val--;
+ val = (val >> 1) | val;
+ val = (val >> 2) | val;
+ val = (val >> 4) | val;
+ val = (val >> 8) | val;
+ val = (val >> 16) | val;
+ val = (val >> 32) | val;
+ val++;
+ return val;
#endif
}
@@ -761,7 +523,7 @@ util_bitreverse(unsigned n)
* Convert from little endian to CPU byte order.
*/
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
@@ -781,8 +543,7 @@ util_bitreverse(unsigned n)
static inline uint32_t
util_bswap32(uint32_t n)
{
-/* We need the gcc version checks for non-autoconf build system */
-#if defined(HAVE___BUILTIN_BSWAP32) || (defined(PIPE_CC_GCC) && (PIPE_CC_GCC_VERSION >= 403))
+#if defined(HAVE___BUILTIN_BSWAP32)
return __builtin_bswap32(n);
#else
return (n >> 24) |
@@ -801,7 +562,7 @@ util_bswap64(uint64_t n)
#if defined(HAVE___BUILTIN_BSWAP64)
return __builtin_bswap64(n);
#else
- return ((uint64_t)util_bswap32(n) << 32) |
+ return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
util_bswap32((n >> 32));
#endif
}
@@ -817,12 +578,47 @@ util_bswap16(uint16_t n)
(n << 8);
}
+/**
+ * Extend sign.
+ */
+static inline int64_t
+util_sign_extend(uint64_t val, unsigned width)
+{
+ assert(width > 0);
+ if (val & (UINT64_C(1) << (width - 1))) {
+ return -(int64_t)((UINT64_C(1) << width) - val);
+ } else {
+ return val;
+ }
+}
+
+static inline void*
+util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
+{
+#if UTIL_ARCH_BIG_ENDIAN
+ size_t i, e;
+ assert(n % 4 == 0);
+
+ for (i = 0, e = n / 4; i < e; i++) {
+ uint32_t * restrict d = (uint32_t* restrict)dest;
+ const uint32_t * restrict s = (const uint32_t* restrict)src;
+ d[i] = util_bswap32(s[i]);
+ }
+ return dest;
+#else
+ return memcpy(dest, src, n);
+#endif
+}
/**
* Clamp X to [MIN, MAX].
* This is a macro to allow float, int, uint, etc. types.
+ * We arbitrarily turn NaN into MIN.
*/
-#define CLAMP( X, MIN, MAX ) ( (X)<(MIN) ? (MIN) : ((X)>(MAX) ? (MAX) : (X)) )
+#define CLAMP( X, MIN, MAX ) ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )
+
+/* Syntax sugar occurring frequently in graphics code */
+#define SATURATE( X ) CLAMP(X, 0.0f, 1.0f)
#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
@@ -835,6 +631,56 @@ util_bswap16(uint16_t n)
/**
+ * Align a value up to an alignment value
+ *
+ * If \c value is not already aligned to the requested alignment value, it
+ * will be rounded up.
+ *
+ * \param value Value to be rounded
+ * \param alignment Alignment value to be used. This must be a power of two.
+ *
+ * \sa ROUND_DOWN_TO()
+ */
+
+#if defined(ALIGN)
+#undef ALIGN
+#endif
+static inline uintptr_t
+ALIGN(uintptr_t value, int32_t alignment)
+{
+ assert(util_is_power_of_two_nonzero(alignment));
+ return (((value) + (alignment) - 1) & ~((alignment) - 1));
+}
+
+/**
+ * Like ALIGN(), but works with a non-power-of-two alignment.
+ */
+static inline uintptr_t
+ALIGN_NPOT(uintptr_t value, int32_t alignment)
+{
+ assert(alignment > 0);
+ return (value + alignment - 1) / alignment * alignment;
+}
+
+/**
+ * Align a value down to an alignment value
+ *
+ * If \c value is not already aligned to the requested alignment value, it
+ * will be rounded down.
+ *
+ * \param value Value to be rounded
+ * \param alignment Alignment value to be used. This must be a power of two.
+ *
+ * \sa ALIGN()
+ */
+static inline uint64_t
+ROUND_DOWN_TO(uint64_t value, int32_t alignment)
+{
+ assert(util_is_power_of_two_nonzero(alignment));
+ return ((value) & ~(alignment - 1));
+}
+
+/**
* Align a value, only works pot alignemnts.
*/
static inline int
@@ -843,6 +689,12 @@ align(int value, int alignment)
return (value + alignment - 1) & ~(alignment - 1);
}
+static inline uint64_t
+align64(uint64_t value, unsigned alignment)
+{
+ return (value + alignment - 1) & ~((uint64_t)alignment - 1);
+}
+
/**
* Works like align but on npot alignments.
*/
@@ -887,12 +739,14 @@ do { \
#endif
-static inline uint32_t util_unsigned_fixed(float value, unsigned frac_bits)
+static inline uint32_t
+util_unsigned_fixed(float value, unsigned frac_bits)
{
return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}
-static inline int32_t util_signed_fixed(float value, unsigned frac_bits)
+static inline int32_t
+util_signed_fixed(float value, unsigned frac_bits)
{
return (int32_t)(value * (1<<frac_bits));
}
@@ -904,7 +758,41 @@ util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
void
util_fpstate_set(unsigned fpstate);
+/**
+ * For indexed draw calls, return true if the vertex count to be drawn is
+ * much lower than the vertex count that has to be uploaded, meaning
+ * that the driver should flatten indices instead of trying to upload
+ * a too big range.
+ *
+ * This is used by vertex upload code in u_vbuf and glthread.
+ */
+static inline bool
+util_is_vbo_upload_ratio_too_large(unsigned draw_vertex_count,
+ unsigned upload_vertex_count)
+{
+ if (draw_vertex_count > 1024)
+ return upload_vertex_count > draw_vertex_count * 4;
+ else if (draw_vertex_count > 32)
+ return upload_vertex_count > draw_vertex_count * 8;
+ else
+ return upload_vertex_count > draw_vertex_count * 16;
+}
+bool util_invert_mat4x4(float *out, const float *m);
+
+/* Quantize the lod bias value to reduce the number of sampler state
+ * variants in gallium because apps use it for smooth mipmap transitions,
+ * thrashing cso_cache and degrading performance.
+ *
+ * This quantization matches the AMD hw specification, so having more
+ * precision would have no effect anyway.
+ */
+static inline float
+util_quantize_lod_bias(float lod)
+{
+ lod = CLAMP(lod, -16, 16);
+ return roundf(lod * 256) / 256;
+}
#ifdef __cplusplus
}
diff --git a/src/gallium/auxiliary/util/u_memory.h b/src/mesa/util/u_memory.h
index d53575e3..4cdccb66 100644
--- a/src/gallium/auxiliary/util/u_memory.h
+++ b/src/mesa/util/u_memory.h
@@ -34,10 +34,8 @@
#ifndef U_MEMORY_H
#define U_MEMORY_H
-
-#include "util/u_pointer.h"
#include "util/u_debug.h"
-#include "os/os_memory.h"
+#include "util/os_memory.h"
#ifdef __cplusplus
@@ -62,13 +60,22 @@ extern "C" {
#define align_malloc(_size, _alignment) os_malloc_aligned(_size, _alignment)
#define align_free(_ptr) os_free_aligned(_ptr)
+#define align_realloc(_ptr, _oldsize, _newsize, _alignment) os_realloc_aligned(_ptr, _oldsize, _newsize, _alignment)
+static inline void *
+align_calloc(size_t size, unsigned long alignment)
+{
+ void *ptr = align_malloc(size, alignment);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
/**
* Duplicate a block of memory.
*/
static inline void *
-mem_dup(const void *src, uint size)
+mem_dup(const void *src, size_t size)
{
void *dup = MALLOC(size);
if (dup)
@@ -78,18 +85,6 @@ mem_dup(const void *src, uint size)
/**
- * Number of elements in an array.
- */
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-#ifndef Elements
-#define Elements(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-
-/**
* Offset of a field in a struct, in bytes.
*/
#define Offset(TYPE, MEMBER) ((uintptr_t)&(((TYPE *)NULL)->MEMBER))
diff --git a/src/mesa/util/u_string.h b/src/mesa/util/u_string.h
new file mode 100644
index 00000000..4700d9ca
--- /dev/null
+++ b/src/mesa/util/u_string.h
@@ -0,0 +1,131 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Platform independent functions for string manipulation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_STRING_H_
+#define U_STRING_H_
+
+#if !defined(XF86_LIBC_H)
+#include <stdio.h>
+#endif
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+#include <limits.h>
+
+#include "util/macros.h" // PRINTFLIKE
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(_GNU_SOURCE) || defined(__APPLE__)
+
+#define strchrnul util_strchrnul
+static inline char *
+util_strchrnul(const char *s, char c)
+{
+ for (; *s && *s != c; ++s);
+
+ return (char *)s;
+}
+
+#endif
+
+#ifdef _WIN32
+
+#define sprintf util_sprintf
+static inline void
+ PRINTFLIKE(2, 3)
+util_sprintf(char *str, const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(str, INT_MAX, format, ap);
+ va_end(ap);
+}
+
+#define vasprintf util_vasprintf
+static inline int
+util_vasprintf(char **ret, const char *format, va_list ap)
+{
+ va_list ap_copy;
+
+ /* Compute length of output string first */
+ va_copy(ap_copy, ap);
+ int r = vsnprintf(NULL, 0, format, ap_copy);
+ va_end(ap_copy);
+
+ if (r < 0)
+ return -1;
+
+ *ret = (char *) malloc(r + 1);
+ if (!*ret)
+ return -1;
+
+ /* Print to buffer */
+ return vsnprintf(*ret, r + 1, format, ap);
+}
+
+#define asprintf util_asprintf
+static inline int
+util_asprintf(char **str, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+ va_start(args, fmt);
+ ret = vasprintf(str, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+#ifndef strcasecmp
+#define strcasecmp stricmp
+#endif
+
+#define strdup _strdup
+
+#if defined(_WIN32) && !defined(HAVE_STRTOK_R)
+#define strtok_r strtok_s
+#endif
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_STRING_H_ */
diff --git a/src/mesa/util/u_thread.h b/src/mesa/util/u_thread.h
new file mode 100644
index 00000000..c749ac3d
--- /dev/null
+++ b/src/mesa/util/u_thread.h
@@ -0,0 +1,396 @@
+/**************************************************************************
+ *
+ * Copyright 1999-2006 Brian Paul
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_THREAD_H_
+#define U_THREAD_H_
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "c11/threads.h"
+#include "detect_os.h"
+#include "macros.h"
+
+#ifdef HAVE_PTHREAD
+#include <signal.h>
+#ifdef HAVE_PTHREAD_NP_H
+#include <pthread_np.h>
+#endif
+#endif
+
+#ifdef __HAIKU__
+#include <OS.h>
+#endif
+
+#if DETECT_OS_LINUX
+#include <sched.h>
+#elif defined(_WIN32) && !defined(__CYGWIN__) && _WIN32_WINNT >= 0x0600
+#include <windows.h>
+#endif
+
+#ifdef __FreeBSD__
+/* pthread_np.h -> sys/param.h -> machine/param.h
+ * - defines ALIGN which clashes with our ALIGN
+ */
+#undef ALIGN
+#define cpu_set_t cpuset_t
+#endif
+
+/* For util_set_thread_affinity to size the mask. */
+#define UTIL_MAX_CPUS 1024 /* this should be enough */
+#define UTIL_MAX_L3_CACHES UTIL_MAX_CPUS
+
+/* Some highly performance-sensitive thread-local variables like the current GL
+ * context are declared with the initial-exec model on Linux. glibc allocates a
+ * fixed number of extra slots for initial-exec TLS variables at startup, and
+ * Mesa relies on (even if it's dlopen()ed after init) being able to fit into
+ * those. This model saves the call to look up the address of the TLS variable.
+ *
+ * However, if we don't have this TLS model available on the platform, then we
+ * still want to use normal TLS (which involves a function call, but not the
+ * expensive pthread_getspecific() or its equivalent).
+ */
+#ifdef _MSC_VER
+#define __THREAD_INITIAL_EXEC __declspec(thread)
+#elif defined(__ANDROID__)
+/* Android 29 gained ELF TLS support, but it doesn't support initial-exec and
+ * it will throw:
+ *
+ * dlopen failed: TLS symbol "(null)" in dlopened
+ * "/vendor/lib64/egl/libEGL_mesa.so" referenced from
+ * "/vendor/lib64/egl/libEGL_mesa.so" using IE access model.
+ */
+#define __THREAD_INITIAL_EXEC __thread
+#else
+#define __THREAD_INITIAL_EXEC __thread __attribute__((tls_model("initial-exec")))
+#endif
+
+static inline int
+util_get_current_cpu(void)
+{
+#if DETECT_OS_LINUX && !defined(__ANDROID__)
+ return sched_getcpu();
+
+#elif defined(_WIN32) && !defined(__CYGWIN__) && _WIN32_WINNT >= 0x0600
+ return GetCurrentProcessorNumber();
+
+#else
+ return -1;
+#endif
+}
+
+static inline thrd_t u_thread_create(int (*routine)(void *), void *param)
+{
+ thrd_t thread;
+#ifdef HAVE_PTHREAD
+ sigset_t saved_set, new_set;
+ int ret;
+
+ sigfillset(&new_set);
+ sigdelset(&new_set, SIGSYS);
+ pthread_sigmask(SIG_BLOCK, &new_set, &saved_set);
+ ret = thrd_create( &thread, routine, param );
+ pthread_sigmask(SIG_SETMASK, &saved_set, NULL);
+#else
+ int ret;
+ ret = thrd_create( &thread, routine, param );
+#endif
+ if (ret)
+ return 0;
+
+ return thread;
+}
+
+static inline void u_thread_setname( const char *name )
+{
+#if defined(HAVE_PTHREAD)
+#if DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS
+ int ret = pthread_setname_np(pthread_self(), name);
+ if (ret == ERANGE) {
+ char buf[16];
+ const size_t len = MIN2(strlen(name), ARRAY_SIZE(buf) - 1);
+ memcpy(buf, name, len);
+ buf[len] = '\0';
+ pthread_setname_np(pthread_self(), buf);
+ }
+#elif DETECT_OS_FREEBSD || DETECT_OS_OPENBSD
+ pthread_set_name_np(pthread_self(), name);
+#elif DETECT_OS_NETBSD
+ pthread_setname_np(pthread_self(), "%s", (void *)name);
+#elif DETECT_OS_APPLE
+ pthread_setname_np(name);
+#elif DETECT_OS_HAIKU
+ rename_thread(find_thread(NULL), name);
+#else
+#warning Not sure how to call pthread_setname_np
+#endif
+#endif
+ (void)name;
+}
+
+/**
+ * Set thread affinity.
+ *
+ * \param thread Thread
+ * \param mask Set this affinity mask
+ * \param old_mask Previous affinity mask returned if not NULL
+ * \param num_mask_bits Number of bits in both masks
+ * \return true on success
+ */
+static inline bool
+util_set_thread_affinity(thrd_t thread,
+ const uint32_t *mask,
+ uint32_t *old_mask,
+ unsigned num_mask_bits)
+{
+#if defined(HAVE_PTHREAD_SETAFFINITY)
+ cpu_set_t cpuset;
+
+ if (old_mask) {
+ if (pthread_getaffinity_np(thread, sizeof(cpuset), &cpuset) != 0)
+ return false;
+
+ memset(old_mask, 0, num_mask_bits / 8);
+ for (unsigned i = 0; i < num_mask_bits && i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &cpuset))
+ old_mask[i / 32] |= 1u << (i % 32);
+ }
+ }
+
+ CPU_ZERO(&cpuset);
+ for (unsigned i = 0; i < num_mask_bits && i < CPU_SETSIZE; i++) {
+ if (mask[i / 32] & (1u << (i % 32)))
+ CPU_SET(i, &cpuset);
+ }
+ return pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset) == 0;
+
+#elif defined(_WIN32) && !defined(__CYGWIN__)
+ DWORD_PTR m = mask[0];
+
+ if (sizeof(m) > 4 && num_mask_bits > 32)
+ m |= (uint64_t)mask[1] << 32;
+
+ m = SetThreadAffinityMask(thread, m);
+ if (!m)
+ return false;
+
+ if (old_mask) {
+ memset(old_mask, 0, num_mask_bits / 8);
+
+ old_mask[0] = m;
+#ifdef _WIN64
+ old_mask[1] = m >> 32;
+#endif
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+static inline bool
+util_set_current_thread_affinity(const uint32_t *mask,
+ uint32_t *old_mask,
+ unsigned num_mask_bits)
+{
+#if defined(HAVE_PTHREAD_SETAFFINITY)
+ return util_set_thread_affinity(pthread_self(), mask, old_mask,
+ num_mask_bits);
+
+#elif defined(_WIN32) && !defined(__CYGWIN__)
+ /* The GetCurrentThread() pseudo handle is only valid within the current thread. */
+ return util_set_thread_affinity(GetCurrentThread(), mask, old_mask,
+ num_mask_bits);
+
+#else
+ return false;
+#endif
+}
+
+
+/*
+ * Thread statistics.
+ */
+
+/* Return the time of a thread's CPU time clock. */
+static inline int64_t
+util_thread_get_time_nano(thrd_t thread)
+{
+#if defined(HAVE_PTHREAD) && !defined(__APPLE__) && !defined(__HAIKU__)
+ struct timespec ts;
+ clockid_t cid;
+
+ pthread_getcpuclockid(thread, &cid);
+ clock_gettime(cid, &ts);
+ return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
+#else
+ return 0;
+#endif
+}
+
+/* Return the time of the current thread's CPU time clock. */
+static inline int64_t
+util_current_thread_get_time_nano(void)
+{
+#if defined(HAVE_PTHREAD)
+ return util_thread_get_time_nano(pthread_self());
+
+#elif defined(_WIN32) && !defined(__CYGWIN__)
+ /* The GetCurrentThread() pseudo handle is only valid within the current thread. */
+ return util_thread_get_time_nano(GetCurrentThread());
+
+#else
+ return 0;
+#endif
+}
+
+static inline bool u_thread_is_self(thrd_t thread)
+{
+#if defined(HAVE_PTHREAD)
+ return pthread_equal(pthread_self(), thread);
+#endif
+ return false;
+}
+
+/*
+ * util_barrier
+ */
+
+#if defined(HAVE_PTHREAD) && !defined(__APPLE__) && !defined(__HAIKU__)
+
+typedef pthread_barrier_t util_barrier;
+
+static inline void util_barrier_init(util_barrier *barrier, unsigned count)
+{
+ pthread_barrier_init(barrier, NULL, count);
+}
+
+static inline void util_barrier_destroy(util_barrier *barrier)
+{
+ pthread_barrier_destroy(barrier);
+}
+
+static inline void util_barrier_wait(util_barrier *barrier)
+{
+ pthread_barrier_wait(barrier);
+}
+
+
+#else /* If the OS doesn't have its own, implement barriers using a mutex and a condvar */
+
+typedef struct {
+ unsigned count;
+ unsigned waiters;
+ uint64_t sequence;
+ mtx_t mutex;
+ cnd_t condvar;
+} util_barrier;
+
+static inline void util_barrier_init(util_barrier *barrier, unsigned count)
+{
+ barrier->count = count;
+ barrier->waiters = 0;
+ barrier->sequence = 0;
+ (void) mtx_init(&barrier->mutex, mtx_plain);
+ cnd_init(&barrier->condvar);
+}
+
+static inline void util_barrier_destroy(util_barrier *barrier)
+{
+ assert(barrier->waiters == 0);
+ mtx_destroy(&barrier->mutex);
+ cnd_destroy(&barrier->condvar);
+}
+
+static inline void util_barrier_wait(util_barrier *barrier)
+{
+ mtx_lock(&barrier->mutex);
+
+ assert(barrier->waiters < barrier->count);
+ barrier->waiters++;
+
+ if (barrier->waiters < barrier->count) {
+ uint64_t sequence = barrier->sequence;
+
+ do {
+ cnd_wait(&barrier->condvar, &barrier->mutex);
+ } while (sequence == barrier->sequence);
+ } else {
+ barrier->waiters = 0;
+ barrier->sequence++;
+ cnd_broadcast(&barrier->condvar);
+ }
+
+ mtx_unlock(&barrier->mutex);
+}
+
+#endif
+
+/*
+ * Thread-id's.
+ *
+ * thrd_current() is not portable to windows (or at least not in a desirable
+ * way), so thread_id's provide an alternative mechanism
+ */
+
+#ifdef _WIN32
+typedef DWORD thread_id;
+#else
+typedef thrd_t thread_id;
+#endif
+
+static inline thread_id
+util_get_thread_id(void)
+{
+ /*
+ * XXX: Callers of this function assume it is a lightweight function.
+ * But unfortunately C11's thrd_current() gives no such guarantees. In
+ * fact, it's pretty hard to have a compliant implementation of
+ * thrd_current() on Windows with such characteristics. So for now, we
+ * side-step this mess and use Windows thread primitives directly here.
+ */
+#ifdef _WIN32
+ return GetCurrentThreadId();
+#else
+ return thrd_current();
+#endif
+}
+
+
+static inline int
+util_thread_id_equal(thread_id t1, thread_id t2)
+{
+#ifdef _WIN32
+ return t1 == t2;
+#else
+ return thrd_equal(t1, t2);
+#endif
+}
+
+#endif /* U_THREAD_H_ */
diff --git a/src/gallium/auxiliary/util/xxhash.h b/src/mesa/util/xxhash.h
index f7a4b405..eb9e8659 100644
--- a/src/gallium/auxiliary/util/xxhash.h
+++ b/src/mesa/util/xxhash.h
@@ -78,6 +78,8 @@ XXH32 6.8 GB/s 6.0 GB/s
#define XXH_FORCE_ALIGN_CHECK 0
#define XXH_FORCE_MEMORY_ACCESS 0
+#include "util/compiler.h" /* for FALLTHROUGH */
+
#if defined (__cplusplus)
extern "C" {
#endif
@@ -729,41 +731,41 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
} else {
switch(len&15) /* or switch(bEnd - p) */ {
case 12: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 8: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 4: PROCESS4;
return XXH32_avalanche(h32);
case 13: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 9: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 5: PROCESS4;
PROCESS1;
return XXH32_avalanche(h32);
case 14: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 10: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 6: PROCESS4;
PROCESS1;
PROCESS1;
return XXH32_avalanche(h32);
case 15: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 11: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 7: PROCESS4;
- /* fallthrough */
+ FALLTHROUGH;
case 3: PROCESS1;
- /* fallthrough */
+ FALLTHROUGH;
case 2: PROCESS1;
- /* fallthrough */
+ FALLTHROUGH;
case 1: PROCESS1;
- /* fallthrough */
+ FALLTHROUGH;
case 0: return XXH32_avalanche(h32);
}
XXH_ASSERT(0);
@@ -1144,63 +1146,63 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
} else {
switch(len & 31) {
case 24: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 16: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 8: PROCESS8_64;
return XXH64_avalanche(h64);
case 28: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 20: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 12: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 4: PROCESS4_64;
return XXH64_avalanche(h64);
case 25: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 17: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 9: PROCESS8_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 29: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 21: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 13: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 5: PROCESS4_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 26: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 18: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 10: PROCESS8_64;
PROCESS1_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 30: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 22: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 14: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 6: PROCESS4_64;
PROCESS1_64;
PROCESS1_64;
return XXH64_avalanche(h64);
case 27: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 19: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 11: PROCESS8_64;
PROCESS1_64;
PROCESS1_64;
@@ -1208,19 +1210,19 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
return XXH64_avalanche(h64);
case 31: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 23: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 15: PROCESS8_64;
- /* fallthrough */
+ FALLTHROUGH;
case 7: PROCESS4_64;
- /* fallthrough */
+ FALLTHROUGH;
case 3: PROCESS1_64;
- /* fallthrough */
+ FALLTHROUGH;
case 2: PROCESS1_64;
- /* fallthrough */
+ FALLTHROUGH;
case 1: PROCESS1_64;
- /* fallthrough */
+ FALLTHROUGH;
case 0: return XXH64_avalanche(h64);
}
}
diff --git a/src/meson.build b/src/meson.build
index ac8df76d..d78ac8c9 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -21,6 +21,7 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
+subdir('mesa')
subdir('gallium')
virgl_sources = [
@@ -77,6 +78,8 @@ vrend_winsys_glx_sources = [
venus_sources = [
'venus_hw.h',
'venus/venus-protocol/vn_protocol_renderer.h',
+ 'venus/vkr_allocator.c',
+ 'venus/vkr_allocator.h',
'venus/vkr_buffer.c',
'venus/vkr_buffer.h',
'venus/vkr_command_buffer.c',
@@ -132,6 +135,39 @@ venus_codegen = custom_target(
command : [prog_python, '@INPUT0@', '-o', '@OUTDIR@', '@INPUT1@'],
)
+drm_sources = [
+ 'drm/drm_fence.c',
+ 'drm/drm_fence.h',
+ 'drm/drm_renderer.c',
+ 'drm/drm_renderer.h',
+ 'drm/drm_util.c',
+ 'drm/drm_util.h',
+]
+
+drm_msm_sources = [
+ 'drm/drm-uapi/msm_drm.h',
+ 'drm/msm/msm_proto.h',
+ 'drm/msm/msm_renderer.c',
+ 'drm/msm/msm_renderer.h',
+]
+
+proxy_sources = [
+ 'proxy/proxy_client.c',
+ 'proxy/proxy_common.c',
+ 'proxy/proxy_context.c',
+ 'proxy/proxy_renderer.c',
+ 'proxy/proxy_server.c',
+ 'proxy/proxy_socket.c',
+]
+
+video_sources = [
+ 'virgl_video_hw.h',
+ 'virgl_video.c',
+ 'virgl_video.h',
+ 'vrend_video.c',
+ 'vrend_video.h',
+]
+
virgl_depends = [
gallium_dep,
epoxy_dep,
@@ -166,17 +202,34 @@ if with_venus
virgl_depends += [venus_dep]
endif
+if with_drm
+ virgl_sources += drm_sources
+endif
+
+if with_drm_msm
+ virgl_sources += drm_msm_sources
+endif
+
+if with_render_server
+ virgl_sources += proxy_sources
+endif
+
+if with_video
+ virgl_sources += video_sources
+ virgl_depends += [libva_dep, libvadrm_dep]
+endif
+
libvirgl = static_library(
'virgl',
virgl_sources,
- include_directories: [inc_gallium, inc_configuration, 'venus'],
+ include_directories: [inc_gallium, inc_configuration, 'venus', 'drm', 'drm/drm-uapi'],
dependencies : virgl_depends,
)
libvirgl_inc = [
inc_gallium,
inc_configuration,
- include_directories(['.', 'venus'])
+ include_directories(['.', 'venus', 'drm'])
]
libvirgl_dep = declare_dependency(
@@ -195,6 +248,12 @@ libvirglrenderer = library(
install : true
)
+pkg = import('pkgconfig')
+pkg.generate(libvirglrenderer,
+ description: 'virgl GL renderer',
+ subdirs: 'virgl'
+)
+
libvirglrenderer_dep = declare_dependency(
link_with: libvirglrenderer,
include_directories: libvirgl_inc,
diff --git a/src/proxy/.clang-format b/src/proxy/.clang-format
new file mode 120000
index 00000000..2d1184b6
--- /dev/null
+++ b/src/proxy/.clang-format
@@ -0,0 +1 @@
+../venus/.clang-format \ No newline at end of file
diff --git a/src/proxy/proxy_client.c b/src/proxy/proxy_client.c
new file mode 100644
index 00000000..97e025fd
--- /dev/null
+++ b/src/proxy/proxy_client.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "proxy_client.h"
+
+#include <unistd.h>
+
+#include "server/render_protocol.h"
+
+#include "proxy_server.h"
+
+bool
+proxy_client_destroy_context(struct proxy_client *client, uint32_t ctx_id)
+{
+ const struct render_client_op_destroy_context_request req = {
+ .header.op = RENDER_CLIENT_OP_DESTROY_CONTEXT,
+ .ctx_id = ctx_id,
+ };
+
+ return proxy_socket_send_request(&client->socket, &req, sizeof(req));
+}
+
+bool
+proxy_client_create_context(struct proxy_client *client,
+ uint32_t ctx_id,
+ size_t ctx_name_len,
+ const char *ctx_name,
+ int *out_ctx_fd)
+{
+ struct render_client_op_create_context_request req = {
+ .header.op = RENDER_CLIENT_OP_CREATE_CONTEXT,
+ .ctx_id = ctx_id,
+ };
+
+ const size_t len = MIN2(ctx_name_len, sizeof(req.ctx_name) - 1);
+ memcpy(req.ctx_name, ctx_name, len);
+
+ if (!proxy_socket_send_request(&client->socket, &req, sizeof(req)))
+ return false;
+
+ struct render_client_op_create_context_reply reply;
+ int fd_count;
+ int ctx_fd;
+ if (!proxy_socket_receive_reply_with_fds(&client->socket, &reply, sizeof(reply),
+ &ctx_fd, 1, &fd_count))
+ return false;
+
+ if (reply.ok != fd_count) {
+ if (fd_count)
+ close(ctx_fd);
+ return false;
+ } else if (!reply.ok) {
+ return false;
+ }
+
+ if (!proxy_socket_is_seqpacket(ctx_fd)) {
+ close(ctx_fd);
+ return false;
+ }
+
+ *out_ctx_fd = ctx_fd;
+ return true;
+}
+
+bool
+proxy_client_reset(struct proxy_client *client)
+{
+ const struct render_client_op_reset_request req = {
+ .header.op = RENDER_CLIENT_OP_RESET,
+ };
+ return proxy_socket_send_request(&client->socket, &req, sizeof(req));
+}
+
+void
+proxy_client_destroy(struct proxy_client *client)
+{
+ proxy_socket_fini(&client->socket);
+ free(client);
+}
+
+static bool
+proxy_client_init(struct proxy_client *client, uint32_t flags)
+{
+ const struct render_client_op_init_request req = {
+ .header.op = RENDER_CLIENT_OP_INIT,
+ .flags = flags,
+ };
+ return proxy_socket_send_request(&client->socket, &req, sizeof(req));
+}
+
+struct proxy_client *
+proxy_client_create(struct proxy_server *srv, uint32_t flags)
+{
+ struct proxy_client *client = calloc(1, sizeof(*client));
+ if (!client)
+ return NULL;
+
+ const int client_fd = proxy_server_connect(srv);
+ if (client_fd < 0) {
+ free(client);
+ return NULL;
+ }
+
+ proxy_socket_init(&client->socket, client_fd);
+
+ if (!proxy_client_init(client, flags)) {
+ proxy_socket_fini(&client->socket);
+ free(client);
+ return NULL;
+ }
+
+ return client;
+}
diff --git a/src/proxy/proxy_client.h b/src/proxy/proxy_client.h
new file mode 100644
index 00000000..d20af311
--- /dev/null
+++ b/src/proxy/proxy_client.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef PROXY_CLIENT_H
+#define PROXY_CLIENT_H
+
+#include "proxy_common.h"
+
+struct proxy_client {
+ struct proxy_socket socket;
+};
+
+struct proxy_client *
+proxy_client_create(struct proxy_server *srv, uint32_t flags);
+
+void
+proxy_client_destroy(struct proxy_client *client);
+
+bool
+proxy_client_reset(struct proxy_client *client);
+
+bool
+proxy_client_create_context(struct proxy_client *client,
+ uint32_t ctx_id,
+ size_t ctx_name_len,
+ const char *ctx_name,
+ int *out_ctx_fd);
+
+bool
+proxy_client_destroy_context(struct proxy_client *client, uint32_t ctx_id);
+
+#endif /* PROXY_CLIENT_H */
diff --git a/src/proxy/proxy_common.c b/src/proxy/proxy_common.c
new file mode 100644
index 00000000..ff8f8fc8
--- /dev/null
+++ b/src/proxy/proxy_common.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "proxy_common.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+struct proxy_renderer proxy_renderer;
+
+void
+proxy_log(const char *fmt, ...)
+{
+ const char prefix[] = "proxy: ";
+ char line[1024];
+ size_t len;
+ va_list va;
+ int ret;
+
+ len = ARRAY_SIZE(prefix) - 1;
+ memcpy(line, prefix, len);
+
+ va_start(va, fmt);
+ ret = vsnprintf(line + len, ARRAY_SIZE(line) - len, fmt, va);
+ va_end(va);
+
+ if (ret < 0) {
+ const char log_error[] = "log error";
+ memcpy(line + len, log_error, ARRAY_SIZE(log_error) - 1);
+ len += ARRAY_SIZE(log_error) - 1;
+ } else if ((size_t)ret < ARRAY_SIZE(line) - len) {
+ len += ret;
+ } else {
+ len = ARRAY_SIZE(line) - 1;
+ }
+
+ /* make room for newline */
+ if (len + 1 >= ARRAY_SIZE(line))
+ len--;
+
+ line[len++] = '\n';
+ line[len] = '\0';
+
+ virgl_log("%s", line);
+}
diff --git a/src/proxy/proxy_common.h b/src/proxy/proxy_common.h
new file mode 100644
index 00000000..920b0f35
--- /dev/null
+++ b/src/proxy/proxy_common.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef PROXY_COMMON_H
+#define PROXY_COMMON_H
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "util/hash_table.h"
+#include "util/list.h"
+#include "util/macros.h"
+#include "virgl_util.h"
+#include "virglrenderer.h"
+
+#include "proxy_renderer.h"
+#include "proxy_socket.h"
+
+struct proxy_client;
+struct proxy_context;
+struct proxy_server;
+struct proxy_socket;
+
+struct proxy_renderer {
+ const struct proxy_renderer_cbs *cbs;
+ uint32_t flags;
+
+ struct proxy_server *server;
+ struct proxy_client *client;
+};
+
+extern struct proxy_renderer proxy_renderer;
+
+void
+proxy_log(const char *fmt, ...);
+
+#endif /* PROXY_COMMON_H */
diff --git a/src/proxy/proxy_context.c b/src/proxy/proxy_context.c
new file mode 100644
index 00000000..f254afe2
--- /dev/null
+++ b/src/proxy/proxy_context.c
@@ -0,0 +1,683 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "proxy_context.h"
+
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "server/render_protocol.h"
+#include "util/anon_file.h"
+#include "util/bitscan.h"
+
+#include "proxy_client.h"
+
/* a fence queued on a proxy_timeline; recycled through ctx->free_fences */
struct proxy_fence {
   uint32_t flags;
   /* per-timeline sequence number; wraps around (see proxy_fence_is_signaled) */
   uint32_t seqno;
   uint64_t fence_id;
   struct list_head head;
};
+
+static inline void
+proxy_context_resource_add(struct proxy_context *ctx, uint32_t res_id)
+{
+ assert(!_mesa_hash_table_search(ctx->resource_table, (void *)(uintptr_t)res_id));
+ _mesa_hash_table_insert(ctx->resource_table, (void *)(uintptr_t)res_id, NULL);
+}
+
+static inline bool
+proxy_context_resource_find(struct proxy_context *ctx, uint32_t res_id)
+{
+ return _mesa_hash_table_search(ctx->resource_table, (void *)(uintptr_t)res_id);
+}
+
+static inline void
+proxy_context_resource_remove(struct proxy_context *ctx, uint32_t res_id)
+{
+ _mesa_hash_table_remove_key(ctx->resource_table, (void *)(uintptr_t)res_id);
+}
+
+static inline bool
+proxy_context_resource_table_init(struct proxy_context *ctx)
+{
+ ctx->resource_table = _mesa_hash_table_create_u32_keys(NULL);
+ return ctx->resource_table;
+}
+
+static inline void
+proxy_context_resource_table_fini(struct proxy_context *ctx)
+{
+ _mesa_hash_table_destroy(ctx->resource_table, NULL);
+}
+
+static bool
+proxy_fence_is_signaled(const struct proxy_fence *fence, uint32_t cur_seqno)
+{
+ /* takes wrapping into account */
+ const uint32_t d = cur_seqno - fence->seqno;
+ return d < INT32_MAX;
+}
+
+static struct proxy_fence *
+proxy_context_alloc_fence(struct proxy_context *ctx)
+{
+ struct proxy_fence *fence = NULL;
+
+ if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
+ mtx_lock(&ctx->free_fences_mutex);
+
+ if (!list_is_empty(&ctx->free_fences)) {
+ fence = list_first_entry(&ctx->free_fences, struct proxy_fence, head);
+ list_del(&fence->head);
+ }
+
+ if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
+ mtx_unlock(&ctx->free_fences_mutex);
+
+ return fence ? fence : malloc(sizeof(*fence));
+}
+
+static void
+proxy_context_free_fence(struct proxy_context *ctx, struct proxy_fence *fence)
+{
+ if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
+ mtx_lock(&ctx->free_fences_mutex);
+
+ list_add(&fence->head, &ctx->free_fences);
+
+ if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
+ mtx_unlock(&ctx->free_fences_mutex);
+}
+
/* Read the current seqno of one timeline.  The value lives in shmem and is
 * advanced by the render worker, hence the atomic load.
 */
static uint32_t
proxy_context_load_timeline_seqno(struct proxy_context *ctx, uint32_t ring_idx)
{
   return atomic_load(&ctx->timeline_seqnos[ring_idx]);
}
+
/* Retire the signaled fences of one timeline.  The caller holds
 * timeline_mutex when VIRGL_RENDERER_ASYNC_FENCE_CB is set.
 *
 * Returns true when the timeline has no pending fence left afterwards.
 */
static bool
proxy_context_retire_timeline_fences_locked(struct proxy_context *ctx,
                                            uint32_t ring_idx,
                                            uint32_t cur_seqno)
{
   struct proxy_timeline *timeline = &ctx->timelines[ring_idx];
   bool force_retire_all = false;

   /* check if the socket has been disconnected (i.e., the other end has
    * crashed) if no progress is made after a while
    */
   if (timeline->cur_seqno == cur_seqno && !list_is_empty(&timeline->fences)) {
      timeline->cur_seqno_stall_count++;
      if (timeline->cur_seqno_stall_count < 100 ||
          proxy_socket_is_connected(&ctx->socket))
         return false;

      /* socket has been disconnected */
      force_retire_all = true;
   }

   timeline->cur_seqno = cur_seqno;
   timeline->cur_seqno_stall_count = 0;

   /* fences are appended in submission order (list_addtail in submit_fence);
    * stop at the first fence that is not yet signaled
    */
   list_for_each_entry_safe (struct proxy_fence, fence, &timeline->fences, head) {
      if (!proxy_fence_is_signaled(fence, timeline->cur_seqno) && !force_retire_all)
         return false;

      ctx->base.fence_retire(&ctx->base, ring_idx, fence->fence_id);

      list_del(&fence->head);
      proxy_context_free_fence(ctx, fence);
   }

   return true;
}
+
+static void
+proxy_context_retire_fences_internal(struct proxy_context *ctx)
+{
+ if (ctx->sync_thread.fence_eventfd >= 0)
+ flush_eventfd(ctx->sync_thread.fence_eventfd);
+
+ if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
+ mtx_lock(&ctx->timeline_mutex);
+
+ uint64_t new_busy_mask = 0;
+ uint64_t old_busy_mask = ctx->timeline_busy_mask;
+ while (old_busy_mask) {
+ const uint32_t ring_idx = u_bit_scan64(&old_busy_mask);
+ const uint32_t cur_seqno = proxy_context_load_timeline_seqno(ctx, ring_idx);
+ if (!proxy_context_retire_timeline_fences_locked(ctx, ring_idx, cur_seqno))
+ new_busy_mask |= 1ull << ring_idx;
+ }
+
+ ctx->timeline_busy_mask = new_busy_mask;
+
+ if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
+ mtx_unlock(&ctx->timeline_mutex);
+}
+
/* Thread entry for the async fence callback mode: block on the fence eventfd
 * and the context socket, and retire fences whenever either wakes us up.
 */
static int
proxy_context_sync_thread(void *arg)
{
   struct proxy_context *ctx = arg;
   struct pollfd poll_fds[2] = {
      [0] = {
         .fd = ctx->sync_thread.fence_eventfd,
         .events = POLLIN,
      },
      [1] = {
         /* no .events requested: the socket entry only reports
          * POLLERR/POLLHUP/POLLNVAL-style conditions
          */
         .fd = ctx->socket.fd,
      },
   };

   assert(proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB);

   while (!ctx->sync_thread.stop) {
      const int ret = poll(poll_fds, ARRAY_SIZE(poll_fds), -1);
      if (ret <= 0) {
         if (ret < 0 && (errno == EINTR || errno == EAGAIN))
            continue;

         proxy_log("failed to poll fence eventfd");
         break;
      }

      proxy_context_retire_fences_internal(ctx);
   }

   return 0;
}
+
/* virgl_context::submit_fence: queue a fence on the ring's timeline and
 * forward a SUBMIT_FENCE request to the render worker.
 *
 * Returns 0 on success, -EINVAL for a bad ring index, -ENOMEM on allocation
 * failure, and -1 when the request cannot be sent (the fence is rolled back).
 */
static int
proxy_context_submit_fence(struct virgl_context *base,
                           uint32_t flags,
                           uint32_t ring_idx,
                           uint64_t fence_id)
{
   struct proxy_context *ctx = (struct proxy_context *)base;
   /* snapshot for rollback if the request fails to send */
   const uint64_t old_busy_mask = ctx->timeline_busy_mask;

   if (ring_idx >= PROXY_CONTEXT_TIMELINE_COUNT)
      return -EINVAL;

   struct proxy_timeline *timeline = &ctx->timelines[ring_idx];
   struct proxy_fence *fence = proxy_context_alloc_fence(ctx);
   if (!fence)
      return -ENOMEM;

   fence->flags = flags;
   fence->seqno = timeline->next_seqno++;
   fence->fence_id = fence_id;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->timeline_mutex);

   list_addtail(&fence->head, &timeline->fences);
   ctx->timeline_busy_mask |= 1ull << ring_idx;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->timeline_mutex);

   const struct render_context_op_submit_fence_request req = {
      .header.op = RENDER_CONTEXT_OP_SUBMIT_FENCE,
      .flags = flags,
      .ring_index = ring_idx,
      .seqno = fence->seqno,
   };
   if (proxy_socket_send_request(&ctx->socket, &req, sizeof(req)))
      return 0;

   /* recover timeline fences and busy_mask on submit_fence request failure */
   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->timeline_mutex);

   list_del(&fence->head);
   ctx->timeline_busy_mask = old_busy_mask;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->timeline_mutex);

   proxy_context_free_fence(ctx, fence);
   proxy_log("failed to submit fence");
   return -1;
}
+
+static void
+proxy_context_retire_fences(struct virgl_context *base)
+{
+ struct proxy_context *ctx = (struct proxy_context *)base;
+
+ assert(!(proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB));
+ proxy_context_retire_fences_internal(ctx);
+}
+
+static int
+proxy_context_get_fencing_fd(struct virgl_context *base)
+{
+ struct proxy_context *ctx = (struct proxy_context *)base;
+
+ assert(!(proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB));
+ return ctx->sync_thread.fence_eventfd;
+}
+
/* virgl_context::submit_cmd: forward a command buffer to the render worker.
 * The head of the buffer is inlined into the request; any tail is sent as a
 * second message.  Always waits for the worker's reply.
 *
 * Returns 0 on success (including empty submissions) and -1 on failure.
 */
static int
proxy_context_submit_cmd(struct virgl_context *base, const void *buffer, size_t size)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   if (!size)
      return 0;

   struct render_context_op_submit_cmd_request req = {
      .header.op = RENDER_CONTEXT_OP_SUBMIT_CMD,
      .size = size,
   };

   /* copy as much of the buffer as fits in the request itself */
   const size_t inlined = MIN2(size, sizeof(req.cmd));
   memcpy(req.cmd, buffer, inlined);

   if (!proxy_socket_send_request(&ctx->socket, &req, sizeof(req))) {
      proxy_log("failed to submit cmd");
      return -1;
   }

   /* send the remainder that did not fit inline */
   if (size > inlined) {
      if (!proxy_socket_send_request(&ctx->socket, (const char *)buffer + inlined,
                                     size - inlined)) {
         proxy_log("failed to submit large cmd buffer");
         return -1;
      }
   }

   /* XXX this is forced a roundtrip to avoid surprises; vtest requires this
    * at least
    */
   struct render_context_op_submit_cmd_reply reply;
   if (!proxy_socket_receive_reply(&ctx->socket, &reply, sizeof(reply))) {
      proxy_log("failed to get submit result");
      return -1;
   }

   return reply.ok ? 0 : -1;
}
+
+static bool
+validate_resource_fd_shm(int fd, uint64_t expected_size)
+{
+ static const int blocked_seals = F_SEAL_WRITE;
+
+ const int seals = fcntl(fd, F_GET_SEALS);
+ if (seals & blocked_seals) {
+ proxy_log("failed to validate shm seals(%d): blocked(%d)", seals, blocked_seals);
+ return false;
+ }
+
+ const uint64_t size = lseek(fd, 0, SEEK_END);
+ if (size != expected_size) {
+ proxy_log("failed to validate shm size(%" PRIu64 ") expected(%" PRIu64 ")", size,
+ expected_size);
+ return false;
+ }
+
+ return true;
+}
+
+static inline int
+add_required_seals_to_fd(int fd)
+{
+ return fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_SHRINK | F_SEAL_GROW);
+}
+
/* virgl_context::get_blob: ask the render worker to create a blob resource
 * and return its fd to the caller.  Returns 0 on success, -1 on failure.
 */
static int
proxy_context_get_blob(struct virgl_context *base,
                       uint32_t res_id,
                       uint64_t blob_id,
                       uint64_t blob_size,
                       uint32_t blob_flags,
                       struct virgl_context_blob *blob)
{
   /* RENDER_CONTEXT_OP_CREATE_RESOURCE implies resource attach, thus proxy tracks
    * resources created here to avoid double attaching the same resource when proxy is on
    * attach_resource callback.
    */
   struct proxy_context *ctx = (struct proxy_context *)base;

   const struct render_context_op_create_resource_request req = {
      .header.op = RENDER_CONTEXT_OP_CREATE_RESOURCE,
      .res_id = res_id,
      .blob_id = blob_id,
      .blob_size = blob_size,
      .blob_flags = blob_flags,
   };
   if (!proxy_socket_send_request(&ctx->socket, &req, sizeof(req))) {
      proxy_log("failed to get blob %" PRIu64, blob_id);
      return -1;
   }

   /* the reply is expected to carry exactly one fd */
   struct render_context_op_create_resource_reply reply;
   int reply_fd;
   int reply_fd_count;
   if (!proxy_socket_receive_reply_with_fds(&ctx->socket, &reply, sizeof(reply),
                                            &reply_fd, 1, &reply_fd_count)) {
      proxy_log("failed to get reply of blob %" PRIu64, blob_id);
      return -1;
   }

   if (!reply_fd_count) {
      proxy_log("invalid reply for blob %" PRIu64, blob_id);
      return -1;
   }

   /* validate the received fd according to its advertised type */
   bool reply_fd_valid = false;
   switch (reply.fd_type) {
   case VIRGL_RESOURCE_FD_DMABUF:
      /* TODO validate the fd is dmabuf >= blob_size */
      reply_fd_valid = true;
      break;
   case VIRGL_RESOURCE_FD_OPAQUE:
      /* this will be validated when imported by the client */
      reply_fd_valid = true;
      break;
   case VIRGL_RESOURCE_FD_SHM:
      /* validate the seals and size here */
      reply_fd_valid = !add_required_seals_to_fd(reply_fd) &&
                       validate_resource_fd_shm(reply_fd, blob_size);
      break;
   default:
      break;
   }
   if (!reply_fd_valid) {
      proxy_log("invalid fd type %d for blob %" PRIu64, reply.fd_type, blob_id);
      close(reply_fd);
      return -1;
   }

   blob->type = reply.fd_type;
   blob->u.fd = reply_fd;
   blob->map_info = reply.map_info;

   /* remember the id so attach_resource skips re-importing this resource */
   proxy_context_resource_add(ctx, res_id);

   return 0;
}
+
+static int
+proxy_context_transfer_3d(struct virgl_context *base,
+ struct virgl_resource *res,
+ UNUSED const struct vrend_transfer_info *info,
+ UNUSED int transfer_mode)
+{
+ struct proxy_context *ctx = (struct proxy_context *)base;
+
+ proxy_log("no transfer support for ctx %d and res %d", ctx->base.ctx_id, res->res_id);
+ return -1;
+}
+
/* virgl_context::detach_resource: ask the render worker to destroy its copy
 * of the resource and drop the id from the local tracking table.
 */
static void
proxy_context_detach_resource(struct virgl_context *base, struct virgl_resource *res)
{
   struct proxy_context *ctx = (struct proxy_context *)base;
   const uint32_t res_id = res->res_id;

   const struct render_context_op_destroy_resource_request req = {
      .header.op = RENDER_CONTEXT_OP_DESTROY_RESOURCE,
      .res_id = res_id,
   };
   if (!proxy_socket_send_request(&ctx->socket, &req, sizeof(req)))
      proxy_log("failed to detach res %d", res_id);

   /* forget the id locally even when the request could not be sent */
   proxy_context_resource_remove(ctx, res_id);
}
+
/* virgl_context::attach_resource: export the resource's fd (when needed) and
 * ask the render worker to import it.  Resources already created through
 * get_blob are skipped, since CREATE_RESOURCE implies attach.
 */
static void
proxy_context_attach_resource(struct virgl_context *base, struct virgl_resource *res)
{
   struct proxy_context *ctx = (struct proxy_context *)base;
   const uint32_t res_id = res->res_id;

   /* avoid importing resources created from RENDER_CONTEXT_OP_CREATE_RESOURCE */
   if (proxy_context_resource_find(ctx, res_id))
      return;

   enum virgl_resource_fd_type res_fd_type = res->fd_type;
   int res_fd = res->fd;
   bool close_res_fd = false;
   if (res_fd_type == VIRGL_RESOURCE_FD_INVALID) {
      /* the resource has no fd yet; export one and close it after sending */
      res_fd_type = virgl_resource_export_fd(res, &res_fd);
      if (res_fd_type == VIRGL_RESOURCE_FD_INVALID) {
         proxy_log("failed to export res %d", res_id);
         return;
      }

      close_res_fd = true;
   }

   /* the proxy ignores iovs since transfer_3d is not supported */
   const struct render_context_op_import_resource_request req = {
      .header.op = RENDER_CONTEXT_OP_IMPORT_RESOURCE,
      .res_id = res_id,
      .fd_type = res_fd_type,
      .size = virgl_resource_get_size(res),
   };
   if (!proxy_socket_send_request_with_fds(&ctx->socket, &req, sizeof(req), &res_fd, 1))
      proxy_log("failed to attach res %d", res_id);

   if (res_fd >= 0 && close_res_fd)
      close(res_fd);

   /* track the id so the resource is not attached twice */
   proxy_context_resource_add(ctx, res_id);
}
+
/* virgl_context::destroy: tear down the context.  Also used by
 * proxy_context_create() on a partially-initialized context, so every step
 * guards against state that was never set up.
 */
static void
proxy_context_destroy(struct virgl_context *base)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   /* ask the server process to terminate the context process */
   if (!proxy_client_destroy_context(ctx->client, ctx->base.ctx_id))
      proxy_log("failed to destroy ctx %d", ctx->base.ctx_id);

   if (ctx->sync_thread.fence_eventfd >= 0) {
      if (ctx->sync_thread.created) {
         /* wake the sync thread via the eventfd so it can observe stop */
         ctx->sync_thread.stop = true;
         write_eventfd(ctx->sync_thread.fence_eventfd, 1);
         thrd_join(ctx->sync_thread.thread, NULL);
      }

      close(ctx->sync_thread.fence_eventfd);
   }

   if (ctx->shmem.ptr)
      munmap(ctx->shmem.ptr, ctx->shmem.size);
   if (ctx->shmem.fd >= 0)
      close(ctx->shmem.fd);

   /* timeline_seqnos is only set once the timelines were initialized */
   if (ctx->timeline_seqnos) {
      for (uint32_t i = 0; i < PROXY_CONTEXT_TIMELINE_COUNT; i++) {
         struct proxy_timeline *timeline = &ctx->timelines[i];
         list_for_each_entry_safe (struct proxy_fence, fence, &timeline->fences, head)
            free(fence);
      }
   }
   mtx_destroy(&ctx->timeline_mutex);

   list_for_each_entry_safe (struct proxy_fence, fence, &ctx->free_fences, head)
      free(fence);
   mtx_destroy(&ctx->free_fences_mutex);

   proxy_context_resource_table_fini(ctx);

   proxy_socket_fini(&ctx->socket);

   free(ctx);
}
+
+static void
+proxy_context_init_base(struct proxy_context *ctx)
+{
+ ctx->base.destroy = proxy_context_destroy;
+ ctx->base.attach_resource = proxy_context_attach_resource;
+ ctx->base.detach_resource = proxy_context_detach_resource;
+ ctx->base.transfer_3d = proxy_context_transfer_3d;
+ ctx->base.get_blob = proxy_context_get_blob;
+ ctx->base.submit_cmd = proxy_context_submit_cmd;
+
+ ctx->base.get_fencing_fd = proxy_context_get_fencing_fd;
+ ctx->base.retire_fences = proxy_context_retire_fences;
+ ctx->base.submit_fence = proxy_context_submit_fence;
+}
+
/* Set up the fencing machinery for this context.  Returns false on failure;
 * the caller is expected to run proxy_context_destroy, which closes the
 * eventfd when the sync thread could not be created.
 */
static bool
proxy_context_init_fencing(struct proxy_context *ctx)
{
   /* The render server updates the shmem for the current seqnos and
    * optionally notifies using the eventfd.  That means, when only
    * VIRGL_RENDERER_THREAD_SYNC is set, we just need to set up the eventfd.
    * When VIRGL_RENDERER_ASYNC_FENCE_CB is also set, we need to create a sync
    * thread as well.
    *
    * Fence polling can always check the shmem directly.
    */
   if (!(proxy_renderer.flags & VIRGL_RENDERER_THREAD_SYNC))
      return true;

   ctx->sync_thread.fence_eventfd = create_eventfd(0);
   if (ctx->sync_thread.fence_eventfd < 0) {
      proxy_log("failed to create fence eventfd");
      return false;
   }

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB) {
      int ret = thrd_create(&ctx->sync_thread.thread, proxy_context_sync_thread, ctx);
      if (ret != thrd_success) {
         proxy_log("failed to create sync thread");
         return false;
      }
      ctx->sync_thread.created = true;
   }

   return true;
}
+
+static bool
+proxy_context_init_timelines(struct proxy_context *ctx)
+{
+ atomic_uint *timeline_seqnos = ctx->shmem.ptr;
+ for (uint32_t i = 0; i < ARRAY_SIZE(ctx->timelines); i++) {
+ atomic_init(&timeline_seqnos[i], 0);
+
+ struct proxy_timeline *timeline = &ctx->timelines[i];
+ timeline->cur_seqno = 0;
+ timeline->next_seqno = 1;
+ list_inithead(&timeline->fences);
+ }
+
+ ctx->timeline_seqnos = timeline_seqnos;
+
+ return true;
+}
+
/* Create a sealed anonymous file of the given size.  When out_ptr is
 * non-NULL, also map it read/write and store the mapping there.
 * Returns the fd on success, -1 on failure.
 */
static int
alloc_memfd(const char *name, size_t size, void **out_ptr)
{
   int fd = os_create_anonymous_file(size, name);
   if (fd < 0)
      return -1;

   if (add_required_seals_to_fd(fd))
      goto fail;

   /* the caller may only want the fd, not a mapping */
   if (!out_ptr)
      return fd;

   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (map == MAP_FAILED)
      goto fail;

   *out_ptr = map;
   return fd;

fail:
   close(fd);
   return -1;
}
+
+static bool
+proxy_context_init_shmem(struct proxy_context *ctx)
+{
+ const size_t shmem_size = sizeof(*ctx->timeline_seqnos) * PROXY_CONTEXT_TIMELINE_COUNT;
+ ctx->shmem.fd = alloc_memfd("proxy-ctx", shmem_size, &ctx->shmem.ptr);
+ if (ctx->shmem.fd < 0)
+ return false;
+
+ ctx->shmem.size = shmem_size;
+
+ return true;
+}
+
/* Finish context setup and send the INIT request (with the shmem fd and the
 * optional fence eventfd) to the render worker.  Returns false on failure;
 * the caller cleans up via proxy_context_destroy.
 */
static bool
proxy_context_init(struct proxy_context *ctx, uint32_t ctx_flags)
{
   if (!proxy_context_init_shmem(ctx) || !proxy_context_init_timelines(ctx) ||
       !proxy_context_init_fencing(ctx) || !proxy_context_resource_table_init(ctx))
      return false;

   const struct render_context_op_init_request req = {
      .header.op = RENDER_CONTEXT_OP_INIT,
      .flags = ctx_flags,
      .shmem_size = ctx->shmem.size,
   };
   /* only pass the eventfd when fencing set one up */
   const int req_fds[2] = { ctx->shmem.fd, ctx->sync_thread.fence_eventfd };
   const int req_fd_count = req_fds[1] >= 0 ? 2 : 1;
   if (!proxy_socket_send_request_with_fds(&ctx->socket, &req, sizeof(req), req_fds,
                                           req_fd_count)) {
      proxy_log("failed to initialize context");
      return false;
   }

   return true;
}
+
/* Create a proxy context: ask the server to spawn a context process, then
 * initialize local state and handshake over the returned socket.
 * Returns NULL on failure.
 */
struct virgl_context *
proxy_context_create(uint32_t ctx_id,
                     uint32_t ctx_flags,
                     size_t debug_len,
                     const char *debug_name)
{
   struct proxy_client *client = proxy_renderer.client;
   struct proxy_context *ctx;

   int ctx_fd;
   if (!proxy_client_create_context(client, ctx_id, debug_len, debug_name, &ctx_fd)) {
      proxy_log("failed to create a context");
      return NULL;
   }

   ctx = calloc(1, sizeof(*ctx));
   if (!ctx) {
      close(ctx_fd);
      return NULL;
   }

   /* pre-initialize every member that proxy_context_destroy inspects, so the
    * error path below can safely destroy a partially-initialized context
    */
   proxy_context_init_base(ctx);
   ctx->client = client;
   proxy_socket_init(&ctx->socket, ctx_fd);
   ctx->shmem.fd = -1;
   mtx_init(&ctx->timeline_mutex, mtx_plain);
   mtx_init(&ctx->free_fences_mutex, mtx_plain);
   list_inithead(&ctx->free_fences);
   ctx->sync_thread.fence_eventfd = -1;

   if (!proxy_context_init(ctx, ctx_flags)) {
      proxy_context_destroy(&ctx->base);
      return NULL;
   }

   return &ctx->base;
}
diff --git a/src/proxy/proxy_context.h b/src/proxy/proxy_context.h
new file mode 100644
index 00000000..ce29ecae
--- /dev/null
+++ b/src/proxy/proxy_context.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef PROXY_CONTEXT_H
+#define PROXY_CONTEXT_H
+
+#include "proxy_common.h"
+
+#include "c11/threads.h"
+#include "virgl_context.h"
+
+/* matches virtio-gpu */
+#define PROXY_CONTEXT_TIMELINE_COUNT 64
+
+static_assert(ATOMIC_INT_LOCK_FREE == 2, "proxy renderer requires lock-free atomic_uint");
+
/* per-ring fence timeline; seqnos wrap, see proxy_fence_is_signaled() */
struct proxy_timeline {
   /* last seqno observed from the render worker */
   uint32_t cur_seqno;
   /* seqno assigned to the next submitted fence */
   uint32_t next_seqno;
   struct list_head fences;

   /* consecutive retire attempts with no seqno progress */
   int cur_seqno_stall_count;
};
+
/* a virgl_context implemented by proxying to a per-context render worker */
struct proxy_context {
   struct virgl_context base;

   struct proxy_client *client;
   /* connection to this context's render worker */
   struct proxy_socket socket;

   /* this tracks resources early attached in get_blob */
   struct hash_table *resource_table;

   /* this is shared with the render worker */
   struct {
      int fd;
      size_t size;
      void *ptr;
   } shmem;

   /* protects timelines and timeline_busy_mask in async fence callback mode */
   mtx_t timeline_mutex;
   struct proxy_timeline timelines[PROXY_CONTEXT_TIMELINE_COUNT];
   /* which timelines have fences */
   uint64_t timeline_busy_mask;
   /* this points a region of shmem updated by the render worker */
   const volatile atomic_uint *timeline_seqnos;

   /* protects free_fences in async fence callback mode */
   mtx_t free_fences_mutex;
   /* recycled proxy_fence objects */
   struct list_head free_fences;

   struct {
      /* when VIRGL_RENDERER_THREAD_SYNC is set */
      int fence_eventfd;

      /* when VIRGL_RENDERER_ASYNC_FENCE_CB is also set */
      thrd_t thread;
      bool created;
      bool stop;
   } sync_thread;
};
+
+#endif /* PROXY_CONTEXT_H */
diff --git a/src/proxy/proxy_renderer.c b/src/proxy/proxy_renderer.c
new file mode 100644
index 00000000..c42b7c8c
--- /dev/null
+++ b/src/proxy/proxy_renderer.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "proxy_common.h"
+
+#include "proxy_client.h"
+#include "proxy_renderer.h"
+#include "proxy_server.h"
+
+int
+proxy_renderer_init(const struct proxy_renderer_cbs *cbs, uint32_t flags)
+{
+ proxy_renderer.cbs = cbs;
+ proxy_renderer.flags = flags;
+
+ proxy_renderer.server = proxy_server_create();
+ if (!proxy_renderer.server)
+ goto fail;
+
+ proxy_renderer.client =
+ proxy_client_create(proxy_renderer.server, proxy_renderer.flags);
+ if (!proxy_renderer.client)
+ goto fail;
+
+ return 0;
+
+fail:
+ proxy_renderer_fini();
+ return -1;
+}
+
+void
+proxy_renderer_fini(void)
+{
+ if (proxy_renderer.server)
+ proxy_server_destroy(proxy_renderer.server);
+
+ if (proxy_renderer.client)
+ proxy_client_destroy(proxy_renderer.client);
+
+ memset(&proxy_renderer, 0, sizeof(struct proxy_renderer));
+}
+
+void
+proxy_renderer_reset(void)
+{
+ proxy_client_reset(proxy_renderer.client);
+}
diff --git a/src/proxy/proxy_renderer.h b/src/proxy/proxy_renderer.h
new file mode 100644
index 00000000..dd7e181d
--- /dev/null
+++ b/src/proxy/proxy_renderer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef PROXY_RENDERER_H
+#define PROXY_RENDERER_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+struct iovec;
+struct virgl_context;
+
/* callbacks the embedder provides to the proxy renderer */
struct proxy_renderer_cbs {
   /* return a connected render server fd for the given protocol version; a
    * negative return makes the proxy fork the render server on demand
    */
   int (*get_server_fd)(uint32_t version);
};
+
+#ifdef ENABLE_RENDER_SERVER
+
+int
+proxy_renderer_init(const struct proxy_renderer_cbs *cbs, uint32_t flags);
+
+void
+proxy_renderer_fini(void);
+
+void
+proxy_renderer_reset(void);
+
+struct virgl_context *
+proxy_context_create(uint32_t ctx_id,
+ uint32_t ctx_flags,
+ size_t debug_len,
+ const char *debug_name);
+
+#else /* ENABLE_RENDER_SERVER */
+
/* stub: always fails when render server support is compiled out */
static inline int
proxy_renderer_init(UNUSED const struct proxy_renderer_cbs *cbs, UNUSED uint32_t flags)
{
   virgl_log("Render server support was not enabled in virglrenderer\n");
   return -1;
}
+
/* stub: nothing to tear down without render server support */
static inline void
proxy_renderer_fini(void)
{
}
+
/* stub: nothing to reset without render server support */
static inline void
proxy_renderer_reset(void)
{
}
+
/* stub: context creation always fails without render server support */
static inline struct virgl_context *
proxy_context_create(UNUSED uint32_t ctx_id,
                     UNUSED uint32_t ctx_flags,
                     UNUSED size_t debug_len,
                     UNUSED const char *debug_name)
{
   return NULL;
}
+
+#endif /* ENABLE_RENDER_SERVER */
+
+#endif /* PROXY_RENDERER_H */
diff --git a/src/proxy/proxy_server.c b/src/proxy/proxy_server.c
new file mode 100644
index 00000000..b0d3d060
--- /dev/null
+++ b/src/proxy/proxy_server.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "proxy_server.h"
+
+#include <signal.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "server/render_protocol.h"
+
+int
+proxy_server_connect(struct proxy_server *srv)
+{
+ int client_fd = srv->client_fd;
+ /* transfer ownership */
+ srv->client_fd = -1;
+ return client_fd;
+}
+
/* Kill and reap the forked render server (if any), close the connection fd
 * still owned by the server, and free it.
 */
void
proxy_server_destroy(struct proxy_server *srv)
{
   if (srv->pid >= 0) {
      kill(srv->pid, SIGKILL);

      /* reap the child to avoid leaving a zombie */
      siginfo_t siginfo = { 0 };
      waitid(P_PID, srv->pid, &siginfo, WEXITED);
   }

   if (srv->client_fd >= 0)
      close(srv->client_fd);

   free(srv);
}
+
+static bool
+proxy_server_fork(struct proxy_server *srv)
+{
+ int socket_fds[2];
+ if (!proxy_socket_pair(socket_fds))
+ return false;
+ const int client_fd = socket_fds[0];
+ const int remote_fd = socket_fds[1];
+
+ pid_t pid = fork();
+ if (pid < 0) {
+ proxy_log("failed to fork proxy server");
+ close(client_fd);
+ close(remote_fd);
+ return false;
+ }
+
+ if (pid > 0) {
+ srv->pid = pid;
+ srv->client_fd = client_fd;
+ close(remote_fd);
+ } else {
+ close(client_fd);
+
+ /* do not receive signals from terminal */
+ setpgid(0, 0);
+
+ char fd_str[16];
+ snprintf(fd_str, sizeof(fd_str), "%d", remote_fd);
+
+ char *const argv[] = {
+ RENDER_SERVER_EXEC_PATH,
+ "--socket-fd",
+ fd_str,
+ NULL,
+ };
+ execv(argv[0], argv);
+
+ proxy_log("failed to exec %s: %s", argv[0], strerror(errno));
+ close(remote_fd);
+ exit(-1);
+ }
+
+ return true;
+}
+
+static bool
+proxy_server_init_fd(struct proxy_server *srv)
+{
+ /* the fd represents a connection to the server */
+ srv->client_fd = proxy_renderer.cbs->get_server_fd(RENDER_SERVER_VERSION);
+ if (srv->client_fd < 0)
+ return false;
+
+ return true;
+}
+
+struct proxy_server *
+proxy_server_create(void)
+{
+ struct proxy_server *srv = calloc(1, sizeof(*srv));
+ if (!srv)
+ return NULL;
+
+ srv->pid = -1;
+
+ if (!proxy_server_init_fd(srv)) {
+ /* start the render server on demand when the client does not provide a
+ * server fd
+ */
+ if (!proxy_server_fork(srv)) {
+ free(srv);
+ return NULL;
+ }
+ }
+
+ if (!proxy_socket_is_seqpacket(srv->client_fd)) {
+ proxy_log("invalid client fd type");
+ close(srv->client_fd);
+ free(srv);
+ return NULL;
+ }
+
+ proxy_log("proxy server with pid %d", srv->pid);
+
+ return srv;
+}
diff --git a/src/proxy/proxy_server.h b/src/proxy/proxy_server.h
new file mode 100644
index 00000000..1b5ca02b
--- /dev/null
+++ b/src/proxy/proxy_server.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef PROXY_SERVER_H
+#define PROXY_SERVER_H
+
+#include "proxy_common.h"
+
+#include <sys/types.h>
+
/* handle for the render server process/connection */
struct proxy_server {
   /* pid of the forked render server, or -1 when the fd came from the embedder */
   pid_t pid;
   /* connection fd owned by the server until proxy_server_connect() */
   int client_fd;
};
+
+struct proxy_server *
+proxy_server_create(void);
+
+void
+proxy_server_destroy(struct proxy_server *srv);
+
+int
+proxy_server_connect(struct proxy_server *srv);
+
+#endif /* PROXY_SERVER_H */
diff --git a/src/proxy/proxy_socket.c b/src/proxy/proxy_socket.c
new file mode 100644
index 00000000..51223c6c
--- /dev/null
+++ b/src/proxy/proxy_socket.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "proxy_socket.h"
+
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+#define PROXY_SOCKET_MAX_FD_COUNT 8
+
+/* this is only used when the render server is started on demand */
/* this is only used when the render server is started on demand */
bool
proxy_socket_pair(int out_fds[static 2])
{
   if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, out_fds) != 0) {
      proxy_log("failed to create socket pair");
      return false;
   }

   return true;
}
+
/* Check that the fd is a SOCK_SEQPACKET socket, the only type the proxy
 * protocol supports (message boundaries are relied upon).
 */
bool
proxy_socket_is_seqpacket(int fd)
{
   int sock_type;
   socklen_t opt_len = sizeof(sock_type);

   if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &sock_type, &opt_len)) {
      proxy_log("fd %d err %s", fd, strerror(errno));
      return false;
   }

   return sock_type == SOCK_SEQPACKET;
}
+
+void
+proxy_socket_init(struct proxy_socket *socket, int fd)
+{
+ /* TODO make fd non-blocking and perform io with timeout */
+ assert(fd >= 0);
+ *socket = (struct proxy_socket){
+ .fd = fd,
+ };
+}
+
/* Close the fd owned by the socket wrapper. */
void
proxy_socket_fini(struct proxy_socket *socket)
{
   close(socket->fd);
}
+
/* Probe the socket with a zero-timeout poll.  Returns false only when the
 * peer has disconnected (POLLERR/POLLHUP/POLLNVAL) or poll itself fails.
 */
bool
proxy_socket_is_connected(const struct proxy_socket *socket)
{
   struct pollfd poll_fd = {
      .fd = socket->fd,
   };

   while (true) {
      /* timeout of 0: never blocks */
      const int ret = poll(&poll_fd, 1, 0);
      if (ret == 0) {
         return true;
      } else if (ret < 0) {
         if (errno == EINTR || errno == EAGAIN)
            continue;

         proxy_log("failed to poll socket");
         return false;
      }

      if (poll_fd.revents & (POLLERR | POLLHUP | POLLNVAL)) {
         proxy_log("socket disconnected");
         return false;
      }

      return true;
   }
}
+
/* Locate the SCM_RIGHTS fd array in a received message.  Stores the fd
 * count in *out_count (0 when there is no valid control message) and
 * returns a pointer into the cmsg buffer, or NULL.
 */
static const int *
get_received_fds(const struct msghdr *msg, int *out_count)
{
   const struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
   if (unlikely(!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
                cmsg->cmsg_type != SCM_RIGHTS || cmsg->cmsg_len < CMSG_LEN(0))) {
      *out_count = 0;
      return NULL;
   }

   /* the payload size beyond the header is the fd array */
   *out_count = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
   return (const int *)CMSG_DATA(cmsg);
}
+
/* recvmsg wrapper: retries on EINTR/EAGAIN, and rejects truncated messages
 * (closing any fds that arrived with them so they do not leak).
 */
static bool
proxy_socket_recvmsg(struct proxy_socket *socket, struct msghdr *msg)
{
   do {
      /* MSG_CMSG_CLOEXEC: received fds must not leak across exec */
      const ssize_t s = recvmsg(socket->fd, msg, MSG_CMSG_CLOEXEC);
      if (unlikely(s < 0)) {
         if (errno == EAGAIN || errno == EINTR)
            continue;

         proxy_log("failed to receive message: %s", strerror(errno));
         return false;
      }

      /* a SEQPACKET message must fill the single iov exactly */
      assert(msg->msg_iovlen == 1);
      if (unlikely((msg->msg_flags & (MSG_TRUNC | MSG_CTRUNC)) ||
                   msg->msg_iov[0].iov_len != (size_t)s)) {
         proxy_log("failed to receive message: truncated or incomplete");

         /* close any fds delivered with the bad message */
         int fd_count;
         const int *fds = get_received_fds(msg, &fd_count);
         for (int i = 0; i < fd_count; i++)
            close(fds[i]);

         return false;
      }

      return true;
   } while (true);
}
+
/* Receive a reply of exactly `size` bytes, optionally collecting up to
 * max_fd_count SCM_RIGHTS fds into `fds` (count stored in *out_fd_count).
 */
static bool
proxy_socket_receive_reply_internal(struct proxy_socket *socket,
                                    void *data,
                                    size_t size,
                                    int *fds,
                                    int max_fd_count,
                                    int *out_fd_count)
{
   assert(data && size);
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = data,
            .iov_len = size,
         },
      .msg_iovlen = 1,
   };

   /* control buffer sized for the worst case; only part of it is used */
   char cmsg_buf[CMSG_SPACE(sizeof(*fds) * PROXY_SOCKET_MAX_FD_COUNT)];
   if (max_fd_count) {
      assert(fds && max_fd_count <= PROXY_SOCKET_MAX_FD_COUNT);
      msg.msg_control = cmsg_buf;
      msg.msg_controllen = CMSG_SPACE(sizeof(*fds) * max_fd_count);

      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
      memset(cmsg, 0, sizeof(*cmsg));
   }

   if (!proxy_socket_recvmsg(socket, &msg))
      return false;

   if (max_fd_count) {
      int received_fd_count;
      const int *received_fds = get_received_fds(&msg, &received_fd_count);
      assert(received_fd_count <= max_fd_count);

      memcpy(fds, received_fds, sizeof(*fds) * received_fd_count);
      *out_fd_count = received_fd_count;
   } else if (out_fd_count) {
      *out_fd_count = 0;
   }

   return true;
}
+
/* Receive a fixed-size reply without any file descriptors. */
bool
proxy_socket_receive_reply(struct proxy_socket *socket, void *data, size_t size)
{
   return proxy_socket_receive_reply_internal(socket, data, size, NULL, 0, NULL);
}
+
/* Receive a fixed-size reply along with up to max_fd_count fds. */
bool
proxy_socket_receive_reply_with_fds(struct proxy_socket *socket,
                                    void *data,
                                    size_t size,
                                    int *fds,
                                    int max_fd_count,
                                    int *out_fd_count)
{
   return proxy_socket_receive_reply_internal(socket, data, size, fds, max_fd_count,
                                              out_fd_count);
}
+
/* sendmsg wrapper: retries on EINTR/EAGAIN and suppresses SIGPIPE. */
static bool
proxy_socket_sendmsg(struct proxy_socket *socket, const struct msghdr *msg)
{
   do {
      const ssize_t s = sendmsg(socket->fd, msg, MSG_NOSIGNAL);
      if (unlikely(s < 0)) {
         if (errno == EAGAIN || errno == EINTR)
            continue;

         proxy_log("failed to send message: %s", strerror(errno));
         return false;
      }

      /* no partial send since the socket type is SOCK_SEQPACKET */
      assert(msg->msg_iovlen == 1 && msg->msg_iov[0].iov_len == (size_t)s);
      return true;
   } while (true);
}
+
/* Send a request of `size` bytes, optionally attaching fd_count fds via an
 * SCM_RIGHTS control message.
 */
static bool
proxy_socket_send_request_internal(struct proxy_socket *socket,
                                   const void *data,
                                   size_t size,
                                   const int *fds,
                                   int fd_count)
{
   assert(data && size);
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = (void *)data,
            .iov_len = size,
         },
      .msg_iovlen = 1,
   };

   /* control buffer sized for the worst case; only part of it is used */
   char cmsg_buf[CMSG_SPACE(sizeof(*fds) * PROXY_SOCKET_MAX_FD_COUNT)];
   if (fd_count) {
      assert(fds && fd_count <= PROXY_SOCKET_MAX_FD_COUNT);
      msg.msg_control = cmsg_buf;
      msg.msg_controllen = CMSG_SPACE(sizeof(*fds) * fd_count);

      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
      cmsg->cmsg_level = SOL_SOCKET;
      cmsg->cmsg_type = SCM_RIGHTS;
      cmsg->cmsg_len = CMSG_LEN(sizeof(*fds) * fd_count);
      memcpy(CMSG_DATA(cmsg), fds, sizeof(*fds) * fd_count);
   }

   return proxy_socket_sendmsg(socket, &msg);
}
+
/* Send a request without any file descriptors. */
bool
proxy_socket_send_request(struct proxy_socket *socket, const void *data, size_t size)
{
   return proxy_socket_send_request_internal(socket, data, size, NULL, 0);
}
+
/* Send a request along with fd_count file descriptors. */
bool
proxy_socket_send_request_with_fds(struct proxy_socket *socket,
                                   const void *data,
                                   size_t size,
                                   const int *fds,
                                   int fd_count)
{
   return proxy_socket_send_request_internal(socket, data, size, fds, fd_count);
}
diff --git a/src/proxy/proxy_socket.h b/src/proxy/proxy_socket.h
new file mode 100644
index 00000000..04e2f543
--- /dev/null
+++ b/src/proxy/proxy_socket.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef PROXY_SOCKET_H
+#define PROXY_SOCKET_H
+
+#include "proxy_common.h"
+
+struct proxy_socket {
+ int fd;
+};
+
+bool
+proxy_socket_pair(int out_fds[static 2]);
+
+bool
+proxy_socket_is_seqpacket(int fd);
+
+void
+proxy_socket_init(struct proxy_socket *socket, int fd);
+
+void
+proxy_socket_fini(struct proxy_socket *socket);
+
+bool
+proxy_socket_is_connected(const struct proxy_socket *socket);
+
+bool
+proxy_socket_receive_reply(struct proxy_socket *socket, void *data, size_t size);
+
+bool
+proxy_socket_receive_reply_with_fds(struct proxy_socket *socket,
+ void *data,
+ size_t size,
+ int *fds,
+ int max_fd_count,
+ int *out_fd_count);
+
+bool
+proxy_socket_send_request(struct proxy_socket *socket, const void *data, size_t size);
+
+bool
+proxy_socket_send_request_with_fds(struct proxy_socket *socket,
+ const void *data,
+ size_t size,
+ const int *fds,
+ int fd_count);
+
+#endif /* PROXY_SOCKET_H */
diff --git a/src/venus/.clang-format b/src/venus/.clang-format
index 00011ee2..74cb184c 100644
--- a/src/venus/.clang-format
+++ b/src/venus/.clang-format
@@ -15,13 +15,15 @@ Cpp11BracedListStyle: false
ForEachMacros:
- LIST_FOR_EACH_ENTRY
- LIST_FOR_EACH_ENTRY_SAFE
+ - list_for_each_entry
+ - list_for_each_entry_safe
IncludeBlocks: Regroup
IncludeCategories:
- - Regex: '^("config.h"|"vkr_common.h")$'
+ - Regex: '^"(config|vkr_common|proxy_common|render_common).h"$'
Priority: 0
- - Regex: '^"vkr_'
+ - Regex: '^"(vkr|proxy|render)_'
Priority: 3
- - Regex: '^"(virgl|vrend_|c11/|util/|os/|pipe/|venus-protocol/)'
+ - Regex: '^"(virgl|vrend_|c11/|util/|os/|pipe/|venus-protocol/|server/)'
Priority: 2
- Regex: '.*'
Priority: 1
diff --git a/src/venus/venus-protocol/vk_platform.h b/src/venus/venus-protocol/vk_platform.h
index 18b913ab..3ff8c5d1 100644
--- a/src/venus/venus-protocol/vk_platform.h
+++ b/src/venus/venus-protocol/vk_platform.h
@@ -2,7 +2,7 @@
// File: vk_platform.h
//
/*
-** Copyright 2014-2021 The Khronos Group Inc.
+** Copyright 2014-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
@@ -42,7 +42,7 @@ extern "C"
#define VKAPI_CALL __stdcall
#define VKAPI_PTR VKAPI_CALL
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
- #error "Vulkan isn't supported for the 'armeabi' NDK ABI"
+ #error "Vulkan is not supported for the 'armeabi' NDK ABI"
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
// On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
// calling convention, i.e. float parameters are passed in registers. This
diff --git a/src/venus/venus-protocol/vn_protocol_renderer.h b/src/venus/venus-protocol/vn_protocol_renderer.h
index c714d40c..52ef7d5e 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer.h
@@ -1,4 +1,4 @@
-/* This file is generated by venus-protocol git-74c1c432. */
+/* This file is generated by venus-protocol git-c692a30d. */
/*
* Copyright 2020 Google LLC
@@ -13,6 +13,7 @@
#include "vn_protocol_renderer_info.h"
#include "vn_protocol_renderer_types.h"
#include "vn_protocol_renderer_handles.h"
+#include "vn_protocol_renderer_util.h"
#include "vn_protocol_renderer_dispatches.h"
#include "vn_protocol_renderer_structs.h"
#include "vn_protocol_renderer_transport.h"
@@ -42,5 +43,6 @@
#include "vn_protocol_renderer_pipeline_cache.h"
#include "vn_protocol_renderer_command_pool.h"
#include "vn_protocol_renderer_command_buffer.h"
+#include "vn_protocol_renderer_private_data_slot.h"
#endif /* VN_PROTOCOL_RENDERER_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_buffer.h b/src/venus/venus-protocol/vn_protocol_renderer_buffer.h
index 44305f39..06002418 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_buffer.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_buffer.h
@@ -239,35 +239,6 @@ vn_replace_VkBufferCreateInfo_handle(VkBufferCreateInfo *val)
/* struct VkBindBufferMemoryDeviceGroupInfo chain */
-static inline void
-vn_encode_VkBindBufferMemoryDeviceGroupInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkBindBufferMemoryDeviceGroupInfo_self(struct vn_cs_encoder *enc, const VkBindBufferMemoryDeviceGroupInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_uint32_t(enc, &val->deviceIndexCount);
- if (val->pDeviceIndices) {
- vn_encode_array_size(enc, val->deviceIndexCount);
- vn_encode_uint32_t_array(enc, val->pDeviceIndices, val->deviceIndexCount);
- } else {
- vn_encode_array_size(enc, 0);
- }
-}
-
-static inline void
-vn_encode_VkBindBufferMemoryDeviceGroupInfo(struct vn_cs_encoder *enc, const VkBindBufferMemoryDeviceGroupInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO });
- vn_encode_VkBindBufferMemoryDeviceGroupInfo_pnext(enc, val->pNext);
- vn_encode_VkBindBufferMemoryDeviceGroupInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkBindBufferMemoryDeviceGroupInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -335,47 +306,6 @@ vn_replace_VkBindBufferMemoryDeviceGroupInfo_handle(VkBindBufferMemoryDeviceGrou
/* struct VkBindBufferMemoryInfo chain */
-static inline void
-vn_encode_VkBindBufferMemoryInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- const VkBaseInStructure *pnext = val;
-
- while (pnext) {
- switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
- vn_encode_simple_pointer(enc, pnext);
- vn_encode_VkStructureType(enc, &pnext->sType);
- vn_encode_VkBindBufferMemoryInfo_pnext(enc, pnext->pNext);
- vn_encode_VkBindBufferMemoryDeviceGroupInfo_self(enc, (const VkBindBufferMemoryDeviceGroupInfo *)pnext);
- return;
- default:
- /* ignore unknown/unsupported struct */
- break;
- }
- pnext = pnext->pNext;
- }
-
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkBindBufferMemoryInfo_self(struct vn_cs_encoder *enc, const VkBindBufferMemoryInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_VkBuffer(enc, &val->buffer);
- vn_encode_VkDeviceMemory(enc, &val->memory);
- vn_encode_VkDeviceSize(enc, &val->memoryOffset);
-}
-
-static inline void
-vn_encode_VkBindBufferMemoryInfo(struct vn_cs_encoder *enc, const VkBindBufferMemoryInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO });
- vn_encode_VkBindBufferMemoryInfo_pnext(enc, val->pNext);
- vn_encode_VkBindBufferMemoryInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkBindBufferMemoryInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -515,31 +445,73 @@ vn_replace_VkBufferMemoryRequirementsInfo2_handle(VkBufferMemoryRequirementsInfo
} while (pnext);
}
-/* struct VkBufferDeviceAddressInfo chain */
+/* struct VkDeviceBufferMemoryRequirements chain */
-static inline void
-vn_encode_VkBufferDeviceAddressInfo_pnext(struct vn_cs_encoder *enc, const void *val)
+static inline void *
+vn_decode_VkDeviceBufferMemoryRequirements_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
}
static inline void
-vn_encode_VkBufferDeviceAddressInfo_self(struct vn_cs_encoder *enc, const VkBufferDeviceAddressInfo *val)
+vn_decode_VkDeviceBufferMemoryRequirements_self_temp(struct vn_cs_decoder *dec, VkDeviceBufferMemoryRequirements *val)
{
/* skip val->{sType,pNext} */
- vn_encode_VkBuffer(enc, &val->buffer);
+ if (vn_decode_simple_pointer(dec)) {
+ val->pCreateInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pCreateInfo));
+ if (!val->pCreateInfo) return;
+ vn_decode_VkBufferCreateInfo_temp(dec, (VkBufferCreateInfo *)val->pCreateInfo);
+ } else {
+ val->pCreateInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
}
static inline void
-vn_encode_VkBufferDeviceAddressInfo(struct vn_cs_encoder *enc, const VkBufferDeviceAddressInfo *val)
+vn_decode_VkDeviceBufferMemoryRequirements_temp(struct vn_cs_decoder *dec, VkDeviceBufferMemoryRequirements *val)
{
- assert(val->sType == VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO });
- vn_encode_VkBufferDeviceAddressInfo_pnext(enc, val->pNext);
- vn_encode_VkBufferDeviceAddressInfo_self(enc, val);
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDeviceBufferMemoryRequirements_pnext_temp(dec);
+ vn_decode_VkDeviceBufferMemoryRequirements_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkDeviceBufferMemoryRequirements_handle_self(VkDeviceBufferMemoryRequirements *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ if (val->pCreateInfo)
+ vn_replace_VkBufferCreateInfo_handle((VkBufferCreateInfo *)val->pCreateInfo);
}
+static inline void
+vn_replace_VkDeviceBufferMemoryRequirements_handle(VkDeviceBufferMemoryRequirements *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS:
+ vn_replace_VkDeviceBufferMemoryRequirements_handle_self((VkDeviceBufferMemoryRequirements *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkBufferDeviceAddressInfo chain */
+
static inline void *
vn_decode_VkBufferDeviceAddressInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -802,6 +774,45 @@ static inline void vn_encode_vkGetBufferMemoryRequirements2_reply(struct vn_cs_e
vn_encode_VkMemoryRequirements2(enc, args->pMemoryRequirements);
}
+static inline void vn_decode_vkGetDeviceBufferMemoryRequirements_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetDeviceBufferMemoryRequirements *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pInfo));
+ if (!args->pInfo) return;
+ vn_decode_VkDeviceBufferMemoryRequirements_temp(dec, (VkDeviceBufferMemoryRequirements *)args->pInfo);
+ } else {
+ args->pInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ args->pMemoryRequirements = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pMemoryRequirements));
+ if (!args->pMemoryRequirements) return;
+ vn_decode_VkMemoryRequirements2_partial_temp(dec, args->pMemoryRequirements);
+ } else {
+ args->pMemoryRequirements = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkGetDeviceBufferMemoryRequirements_args_handle(struct vn_command_vkGetDeviceBufferMemoryRequirements *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ if (args->pInfo)
+ vn_replace_VkDeviceBufferMemoryRequirements_handle((VkDeviceBufferMemoryRequirements *)args->pInfo);
+ /* skip args->pMemoryRequirements */
+}
+
+static inline void vn_encode_vkGetDeviceBufferMemoryRequirements_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetDeviceBufferMemoryRequirements *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetDeviceBufferMemoryRequirements_EXT});
+
+ /* skip args->device */
+ /* skip args->pInfo */
+ if (vn_encode_simple_pointer(enc, args->pMemoryRequirements))
+ vn_encode_VkMemoryRequirements2(enc, args->pMemoryRequirements);
+}
+
static inline void vn_decode_vkGetBufferOpaqueCaptureAddress_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetBufferOpaqueCaptureAddress *args)
{
vn_decode_VkDevice_lookup(dec, &args->device);
@@ -1022,6 +1033,31 @@ static inline void vn_dispatch_vkGetBufferMemoryRequirements2(struct vn_dispatch
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkGetDeviceBufferMemoryRequirements(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetDeviceBufferMemoryRequirements args;
+
+ if (!ctx->dispatch_vkGetDeviceBufferMemoryRequirements) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetDeviceBufferMemoryRequirements_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetDeviceBufferMemoryRequirements(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetDeviceBufferMemoryRequirements_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
static inline void vn_dispatch_vkGetBufferOpaqueCaptureAddress(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkGetBufferOpaqueCaptureAddress args;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_command_buffer.h b/src/venus/venus-protocol/vn_protocol_renderer_command_buffer.h
index b00cbfa1..e78502d2 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_command_buffer.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_command_buffer.h
@@ -14,6 +14,12 @@
#pragma GCC diagnostic ignored "-Wpointer-arith"
#pragma GCC diagnostic ignored "-Wunused-parameter"
+/*
+ * These structs/unions/commands are not included
+ *
+ * vkCmdPushDescriptorSetWithTemplateKHR
+ */
+
/* struct VkCommandBufferAllocateInfo chain */
static inline void *
@@ -75,38 +81,67 @@ vn_replace_VkCommandBufferAllocateInfo_handle(VkCommandBufferAllocateInfo *val)
} while (pnext);
}
-/* struct VkCommandBufferInheritanceInfo chain */
+/* struct VkCommandBufferInheritanceConditionalRenderingInfoEXT chain */
-static inline void
-vn_encode_VkCommandBufferInheritanceInfo_pnext(struct vn_cs_encoder *enc, const void *val)
+static inline void *
+vn_decode_VkCommandBufferInheritanceConditionalRenderingInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
}
static inline void
-vn_encode_VkCommandBufferInheritanceInfo_self(struct vn_cs_encoder *enc, const VkCommandBufferInheritanceInfo *val)
+vn_decode_VkCommandBufferInheritanceConditionalRenderingInfoEXT_self_temp(struct vn_cs_decoder *dec, VkCommandBufferInheritanceConditionalRenderingInfoEXT *val)
{
/* skip val->{sType,pNext} */
- vn_encode_VkRenderPass(enc, &val->renderPass);
- vn_encode_uint32_t(enc, &val->subpass);
- vn_encode_VkFramebuffer(enc, &val->framebuffer);
- vn_encode_VkBool32(enc, &val->occlusionQueryEnable);
- vn_encode_VkFlags(enc, &val->queryFlags);
- vn_encode_VkFlags(enc, &val->pipelineStatistics);
+ vn_decode_VkBool32(dec, &val->conditionalRenderingEnable);
+}
+
+static inline void
+vn_decode_VkCommandBufferInheritanceConditionalRenderingInfoEXT_temp(struct vn_cs_decoder *dec, VkCommandBufferInheritanceConditionalRenderingInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCommandBufferInheritanceConditionalRenderingInfoEXT_pnext_temp(dec);
+ vn_decode_VkCommandBufferInheritanceConditionalRenderingInfoEXT_self_temp(dec, val);
}
static inline void
-vn_encode_VkCommandBufferInheritanceInfo(struct vn_cs_encoder *enc, const VkCommandBufferInheritanceInfo *val)
+vn_replace_VkCommandBufferInheritanceConditionalRenderingInfoEXT_handle_self(VkCommandBufferInheritanceConditionalRenderingInfoEXT *val)
{
- assert(val->sType == VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO });
- vn_encode_VkCommandBufferInheritanceInfo_pnext(enc, val->pNext);
- vn_encode_VkCommandBufferInheritanceInfo_self(enc, val);
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->conditionalRenderingEnable */
}
+static inline void
+vn_replace_VkCommandBufferInheritanceConditionalRenderingInfoEXT_handle(VkCommandBufferInheritanceConditionalRenderingInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
+ vn_replace_VkCommandBufferInheritanceConditionalRenderingInfoEXT_handle_self((VkCommandBufferInheritanceConditionalRenderingInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCommandBufferInheritanceRenderingInfo chain */
+
static inline void *
-vn_decode_VkCommandBufferInheritanceInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkCommandBufferInheritanceRenderingInfo_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -115,6 +150,111 @@ vn_decode_VkCommandBufferInheritanceInfo_pnext_temp(struct vn_cs_decoder *dec)
}
static inline void
+vn_decode_VkCommandBufferInheritanceRenderingInfo_self_temp(struct vn_cs_decoder *dec, VkCommandBufferInheritanceRenderingInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_uint32_t(dec, &val->viewMask);
+ vn_decode_uint32_t(dec, &val->colorAttachmentCount);
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, val->colorAttachmentCount);
+ val->pColorAttachmentFormats = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pColorAttachmentFormats) * array_size);
+ if (!val->pColorAttachmentFormats) return;
+ vn_decode_VkFormat_array(dec, (VkFormat *)val->pColorAttachmentFormats, array_size);
+ } else {
+ vn_decode_array_size(dec, val->colorAttachmentCount);
+ val->pColorAttachmentFormats = NULL;
+ }
+ vn_decode_VkFormat(dec, &val->depthAttachmentFormat);
+ vn_decode_VkFormat(dec, &val->stencilAttachmentFormat);
+ vn_decode_VkSampleCountFlagBits(dec, &val->rasterizationSamples);
+}
+
+static inline void
+vn_decode_VkCommandBufferInheritanceRenderingInfo_temp(struct vn_cs_decoder *dec, VkCommandBufferInheritanceRenderingInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCommandBufferInheritanceRenderingInfo_pnext_temp(dec);
+ vn_decode_VkCommandBufferInheritanceRenderingInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCommandBufferInheritanceRenderingInfo_handle_self(VkCommandBufferInheritanceRenderingInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+ /* skip val->viewMask */
+ /* skip val->colorAttachmentCount */
+ /* skip val->pColorAttachmentFormats */
+ /* skip val->depthAttachmentFormat */
+ /* skip val->stencilAttachmentFormat */
+ /* skip val->rasterizationSamples */
+}
+
+static inline void
+vn_replace_VkCommandBufferInheritanceRenderingInfo_handle(VkCommandBufferInheritanceRenderingInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO:
+ vn_replace_VkCommandBufferInheritanceRenderingInfo_handle_self((VkCommandBufferInheritanceRenderingInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCommandBufferInheritanceInfo chain */
+
+static inline void *
+vn_decode_VkCommandBufferInheritanceInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkCommandBufferInheritanceConditionalRenderingInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkCommandBufferInheritanceInfo_pnext_temp(dec);
+ vn_decode_VkCommandBufferInheritanceConditionalRenderingInfoEXT_self_temp(dec, (VkCommandBufferInheritanceConditionalRenderingInfoEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkCommandBufferInheritanceRenderingInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkCommandBufferInheritanceInfo_pnext_temp(dec);
+ vn_decode_VkCommandBufferInheritanceRenderingInfo_self_temp(dec, (VkCommandBufferInheritanceRenderingInfo *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkCommandBufferInheritanceInfo_self_temp(struct vn_cs_decoder *dec, VkCommandBufferInheritanceInfo *val)
{
/* skip val->{sType,pNext} */
@@ -162,6 +302,12 @@ vn_replace_VkCommandBufferInheritanceInfo_handle(VkCommandBufferInheritanceInfo
case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO:
vn_replace_VkCommandBufferInheritanceInfo_handle_self((VkCommandBufferInheritanceInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
+ vn_replace_VkCommandBufferInheritanceConditionalRenderingInfoEXT_handle_self((VkCommandBufferInheritanceConditionalRenderingInfoEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO:
+ vn_replace_VkCommandBufferInheritanceRenderingInfo_handle_self((VkCommandBufferInheritanceRenderingInfo *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -172,29 +318,6 @@ vn_replace_VkCommandBufferInheritanceInfo_handle(VkCommandBufferInheritanceInfo
/* struct VkDeviceGroupCommandBufferBeginInfo chain */
-static inline void
-vn_encode_VkDeviceGroupCommandBufferBeginInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkDeviceGroupCommandBufferBeginInfo_self(struct vn_cs_encoder *enc, const VkDeviceGroupCommandBufferBeginInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_uint32_t(enc, &val->deviceMask);
-}
-
-static inline void
-vn_encode_VkDeviceGroupCommandBufferBeginInfo(struct vn_cs_encoder *enc, const VkDeviceGroupCommandBufferBeginInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO });
- vn_encode_VkDeviceGroupCommandBufferBeginInfo_pnext(enc, val->pNext);
- vn_encode_VkDeviceGroupCommandBufferBeginInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkDeviceGroupCommandBufferBeginInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -252,47 +375,6 @@ vn_replace_VkDeviceGroupCommandBufferBeginInfo_handle(VkDeviceGroupCommandBuffer
/* struct VkCommandBufferBeginInfo chain */
-static inline void
-vn_encode_VkCommandBufferBeginInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- const VkBaseInStructure *pnext = val;
-
- while (pnext) {
- switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
- vn_encode_simple_pointer(enc, pnext);
- vn_encode_VkStructureType(enc, &pnext->sType);
- vn_encode_VkCommandBufferBeginInfo_pnext(enc, pnext->pNext);
- vn_encode_VkDeviceGroupCommandBufferBeginInfo_self(enc, (const VkDeviceGroupCommandBufferBeginInfo *)pnext);
- return;
- default:
- /* ignore unknown/unsupported struct */
- break;
- }
- pnext = pnext->pNext;
- }
-
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkCommandBufferBeginInfo_self(struct vn_cs_encoder *enc, const VkCommandBufferBeginInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_VkFlags(enc, &val->flags);
- if (vn_encode_simple_pointer(enc, val->pInheritanceInfo))
- vn_encode_VkCommandBufferInheritanceInfo(enc, val->pInheritanceInfo);
-}
-
-static inline void
-vn_encode_VkCommandBufferBeginInfo(struct vn_cs_encoder *enc, const VkCommandBufferBeginInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO });
- vn_encode_VkCommandBufferBeginInfo_pnext(enc, val->pNext);
- vn_encode_VkCommandBufferBeginInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkCommandBufferBeginInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -380,6 +462,40 @@ vn_replace_VkCommandBufferBeginInfo_handle(VkCommandBufferBeginInfo *val)
} while (pnext);
}
+/* struct VkMultiDrawInfoEXT */
+
+static inline void
+vn_decode_VkMultiDrawInfoEXT_temp(struct vn_cs_decoder *dec, VkMultiDrawInfoEXT *val)
+{
+ vn_decode_uint32_t(dec, &val->firstVertex);
+ vn_decode_uint32_t(dec, &val->vertexCount);
+}
+
+static inline void
+vn_replace_VkMultiDrawInfoEXT_handle(VkMultiDrawInfoEXT *val)
+{
+ /* skip val->firstVertex */
+ /* skip val->vertexCount */
+}
+
+/* struct VkMultiDrawIndexedInfoEXT */
+
+static inline void
+vn_decode_VkMultiDrawIndexedInfoEXT_temp(struct vn_cs_decoder *dec, VkMultiDrawIndexedInfoEXT *val)
+{
+ vn_decode_uint32_t(dec, &val->firstIndex);
+ vn_decode_uint32_t(dec, &val->indexCount);
+ vn_decode_int32_t(dec, &val->vertexOffset);
+}
+
+static inline void
+vn_replace_VkMultiDrawIndexedInfoEXT_handle(VkMultiDrawIndexedInfoEXT *val)
+{
+ /* skip val->firstIndex */
+ /* skip val->indexCount */
+ /* skip val->vertexOffset */
+}
+
/* struct VkBufferCopy */
static inline void
@@ -494,38 +610,6 @@ vn_replace_VkBufferImageCopy_handle(VkBufferImageCopy *val)
vn_replace_VkExtent3D_handle(&val->imageExtent);
}
-/* union VkClearColorValue */
-
-static inline void
-vn_decode_VkClearColorValue_temp(struct vn_cs_decoder *dec, VkClearColorValue *val)
-{
- uint32_t tag;
- vn_decode_uint32_t(dec, &tag);
- switch (tag) {
- case 0:
- {
- const size_t array_size = vn_decode_array_size(dec, 4);
- vn_decode_float_array(dec, val->float32, array_size);
- }
- break;
- case 1:
- {
- const size_t array_size = vn_decode_array_size(dec, 4);
- vn_decode_int32_t_array(dec, val->int32, array_size);
- }
- break;
- case 2:
- {
- const size_t array_size = vn_decode_array_size(dec, 4);
- vn_decode_uint32_t_array(dec, val->uint32, array_size);
- }
- break;
- default:
- vn_cs_decoder_set_fatal(dec);
- break;
- }
-}
-
/* struct VkClearDepthStencilValue */
static inline void
@@ -819,6 +903,67 @@ vn_replace_VkImageMemoryBarrier_handle(VkImageMemoryBarrier *val)
} while (pnext);
}
+/* struct VkConditionalRenderingBeginInfoEXT chain */
+
+static inline void *
+vn_decode_VkConditionalRenderingBeginInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkConditionalRenderingBeginInfoEXT_self_temp(struct vn_cs_decoder *dec, VkConditionalRenderingBeginInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBuffer_lookup(dec, &val->buffer);
+ vn_decode_VkDeviceSize(dec, &val->offset);
+ vn_decode_VkFlags(dec, &val->flags);
+}
+
+static inline void
+vn_decode_VkConditionalRenderingBeginInfoEXT_temp(struct vn_cs_decoder *dec, VkConditionalRenderingBeginInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkConditionalRenderingBeginInfoEXT_pnext_temp(dec);
+ vn_decode_VkConditionalRenderingBeginInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkConditionalRenderingBeginInfoEXT_handle_self(VkConditionalRenderingBeginInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkBuffer_handle(&val->buffer);
+ /* skip val->offset */
+ /* skip val->flags */
+}
+
+static inline void
+vn_replace_VkConditionalRenderingBeginInfoEXT_handle(VkConditionalRenderingBeginInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT:
+ vn_replace_VkConditionalRenderingBeginInfoEXT_handle_self((VkConditionalRenderingBeginInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkDeviceGroupRenderPassBeginInfo chain */
static inline void *
@@ -1183,6 +1328,1249 @@ vn_replace_VkSubpassEndInfo_handle(VkSubpassEndInfo *val)
} while (pnext);
}
+/* struct VkBufferCopy2 chain */
+
+static inline void *
+vn_decode_VkBufferCopy2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkBufferCopy2_self_temp(struct vn_cs_decoder *dec, VkBufferCopy2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkDeviceSize(dec, &val->srcOffset);
+ vn_decode_VkDeviceSize(dec, &val->dstOffset);
+ vn_decode_VkDeviceSize(dec, &val->size);
+}
+
+static inline void
+vn_decode_VkBufferCopy2_temp(struct vn_cs_decoder *dec, VkBufferCopy2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_BUFFER_COPY_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkBufferCopy2_pnext_temp(dec);
+ vn_decode_VkBufferCopy2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkBufferCopy2_handle_self(VkBufferCopy2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->srcOffset */
+ /* skip val->dstOffset */
+ /* skip val->size */
+}
+
+static inline void
+vn_replace_VkBufferCopy2_handle(VkBufferCopy2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_BUFFER_COPY_2:
+ vn_replace_VkBufferCopy2_handle_self((VkBufferCopy2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCopyBufferInfo2 chain */
+
+static inline void *
+vn_decode_VkCopyBufferInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkCopyBufferInfo2_self_temp(struct vn_cs_decoder *dec, VkCopyBufferInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBuffer_lookup(dec, &val->srcBuffer);
+ vn_decode_VkBuffer_lookup(dec, &val->dstBuffer);
+ vn_decode_uint32_t(dec, &val->regionCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pRegions) * iter_count);
+ if (!val->pRegions) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkBufferCopy2_temp(dec, &((VkBufferCopy2 *)val->pRegions)[i]);
+ } else {
+ vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkCopyBufferInfo2_temp(struct vn_cs_decoder *dec, VkCopyBufferInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCopyBufferInfo2_pnext_temp(dec);
+ vn_decode_VkCopyBufferInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCopyBufferInfo2_handle_self(VkCopyBufferInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkBuffer_handle(&val->srcBuffer);
+ vn_replace_VkBuffer_handle(&val->dstBuffer);
+ /* skip val->regionCount */
+ if (val->pRegions) {
+ for (uint32_t i = 0; i < val->regionCount; i++)
+ vn_replace_VkBufferCopy2_handle(&((VkBufferCopy2 *)val->pRegions)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkCopyBufferInfo2_handle(VkCopyBufferInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2:
+ vn_replace_VkCopyBufferInfo2_handle_self((VkCopyBufferInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkImageCopy2 chain */
+
+static inline void *
+vn_decode_VkImageCopy2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkImageCopy2_self_temp(struct vn_cs_decoder *dec, VkImageCopy2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->srcSubresource);
+ vn_decode_VkOffset3D_temp(dec, &val->srcOffset);
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->dstSubresource);
+ vn_decode_VkOffset3D_temp(dec, &val->dstOffset);
+ vn_decode_VkExtent3D_temp(dec, &val->extent);
+}
+
+static inline void
+vn_decode_VkImageCopy2_temp(struct vn_cs_decoder *dec, VkImageCopy2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_IMAGE_COPY_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkImageCopy2_pnext_temp(dec);
+ vn_decode_VkImageCopy2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkImageCopy2_handle_self(VkImageCopy2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImageSubresourceLayers_handle(&val->srcSubresource);
+ vn_replace_VkOffset3D_handle(&val->srcOffset);
+ vn_replace_VkImageSubresourceLayers_handle(&val->dstSubresource);
+ vn_replace_VkOffset3D_handle(&val->dstOffset);
+ vn_replace_VkExtent3D_handle(&val->extent);
+}
+
+static inline void
+vn_replace_VkImageCopy2_handle(VkImageCopy2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_COPY_2:
+ vn_replace_VkImageCopy2_handle_self((VkImageCopy2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCopyImageInfo2 chain */
+
+static inline void *
+vn_decode_VkCopyImageInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkCopyImageInfo2_self_temp(struct vn_cs_decoder *dec, VkCopyImageInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImage_lookup(dec, &val->srcImage);
+ vn_decode_VkImageLayout(dec, &val->srcImageLayout);
+ vn_decode_VkImage_lookup(dec, &val->dstImage);
+ vn_decode_VkImageLayout(dec, &val->dstImageLayout);
+ vn_decode_uint32_t(dec, &val->regionCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pRegions) * iter_count);
+ if (!val->pRegions) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkImageCopy2_temp(dec, &((VkImageCopy2 *)val->pRegions)[i]);
+ } else {
+ vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkCopyImageInfo2_temp(struct vn_cs_decoder *dec, VkCopyImageInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCopyImageInfo2_pnext_temp(dec);
+ vn_decode_VkCopyImageInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCopyImageInfo2_handle_self(VkCopyImageInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImage_handle(&val->srcImage);
+ /* skip val->srcImageLayout */
+ vn_replace_VkImage_handle(&val->dstImage);
+ /* skip val->dstImageLayout */
+ /* skip val->regionCount */
+ if (val->pRegions) {
+ for (uint32_t i = 0; i < val->regionCount; i++)
+ vn_replace_VkImageCopy2_handle(&((VkImageCopy2 *)val->pRegions)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkCopyImageInfo2_handle(VkCopyImageInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2:
+ vn_replace_VkCopyImageInfo2_handle_self((VkCopyImageInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkImageBlit2 chain */
+
+static inline void *
+vn_decode_VkImageBlit2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkImageBlit2_self_temp(struct vn_cs_decoder *dec, VkImageBlit2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->srcSubresource);
+ {
+ const uint32_t iter_count = vn_decode_array_size(dec, 2);
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkOffset3D_temp(dec, &val->srcOffsets[i]);
+ }
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->dstSubresource);
+ {
+ const uint32_t iter_count = vn_decode_array_size(dec, 2);
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkOffset3D_temp(dec, &val->dstOffsets[i]);
+ }
+}
+
+static inline void
+vn_decode_VkImageBlit2_temp(struct vn_cs_decoder *dec, VkImageBlit2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_IMAGE_BLIT_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkImageBlit2_pnext_temp(dec);
+ vn_decode_VkImageBlit2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkImageBlit2_handle_self(VkImageBlit2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImageSubresourceLayers_handle(&val->srcSubresource);
+ for (uint32_t i = 0; i < 2; i++)
+ vn_replace_VkOffset3D_handle(&val->srcOffsets[i]);
+ vn_replace_VkImageSubresourceLayers_handle(&val->dstSubresource);
+ for (uint32_t i = 0; i < 2; i++)
+ vn_replace_VkOffset3D_handle(&val->dstOffsets[i]);
+}
+
+static inline void
+vn_replace_VkImageBlit2_handle(VkImageBlit2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_BLIT_2:
+ vn_replace_VkImageBlit2_handle_self((VkImageBlit2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkBlitImageInfo2 chain */
+
+static inline void *
+vn_decode_VkBlitImageInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkBlitImageInfo2_self_temp(struct vn_cs_decoder *dec, VkBlitImageInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImage_lookup(dec, &val->srcImage);
+ vn_decode_VkImageLayout(dec, &val->srcImageLayout);
+ vn_decode_VkImage_lookup(dec, &val->dstImage);
+ vn_decode_VkImageLayout(dec, &val->dstImageLayout);
+ vn_decode_uint32_t(dec, &val->regionCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pRegions) * iter_count);
+ if (!val->pRegions) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkImageBlit2_temp(dec, &((VkImageBlit2 *)val->pRegions)[i]);
+ } else {
+ vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = NULL;
+ }
+ vn_decode_VkFilter(dec, &val->filter);
+}
+
+static inline void
+vn_decode_VkBlitImageInfo2_temp(struct vn_cs_decoder *dec, VkBlitImageInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkBlitImageInfo2_pnext_temp(dec);
+ vn_decode_VkBlitImageInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkBlitImageInfo2_handle_self(VkBlitImageInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImage_handle(&val->srcImage);
+ /* skip val->srcImageLayout */
+ vn_replace_VkImage_handle(&val->dstImage);
+ /* skip val->dstImageLayout */
+ /* skip val->regionCount */
+ if (val->pRegions) {
+ for (uint32_t i = 0; i < val->regionCount; i++)
+ vn_replace_VkImageBlit2_handle(&((VkImageBlit2 *)val->pRegions)[i]);
+ }
+ /* skip val->filter */
+}
+
+static inline void
+vn_replace_VkBlitImageInfo2_handle(VkBlitImageInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2:
+ vn_replace_VkBlitImageInfo2_handle_self((VkBlitImageInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkBufferImageCopy2 chain */
+
+static inline void *
+vn_decode_VkBufferImageCopy2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkBufferImageCopy2_self_temp(struct vn_cs_decoder *dec, VkBufferImageCopy2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkDeviceSize(dec, &val->bufferOffset);
+ vn_decode_uint32_t(dec, &val->bufferRowLength);
+ vn_decode_uint32_t(dec, &val->bufferImageHeight);
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->imageSubresource);
+ vn_decode_VkOffset3D_temp(dec, &val->imageOffset);
+ vn_decode_VkExtent3D_temp(dec, &val->imageExtent);
+}
+
+static inline void
+vn_decode_VkBufferImageCopy2_temp(struct vn_cs_decoder *dec, VkBufferImageCopy2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkBufferImageCopy2_pnext_temp(dec);
+ vn_decode_VkBufferImageCopy2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkBufferImageCopy2_handle_self(VkBufferImageCopy2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->bufferOffset */
+ /* skip val->bufferRowLength */
+ /* skip val->bufferImageHeight */
+ vn_replace_VkImageSubresourceLayers_handle(&val->imageSubresource);
+ vn_replace_VkOffset3D_handle(&val->imageOffset);
+ vn_replace_VkExtent3D_handle(&val->imageExtent);
+}
+
+static inline void
+vn_replace_VkBufferImageCopy2_handle(VkBufferImageCopy2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2:
+ vn_replace_VkBufferImageCopy2_handle_self((VkBufferImageCopy2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCopyBufferToImageInfo2 chain */
+
+static inline void *
+vn_decode_VkCopyBufferToImageInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkCopyBufferToImageInfo2_self_temp(struct vn_cs_decoder *dec, VkCopyBufferToImageInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBuffer_lookup(dec, &val->srcBuffer);
+ vn_decode_VkImage_lookup(dec, &val->dstImage);
+ vn_decode_VkImageLayout(dec, &val->dstImageLayout);
+ vn_decode_uint32_t(dec, &val->regionCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pRegions) * iter_count);
+ if (!val->pRegions) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkBufferImageCopy2_temp(dec, &((VkBufferImageCopy2 *)val->pRegions)[i]);
+ } else {
+ vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkCopyBufferToImageInfo2_temp(struct vn_cs_decoder *dec, VkCopyBufferToImageInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCopyBufferToImageInfo2_pnext_temp(dec);
+ vn_decode_VkCopyBufferToImageInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCopyBufferToImageInfo2_handle_self(VkCopyBufferToImageInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkBuffer_handle(&val->srcBuffer);
+ vn_replace_VkImage_handle(&val->dstImage);
+ /* skip val->dstImageLayout */
+ /* skip val->regionCount */
+ if (val->pRegions) {
+ for (uint32_t i = 0; i < val->regionCount; i++)
+ vn_replace_VkBufferImageCopy2_handle(&((VkBufferImageCopy2 *)val->pRegions)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkCopyBufferToImageInfo2_handle(VkCopyBufferToImageInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2:
+ vn_replace_VkCopyBufferToImageInfo2_handle_self((VkCopyBufferToImageInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCopyImageToBufferInfo2 chain */
+
+static inline void *
+vn_decode_VkCopyImageToBufferInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkCopyImageToBufferInfo2_self_temp(struct vn_cs_decoder *dec, VkCopyImageToBufferInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImage_lookup(dec, &val->srcImage);
+ vn_decode_VkImageLayout(dec, &val->srcImageLayout);
+ vn_decode_VkBuffer_lookup(dec, &val->dstBuffer);
+ vn_decode_uint32_t(dec, &val->regionCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pRegions) * iter_count);
+ if (!val->pRegions) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkBufferImageCopy2_temp(dec, &((VkBufferImageCopy2 *)val->pRegions)[i]);
+ } else {
+ vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkCopyImageToBufferInfo2_temp(struct vn_cs_decoder *dec, VkCopyImageToBufferInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCopyImageToBufferInfo2_pnext_temp(dec);
+ vn_decode_VkCopyImageToBufferInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCopyImageToBufferInfo2_handle_self(VkCopyImageToBufferInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImage_handle(&val->srcImage);
+ /* skip val->srcImageLayout */
+ vn_replace_VkBuffer_handle(&val->dstBuffer);
+ /* skip val->regionCount */
+ if (val->pRegions) {
+ for (uint32_t i = 0; i < val->regionCount; i++)
+ vn_replace_VkBufferImageCopy2_handle(&((VkBufferImageCopy2 *)val->pRegions)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkCopyImageToBufferInfo2_handle(VkCopyImageToBufferInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2:
+ vn_replace_VkCopyImageToBufferInfo2_handle_self((VkCopyImageToBufferInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkImageResolve2 chain */
+
+static inline void *
+vn_decode_VkImageResolve2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkImageResolve2_self_temp(struct vn_cs_decoder *dec, VkImageResolve2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->srcSubresource);
+ vn_decode_VkOffset3D_temp(dec, &val->srcOffset);
+ vn_decode_VkImageSubresourceLayers_temp(dec, &val->dstSubresource);
+ vn_decode_VkOffset3D_temp(dec, &val->dstOffset);
+ vn_decode_VkExtent3D_temp(dec, &val->extent);
+}
+
+static inline void
+vn_decode_VkImageResolve2_temp(struct vn_cs_decoder *dec, VkImageResolve2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkImageResolve2_pnext_temp(dec);
+ vn_decode_VkImageResolve2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkImageResolve2_handle_self(VkImageResolve2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImageSubresourceLayers_handle(&val->srcSubresource);
+ vn_replace_VkOffset3D_handle(&val->srcOffset);
+ vn_replace_VkImageSubresourceLayers_handle(&val->dstSubresource);
+ vn_replace_VkOffset3D_handle(&val->dstOffset);
+ vn_replace_VkExtent3D_handle(&val->extent);
+}
+
+static inline void
+vn_replace_VkImageResolve2_handle(VkImageResolve2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2:
+ vn_replace_VkImageResolve2_handle_self((VkImageResolve2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkResolveImageInfo2 chain */
+
+static inline void *
+vn_decode_VkResolveImageInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkResolveImageInfo2_self_temp(struct vn_cs_decoder *dec, VkResolveImageInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImage_lookup(dec, &val->srcImage);
+ vn_decode_VkImageLayout(dec, &val->srcImageLayout);
+ vn_decode_VkImage_lookup(dec, &val->dstImage);
+ vn_decode_VkImageLayout(dec, &val->dstImageLayout);
+ vn_decode_uint32_t(dec, &val->regionCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pRegions) * iter_count);
+ if (!val->pRegions) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkImageResolve2_temp(dec, &((VkImageResolve2 *)val->pRegions)[i]);
+ } else {
+ vn_decode_array_size(dec, val->regionCount);
+ val->pRegions = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkResolveImageInfo2_temp(struct vn_cs_decoder *dec, VkResolveImageInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkResolveImageInfo2_pnext_temp(dec);
+ vn_decode_VkResolveImageInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkResolveImageInfo2_handle_self(VkResolveImageInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImage_handle(&val->srcImage);
+ /* skip val->srcImageLayout */
+ vn_replace_VkImage_handle(&val->dstImage);
+ /* skip val->dstImageLayout */
+ /* skip val->regionCount */
+ if (val->pRegions) {
+ for (uint32_t i = 0; i < val->regionCount; i++)
+ vn_replace_VkImageResolve2_handle(&((VkImageResolve2 *)val->pRegions)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkResolveImageInfo2_handle(VkResolveImageInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2:
+ vn_replace_VkResolveImageInfo2_handle_self((VkResolveImageInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkBufferMemoryBarrier2 chain */
+
+static inline void *
+vn_decode_VkBufferMemoryBarrier2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkBufferMemoryBarrier2_self_temp(struct vn_cs_decoder *dec, VkBufferMemoryBarrier2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags64(dec, &val->srcStageMask);
+ vn_decode_VkFlags64(dec, &val->srcAccessMask);
+ vn_decode_VkFlags64(dec, &val->dstStageMask);
+ vn_decode_VkFlags64(dec, &val->dstAccessMask);
+ vn_decode_uint32_t(dec, &val->srcQueueFamilyIndex);
+ vn_decode_uint32_t(dec, &val->dstQueueFamilyIndex);
+ vn_decode_VkBuffer_lookup(dec, &val->buffer);
+ vn_decode_VkDeviceSize(dec, &val->offset);
+ vn_decode_VkDeviceSize(dec, &val->size);
+}
+
+static inline void
+vn_decode_VkBufferMemoryBarrier2_temp(struct vn_cs_decoder *dec, VkBufferMemoryBarrier2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkBufferMemoryBarrier2_pnext_temp(dec);
+ vn_decode_VkBufferMemoryBarrier2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkBufferMemoryBarrier2_handle_self(VkBufferMemoryBarrier2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->srcStageMask */
+ /* skip val->srcAccessMask */
+ /* skip val->dstStageMask */
+ /* skip val->dstAccessMask */
+ /* skip val->srcQueueFamilyIndex */
+ /* skip val->dstQueueFamilyIndex */
+ vn_replace_VkBuffer_handle(&val->buffer);
+ /* skip val->offset */
+ /* skip val->size */
+}
+
+static inline void
+vn_replace_VkBufferMemoryBarrier2_handle(VkBufferMemoryBarrier2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2:
+ vn_replace_VkBufferMemoryBarrier2_handle_self((VkBufferMemoryBarrier2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkImageMemoryBarrier2 chain */
+
+static inline void *
+vn_decode_VkImageMemoryBarrier2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkImageMemoryBarrier2_self_temp(struct vn_cs_decoder *dec, VkImageMemoryBarrier2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags64(dec, &val->srcStageMask);
+ vn_decode_VkFlags64(dec, &val->srcAccessMask);
+ vn_decode_VkFlags64(dec, &val->dstStageMask);
+ vn_decode_VkFlags64(dec, &val->dstAccessMask);
+ vn_decode_VkImageLayout(dec, &val->oldLayout);
+ vn_decode_VkImageLayout(dec, &val->newLayout);
+ vn_decode_uint32_t(dec, &val->srcQueueFamilyIndex);
+ vn_decode_uint32_t(dec, &val->dstQueueFamilyIndex);
+ vn_decode_VkImage_lookup(dec, &val->image);
+ vn_decode_VkImageSubresourceRange_temp(dec, &val->subresourceRange);
+}
+
+static inline void
+vn_decode_VkImageMemoryBarrier2_temp(struct vn_cs_decoder *dec, VkImageMemoryBarrier2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkImageMemoryBarrier2_pnext_temp(dec);
+ vn_decode_VkImageMemoryBarrier2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkImageMemoryBarrier2_handle_self(VkImageMemoryBarrier2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->srcStageMask */
+ /* skip val->srcAccessMask */
+ /* skip val->dstStageMask */
+ /* skip val->dstAccessMask */
+ /* skip val->oldLayout */
+ /* skip val->newLayout */
+ /* skip val->srcQueueFamilyIndex */
+ /* skip val->dstQueueFamilyIndex */
+ vn_replace_VkImage_handle(&val->image);
+ vn_replace_VkImageSubresourceRange_handle(&val->subresourceRange);
+}
+
+static inline void
+vn_replace_VkImageMemoryBarrier2_handle(VkImageMemoryBarrier2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2:
+ vn_replace_VkImageMemoryBarrier2_handle_self((VkImageMemoryBarrier2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkDependencyInfo chain */
+
+static inline void *
+vn_decode_VkDependencyInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkDependencyInfo_self_temp(struct vn_cs_decoder *dec, VkDependencyInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->dependencyFlags);
+ vn_decode_uint32_t(dec, &val->memoryBarrierCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->memoryBarrierCount);
+ val->pMemoryBarriers = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pMemoryBarriers) * iter_count);
+ if (!val->pMemoryBarriers) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkMemoryBarrier2_temp(dec, &((VkMemoryBarrier2 *)val->pMemoryBarriers)[i]);
+ } else {
+ vn_decode_array_size(dec, val->memoryBarrierCount);
+ val->pMemoryBarriers = NULL;
+ }
+ vn_decode_uint32_t(dec, &val->bufferMemoryBarrierCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->bufferMemoryBarrierCount);
+ val->pBufferMemoryBarriers = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pBufferMemoryBarriers) * iter_count);
+ if (!val->pBufferMemoryBarriers) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkBufferMemoryBarrier2_temp(dec, &((VkBufferMemoryBarrier2 *)val->pBufferMemoryBarriers)[i]);
+ } else {
+ vn_decode_array_size(dec, val->bufferMemoryBarrierCount);
+ val->pBufferMemoryBarriers = NULL;
+ }
+ vn_decode_uint32_t(dec, &val->imageMemoryBarrierCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->imageMemoryBarrierCount);
+ val->pImageMemoryBarriers = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pImageMemoryBarriers) * iter_count);
+ if (!val->pImageMemoryBarriers) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkImageMemoryBarrier2_temp(dec, &((VkImageMemoryBarrier2 *)val->pImageMemoryBarriers)[i]);
+ } else {
+ vn_decode_array_size(dec, val->imageMemoryBarrierCount);
+ val->pImageMemoryBarriers = NULL;
+ }
+}
+
+/* Decode a wire VkDependencyInfo: sType must be
+ * VK_STRUCTURE_TYPE_DEPENDENCY_INFO or the decoder is marked fatal; the
+ * pNext chain and struct body are still consumed to keep the stream in sync. */
+static inline void
+vn_decode_VkDependencyInfo_temp(struct vn_cs_decoder *dec, VkDependencyInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DEPENDENCY_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDependencyInfo_pnext_temp(dec);
+ vn_decode_VkDependencyInfo_self_temp(dec, val);
+}
+
+/* Rewrite guest object handles to host handles inside each barrier array;
+ * scalar fields carry no handles and are skipped. */
+static inline void
+vn_replace_VkDependencyInfo_handle_self(VkDependencyInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->dependencyFlags */
+ /* skip val->memoryBarrierCount */
+ if (val->pMemoryBarriers) {
+ for (uint32_t i = 0; i < val->memoryBarrierCount; i++)
+ vn_replace_VkMemoryBarrier2_handle(&((VkMemoryBarrier2 *)val->pMemoryBarriers)[i]);
+ }
+ /* skip val->bufferMemoryBarrierCount */
+ if (val->pBufferMemoryBarriers) {
+ for (uint32_t i = 0; i < val->bufferMemoryBarrierCount; i++)
+ vn_replace_VkBufferMemoryBarrier2_handle(&((VkBufferMemoryBarrier2 *)val->pBufferMemoryBarriers)[i]);
+ }
+ /* skip val->imageMemoryBarrierCount */
+ if (val->pImageMemoryBarriers) {
+ for (uint32_t i = 0; i < val->imageMemoryBarrierCount; i++)
+ vn_replace_VkImageMemoryBarrier2_handle(&((VkImageMemoryBarrier2 *)val->pImageMemoryBarriers)[i]);
+ }
+}
+
+/* Walk the full pNext chain (including val itself) and dispatch handle
+ * replacement by sType; unknown structs are silently ignored. */
+static inline void
+vn_replace_VkDependencyInfo_handle(VkDependencyInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_DEPENDENCY_INFO:
+ vn_replace_VkDependencyInfo_handle_self((VkDependencyInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkRenderingAttachmentInfo chain */
+
+/* VkRenderingAttachmentInfo supports no pNext extensions here: a non-NULL
+ * encoded pointer is a protocol error and marks the decoder fatal. */
+static inline void *
+vn_decode_VkRenderingAttachmentInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Decode the struct body field-by-field in wire order; image views are
+ * looked up in the object table rather than decoded as raw handles. */
+static inline void
+vn_decode_VkRenderingAttachmentInfo_self_temp(struct vn_cs_decoder *dec, VkRenderingAttachmentInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkImageView_lookup(dec, &val->imageView);
+ vn_decode_VkImageLayout(dec, &val->imageLayout);
+ vn_decode_VkResolveModeFlagBits(dec, &val->resolveMode);
+ vn_decode_VkImageView_lookup(dec, &val->resolveImageView);
+ vn_decode_VkImageLayout(dec, &val->resolveImageLayout);
+ vn_decode_VkAttachmentLoadOp(dec, &val->loadOp);
+ vn_decode_VkAttachmentStoreOp(dec, &val->storeOp);
+ vn_decode_VkClearValue_temp(dec, &val->clearValue);
+}
+
+/* Decode sType (fatal on mismatch), then pNext and body. */
+static inline void
+vn_decode_VkRenderingAttachmentInfo_temp(struct vn_cs_decoder *dec, VkRenderingAttachmentInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkRenderingAttachmentInfo_pnext_temp(dec);
+ vn_decode_VkRenderingAttachmentInfo_self_temp(dec, val);
+}
+
+/* Replace the two image-view handles; every other field carries no handles. */
+static inline void
+vn_replace_VkRenderingAttachmentInfo_handle_self(VkRenderingAttachmentInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkImageView_handle(&val->imageView);
+ /* skip val->imageLayout */
+ /* skip val->resolveMode */
+ vn_replace_VkImageView_handle(&val->resolveImageView);
+ /* skip val->resolveImageLayout */
+ /* skip val->loadOp */
+ /* skip val->storeOp */
+ /* skip val->clearValue */
+}
+
+/* Chain walker: dispatch handle replacement by sType over the pNext chain. */
+static inline void
+vn_replace_VkRenderingAttachmentInfo_handle(VkRenderingAttachmentInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO:
+ vn_replace_VkRenderingAttachmentInfo_handle_self((VkRenderingAttachmentInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkRenderingInfo chain */
+
+/* Decode the pNext chain recursively. Only
+ * VkDeviceGroupRenderPassBeginInfo is accepted; any other extension struct
+ * marks the decoder fatal. Allocation failure yields a silently truncated
+ * chain (pnext left NULL by vn_cs_decoder_alloc_temp returning NULL). */
+static inline void *
+vn_decode_VkRenderingInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkDeviceGroupRenderPassBeginInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkRenderingInfo_pnext_temp(dec);
+ vn_decode_VkDeviceGroupRenderPassBeginInfo_self_temp(dec, (VkDeviceGroupRenderPassBeginInfo *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+/* Decode the struct body. Each optional array/pointer is prefixed on the
+ * wire by a size/pointer marker; a zero marker yields a NULL member. Early
+ * returns on temp-allocation failure leave the stream partially consumed,
+ * which the caller must treat as a decode error. */
+static inline void
+vn_decode_VkRenderingInfo_self_temp(struct vn_cs_decoder *dec, VkRenderingInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_VkRect2D_temp(dec, &val->renderArea);
+ vn_decode_uint32_t(dec, &val->layerCount);
+ vn_decode_uint32_t(dec, &val->viewMask);
+ vn_decode_uint32_t(dec, &val->colorAttachmentCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->colorAttachmentCount);
+ val->pColorAttachments = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pColorAttachments) * iter_count);
+ if (!val->pColorAttachments) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkRenderingAttachmentInfo_temp(dec, &((VkRenderingAttachmentInfo *)val->pColorAttachments)[i]);
+ } else {
+ vn_decode_array_size(dec, val->colorAttachmentCount);
+ val->pColorAttachments = NULL;
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ val->pDepthAttachment = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pDepthAttachment));
+ if (!val->pDepthAttachment) return;
+ vn_decode_VkRenderingAttachmentInfo_temp(dec, (VkRenderingAttachmentInfo *)val->pDepthAttachment);
+ } else {
+ val->pDepthAttachment = NULL;
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ val->pStencilAttachment = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pStencilAttachment));
+ if (!val->pStencilAttachment) return;
+ vn_decode_VkRenderingAttachmentInfo_temp(dec, (VkRenderingAttachmentInfo *)val->pStencilAttachment);
+ } else {
+ val->pStencilAttachment = NULL;
+ }
+}
+
+/* Decode sType (fatal on mismatch), then pNext and body. */
+static inline void
+vn_decode_VkRenderingInfo_temp(struct vn_cs_decoder *dec, VkRenderingInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_RENDERING_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkRenderingInfo_pnext_temp(dec);
+ vn_decode_VkRenderingInfo_self_temp(dec, val);
+}
+
+/* Replace handles nested inside the attachment structs; note the nested
+ * calls are the chain-walking _handle variant, matching the generator's
+ * pattern for struct-typed members. */
+static inline void
+vn_replace_VkRenderingInfo_handle_self(VkRenderingInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+ vn_replace_VkRect2D_handle(&val->renderArea);
+ /* skip val->layerCount */
+ /* skip val->viewMask */
+ /* skip val->colorAttachmentCount */
+ if (val->pColorAttachments) {
+ for (uint32_t i = 0; i < val->colorAttachmentCount; i++)
+ vn_replace_VkRenderingAttachmentInfo_handle(&((VkRenderingAttachmentInfo *)val->pColorAttachments)[i]);
+ }
+ if (val->pDepthAttachment)
+ vn_replace_VkRenderingAttachmentInfo_handle((VkRenderingAttachmentInfo *)val->pDepthAttachment);
+ if (val->pStencilAttachment)
+ vn_replace_VkRenderingAttachmentInfo_handle((VkRenderingAttachmentInfo *)val->pStencilAttachment);
+}
+
+/* Chain walker over VkRenderingInfo and its one supported extension. */
+static inline void
+vn_replace_VkRenderingInfo_handle(VkRenderingInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_RENDERING_INFO:
+ vn_replace_VkRenderingInfo_handle_self((VkRenderingInfo *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+ vn_replace_VkDeviceGroupRenderPassBeginInfo_handle_self((VkDeviceGroupRenderPassBeginInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
static inline void vn_decode_vkAllocateCommandBuffers_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkAllocateCommandBuffers *args)
{
vn_decode_VkDevice_lookup(dec, &args->device);
@@ -1790,6 +3178,103 @@ static inline void vn_encode_vkCmdDrawIndexed_reply(struct vn_cs_encoder *enc, c
/* skip args->firstInstance */
}
+/* VK_EXT_multi_draw: decode vkCmdDrawMultiEXT args. pVertexInfo is
+ * optional, hence the _unchecked array-size decode in the NULL branch
+ * (the encoded size need not match drawCount when the array is absent). */
+static inline void vn_decode_vkCmdDrawMultiEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdDrawMultiEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->drawCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->drawCount);
+ args->pVertexInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pVertexInfo) * iter_count);
+ if (!args->pVertexInfo) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkMultiDrawInfoEXT_temp(dec, &((VkMultiDrawInfoEXT *)args->pVertexInfo)[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pVertexInfo = NULL;
+ }
+ vn_decode_uint32_t(dec, &args->instanceCount);
+ vn_decode_uint32_t(dec, &args->firstInstance);
+ vn_decode_uint32_t(dec, &args->stride);
+}
+
+/* Only the command buffer carries a handle; VkMultiDrawInfoEXT entries are
+ * handle-free but go through the generated per-struct replace anyway. */
+static inline void vn_replace_vkCmdDrawMultiEXT_args_handle(struct vn_command_vkCmdDrawMultiEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->drawCount */
+ if (args->pVertexInfo) {
+ for (uint32_t i = 0; i < args->drawCount; i++)
+ vn_replace_VkMultiDrawInfoEXT_handle(&((VkMultiDrawInfoEXT *)args->pVertexInfo)[i]);
+ }
+ /* skip args->instanceCount */
+ /* skip args->firstInstance */
+ /* skip args->stride */
+}
+
+/* Command-buffer commands return nothing: the reply is just the type tag. */
+static inline void vn_encode_vkCmdDrawMultiEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdDrawMultiEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdDrawMultiEXT_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->drawCount */
+ /* skip args->pVertexInfo */
+ /* skip args->instanceCount */
+ /* skip args->firstInstance */
+ /* skip args->stride */
+}
+
+/* Decode vkCmdDrawMultiIndexedEXT args; pVertexOffset is an optional
+ * single int32 encoded behind a simple-pointer marker. */
+static inline void vn_decode_vkCmdDrawMultiIndexedEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdDrawMultiIndexedEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->drawCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->drawCount);
+ args->pIndexInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pIndexInfo) * iter_count);
+ if (!args->pIndexInfo) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkMultiDrawIndexedInfoEXT_temp(dec, &((VkMultiDrawIndexedInfoEXT *)args->pIndexInfo)[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pIndexInfo = NULL;
+ }
+ vn_decode_uint32_t(dec, &args->instanceCount);
+ vn_decode_uint32_t(dec, &args->firstInstance);
+ vn_decode_uint32_t(dec, &args->stride);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pVertexOffset = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pVertexOffset));
+ if (!args->pVertexOffset) return;
+ vn_decode_int32_t(dec, (int32_t *)args->pVertexOffset);
+ } else {
+ args->pVertexOffset = NULL;
+ }
+}
+
+static inline void vn_replace_vkCmdDrawMultiIndexedEXT_args_handle(struct vn_command_vkCmdDrawMultiIndexedEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->drawCount */
+ if (args->pIndexInfo) {
+ for (uint32_t i = 0; i < args->drawCount; i++)
+ vn_replace_VkMultiDrawIndexedInfoEXT_handle(&((VkMultiDrawIndexedInfoEXT *)args->pIndexInfo)[i]);
+ }
+ /* skip args->instanceCount */
+ /* skip args->firstInstance */
+ /* skip args->stride */
+ /* skip args->pVertexOffset */
+}
+
+static inline void vn_encode_vkCmdDrawMultiIndexedEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdDrawMultiIndexedEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdDrawMultiIndexedEXT_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->drawCount */
+ /* skip args->pIndexInfo */
+ /* skip args->instanceCount */
+ /* skip args->firstInstance */
+ /* skip args->stride */
+ /* skip args->pVertexOffset */
+}
+
static inline void vn_decode_vkCmdDrawIndirect_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdDrawIndirect *args)
{
vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
@@ -2661,6 +4146,51 @@ static inline void vn_encode_vkCmdEndQuery_reply(struct vn_cs_encoder *enc, cons
/* skip args->query */
}
+/* VK_EXT_conditional_rendering: pConditionalRenderingBegin is required by
+ * the API, so a NULL-encoded pointer marks the decoder fatal. */
+static inline void vn_decode_vkCmdBeginConditionalRenderingEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdBeginConditionalRenderingEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pConditionalRenderingBegin = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pConditionalRenderingBegin));
+ if (!args->pConditionalRenderingBegin) return;
+ vn_decode_VkConditionalRenderingBeginInfoEXT_temp(dec, (VkConditionalRenderingBeginInfoEXT *)args->pConditionalRenderingBegin);
+ } else {
+ args->pConditionalRenderingBegin = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdBeginConditionalRenderingEXT_args_handle(struct vn_command_vkCmdBeginConditionalRenderingEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pConditionalRenderingBegin)
+ vn_replace_VkConditionalRenderingBeginInfoEXT_handle((VkConditionalRenderingBeginInfoEXT *)args->pConditionalRenderingBegin);
+}
+
+static inline void vn_encode_vkCmdBeginConditionalRenderingEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdBeginConditionalRenderingEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdBeginConditionalRenderingEXT_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pConditionalRenderingBegin */
+}
+
+/* vkCmdEndConditionalRenderingEXT takes only the command buffer. */
+static inline void vn_decode_vkCmdEndConditionalRenderingEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdEndConditionalRenderingEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+}
+
+static inline void vn_replace_vkCmdEndConditionalRenderingEXT_args_handle(struct vn_command_vkCmdEndConditionalRenderingEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+}
+
+static inline void vn_encode_vkCmdEndConditionalRenderingEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdEndConditionalRenderingEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdEndConditionalRenderingEXT_EXT});
+
+ /* skip args->commandBuffer */
+}
+
static inline void vn_decode_vkCmdResetQueryPool_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdResetQueryPool *args)
{
vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
@@ -2894,6 +4424,50 @@ static inline void vn_encode_vkCmdExecuteCommands_reply(struct vn_cs_encoder *en
/* skip args->pCommandBuffers */
}
+/* VK_KHR_push_descriptor: decode args; pDescriptorWrites is a required
+ * array sized by descriptorWriteCount (checked decode in both branches). */
+static inline void vn_decode_vkCmdPushDescriptorSetKHR_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdPushDescriptorSetKHR *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkPipelineBindPoint(dec, &args->pipelineBindPoint);
+ vn_decode_VkPipelineLayout_lookup(dec, &args->layout);
+ vn_decode_uint32_t(dec, &args->set);
+ vn_decode_uint32_t(dec, &args->descriptorWriteCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->descriptorWriteCount);
+ args->pDescriptorWrites = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pDescriptorWrites) * iter_count);
+ if (!args->pDescriptorWrites) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkWriteDescriptorSet_temp(dec, &((VkWriteDescriptorSet *)args->pDescriptorWrites)[i]);
+ } else {
+ vn_decode_array_size(dec, args->descriptorWriteCount);
+ args->pDescriptorWrites = NULL;
+ }
+}
+
+/* Replace the command buffer, pipeline layout, and every handle nested in
+ * the descriptor writes (buffers/images/samplers live inside those). */
+static inline void vn_replace_vkCmdPushDescriptorSetKHR_args_handle(struct vn_command_vkCmdPushDescriptorSetKHR *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->pipelineBindPoint */
+ vn_replace_VkPipelineLayout_handle(&args->layout);
+ /* skip args->set */
+ /* skip args->descriptorWriteCount */
+ if (args->pDescriptorWrites) {
+ for (uint32_t i = 0; i < args->descriptorWriteCount; i++)
+ vn_replace_VkWriteDescriptorSet_handle(&((VkWriteDescriptorSet *)args->pDescriptorWrites)[i]);
+ }
+}
+
+static inline void vn_encode_vkCmdPushDescriptorSetKHR_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdPushDescriptorSetKHR *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdPushDescriptorSetKHR_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pipelineBindPoint */
+ /* skip args->layout */
+ /* skip args->set */
+ /* skip args->descriptorWriteCount */
+ /* skip args->pDescriptorWrites */
+}
+
static inline void vn_decode_vkCmdSetDeviceMask_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetDeviceMask *args)
{
vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
@@ -3373,6 +4947,833 @@ static inline void vn_encode_vkCmdDrawIndirectByteCountEXT_reply(struct vn_cs_en
/* skip args->vertexStride */
}
+/* Dynamic-state commands. Each follows the same generated trio:
+ *   _args_temp   decode command buffer (table lookup) + scalar args,
+ *   _args_handle rewrite only the command-buffer handle,
+ *   _reply       emit the command-type tag (void commands return nothing). */
+static inline void vn_decode_vkCmdSetLineStippleEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetLineStippleEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->lineStippleFactor);
+ vn_decode_uint16_t(dec, &args->lineStipplePattern);
+}
+
+static inline void vn_replace_vkCmdSetLineStippleEXT_args_handle(struct vn_command_vkCmdSetLineStippleEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->lineStippleFactor */
+ /* skip args->lineStipplePattern */
+}
+
+static inline void vn_encode_vkCmdSetLineStippleEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetLineStippleEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetLineStippleEXT_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->lineStippleFactor */
+ /* skip args->lineStipplePattern */
+}
+
+/* VkCullModeFlags is a flags type, hence the generic VkFlags decode. */
+static inline void vn_decode_vkCmdSetCullMode_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetCullMode *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkFlags(dec, &args->cullMode);
+}
+
+static inline void vn_replace_vkCmdSetCullMode_args_handle(struct vn_command_vkCmdSetCullMode *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->cullMode */
+}
+
+static inline void vn_encode_vkCmdSetCullMode_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetCullMode *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetCullMode_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->cullMode */
+}
+
+static inline void vn_decode_vkCmdSetFrontFace_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetFrontFace *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkFrontFace(dec, &args->frontFace);
+}
+
+static inline void vn_replace_vkCmdSetFrontFace_args_handle(struct vn_command_vkCmdSetFrontFace *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->frontFace */
+}
+
+static inline void vn_encode_vkCmdSetFrontFace_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetFrontFace *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetFrontFace_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->frontFace */
+}
+
+static inline void vn_decode_vkCmdSetPrimitiveTopology_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetPrimitiveTopology *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkPrimitiveTopology(dec, &args->primitiveTopology);
+}
+
+static inline void vn_replace_vkCmdSetPrimitiveTopology_args_handle(struct vn_command_vkCmdSetPrimitiveTopology *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->primitiveTopology */
+}
+
+static inline void vn_encode_vkCmdSetPrimitiveTopology_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetPrimitiveTopology *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetPrimitiveTopology_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->primitiveTopology */
+}
+
+/* Viewport array is sized by viewportCount; the checked array-size decode
+ * in both branches marks the decoder fatal on a count mismatch. */
+static inline void vn_decode_vkCmdSetViewportWithCount_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetViewportWithCount *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->viewportCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->viewportCount);
+ args->pViewports = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pViewports) * iter_count);
+ if (!args->pViewports) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkViewport_temp(dec, &((VkViewport *)args->pViewports)[i]);
+ } else {
+ vn_decode_array_size(dec, args->viewportCount);
+ args->pViewports = NULL;
+ }
+}
+
+static inline void vn_replace_vkCmdSetViewportWithCount_args_handle(struct vn_command_vkCmdSetViewportWithCount *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->viewportCount */
+ if (args->pViewports) {
+ for (uint32_t i = 0; i < args->viewportCount; i++)
+ vn_replace_VkViewport_handle(&((VkViewport *)args->pViewports)[i]);
+ }
+}
+
+static inline void vn_encode_vkCmdSetViewportWithCount_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetViewportWithCount *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetViewportWithCount_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->viewportCount */
+ /* skip args->pViewports */
+}
+
+static inline void vn_decode_vkCmdSetScissorWithCount_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetScissorWithCount *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->scissorCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->scissorCount);
+ args->pScissors = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pScissors) * iter_count);
+ if (!args->pScissors) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkRect2D_temp(dec, &((VkRect2D *)args->pScissors)[i]);
+ } else {
+ vn_decode_array_size(dec, args->scissorCount);
+ args->pScissors = NULL;
+ }
+}
+
+static inline void vn_replace_vkCmdSetScissorWithCount_args_handle(struct vn_command_vkCmdSetScissorWithCount *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->scissorCount */
+ if (args->pScissors) {
+ for (uint32_t i = 0; i < args->scissorCount; i++)
+ vn_replace_VkRect2D_handle(&((VkRect2D *)args->pScissors)[i]);
+ }
+}
+
+static inline void vn_encode_vkCmdSetScissorWithCount_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetScissorWithCount *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetScissorWithCount_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->scissorCount */
+ /* skip args->pScissors */
+}
+
+/* Decode vkCmdBindVertexBuffers2 args. pBuffers and pOffsets are required
+ * (checked array-size decode against bindingCount in the NULL branch) while
+ * pSizes and pStrides are optional per the API, hence the _unchecked decode
+ * when they are absent. VkDeviceSize arrays are bulk-decoded. */
+static inline void vn_decode_vkCmdBindVertexBuffers2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdBindVertexBuffers2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->firstBinding);
+ vn_decode_uint32_t(dec, &args->bindingCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->bindingCount);
+ args->pBuffers = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pBuffers) * iter_count);
+ if (!args->pBuffers) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkBuffer_lookup(dec, &((VkBuffer *)args->pBuffers)[i]);
+ } else {
+ vn_decode_array_size(dec, args->bindingCount);
+ args->pBuffers = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, args->bindingCount);
+ args->pOffsets = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pOffsets) * array_size);
+ if (!args->pOffsets) return;
+ vn_decode_VkDeviceSize_array(dec, (VkDeviceSize *)args->pOffsets, array_size);
+ } else {
+ vn_decode_array_size(dec, args->bindingCount);
+ args->pOffsets = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, args->bindingCount);
+ args->pSizes = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pSizes) * array_size);
+ if (!args->pSizes) return;
+ vn_decode_VkDeviceSize_array(dec, (VkDeviceSize *)args->pSizes, array_size);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pSizes = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, args->bindingCount);
+ args->pStrides = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pStrides) * array_size);
+ if (!args->pStrides) return;
+ vn_decode_VkDeviceSize_array(dec, (VkDeviceSize *)args->pStrides, array_size);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pStrides = NULL;
+ }
+}
+
+/* Handles live in the command buffer and the VkBuffer array only; the
+ * offset/size/stride arrays are plain integers. */
+static inline void vn_replace_vkCmdBindVertexBuffers2_args_handle(struct vn_command_vkCmdBindVertexBuffers2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->firstBinding */
+ /* skip args->bindingCount */
+ if (args->pBuffers) {
+ for (uint32_t i = 0; i < args->bindingCount; i++)
+ vn_replace_VkBuffer_handle(&((VkBuffer *)args->pBuffers)[i]);
+ }
+ /* skip args->pOffsets */
+ /* skip args->pSizes */
+ /* skip args->pStrides */
+}
+
+static inline void vn_encode_vkCmdBindVertexBuffers2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdBindVertexBuffers2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdBindVertexBuffers2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->firstBinding */
+ /* skip args->bindingCount */
+ /* skip args->pBuffers */
+ /* skip args->pOffsets */
+ /* skip args->pSizes */
+ /* skip args->pStrides */
+}
+
+/* Remaining simple dynamic-state commands. Each generated trio is:
+ * decode (command-buffer lookup + scalar args in wire order), replace
+ * (command-buffer handle only — no other field carries a handle), and
+ * reply (command-type tag only, since these commands return void). */
+static inline void vn_decode_vkCmdSetDepthTestEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetDepthTestEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->depthTestEnable);
+}
+
+static inline void vn_replace_vkCmdSetDepthTestEnable_args_handle(struct vn_command_vkCmdSetDepthTestEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->depthTestEnable */
+}
+
+static inline void vn_encode_vkCmdSetDepthTestEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetDepthTestEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetDepthTestEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->depthTestEnable */
+}
+
+static inline void vn_decode_vkCmdSetDepthWriteEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetDepthWriteEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->depthWriteEnable);
+}
+
+static inline void vn_replace_vkCmdSetDepthWriteEnable_args_handle(struct vn_command_vkCmdSetDepthWriteEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->depthWriteEnable */
+}
+
+static inline void vn_encode_vkCmdSetDepthWriteEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetDepthWriteEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetDepthWriteEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->depthWriteEnable */
+}
+
+static inline void vn_decode_vkCmdSetDepthCompareOp_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetDepthCompareOp *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkCompareOp(dec, &args->depthCompareOp);
+}
+
+static inline void vn_replace_vkCmdSetDepthCompareOp_args_handle(struct vn_command_vkCmdSetDepthCompareOp *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->depthCompareOp */
+}
+
+static inline void vn_encode_vkCmdSetDepthCompareOp_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetDepthCompareOp *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetDepthCompareOp_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->depthCompareOp */
+}
+
+static inline void vn_decode_vkCmdSetDepthBoundsTestEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetDepthBoundsTestEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->depthBoundsTestEnable);
+}
+
+static inline void vn_replace_vkCmdSetDepthBoundsTestEnable_args_handle(struct vn_command_vkCmdSetDepthBoundsTestEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->depthBoundsTestEnable */
+}
+
+static inline void vn_encode_vkCmdSetDepthBoundsTestEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetDepthBoundsTestEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetDepthBoundsTestEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->depthBoundsTestEnable */
+}
+
+static inline void vn_decode_vkCmdSetStencilTestEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetStencilTestEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->stencilTestEnable);
+}
+
+static inline void vn_replace_vkCmdSetStencilTestEnable_args_handle(struct vn_command_vkCmdSetStencilTestEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->stencilTestEnable */
+}
+
+static inline void vn_encode_vkCmdSetStencilTestEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetStencilTestEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetStencilTestEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->stencilTestEnable */
+}
+
+/* faceMask is VkStencilFaceFlags (a flags type), hence VkFlags decode. */
+static inline void vn_decode_vkCmdSetStencilOp_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetStencilOp *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkFlags(dec, &args->faceMask);
+ vn_decode_VkStencilOp(dec, &args->failOp);
+ vn_decode_VkStencilOp(dec, &args->passOp);
+ vn_decode_VkStencilOp(dec, &args->depthFailOp);
+ vn_decode_VkCompareOp(dec, &args->compareOp);
+}
+
+static inline void vn_replace_vkCmdSetStencilOp_args_handle(struct vn_command_vkCmdSetStencilOp *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->faceMask */
+ /* skip args->failOp */
+ /* skip args->passOp */
+ /* skip args->depthFailOp */
+ /* skip args->compareOp */
+}
+
+static inline void vn_encode_vkCmdSetStencilOp_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetStencilOp *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetStencilOp_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->faceMask */
+ /* skip args->failOp */
+ /* skip args->passOp */
+ /* skip args->depthFailOp */
+ /* skip args->compareOp */
+}
+
+static inline void vn_decode_vkCmdSetPatchControlPointsEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetPatchControlPointsEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->patchControlPoints);
+}
+
+static inline void vn_replace_vkCmdSetPatchControlPointsEXT_args_handle(struct vn_command_vkCmdSetPatchControlPointsEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->patchControlPoints */
+}
+
+static inline void vn_encode_vkCmdSetPatchControlPointsEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetPatchControlPointsEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetPatchControlPointsEXT_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->patchControlPoints */
+}
+
+static inline void vn_decode_vkCmdSetRasterizerDiscardEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetRasterizerDiscardEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->rasterizerDiscardEnable);
+}
+
+static inline void vn_replace_vkCmdSetRasterizerDiscardEnable_args_handle(struct vn_command_vkCmdSetRasterizerDiscardEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->rasterizerDiscardEnable */
+}
+
+static inline void vn_encode_vkCmdSetRasterizerDiscardEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetRasterizerDiscardEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetRasterizerDiscardEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->rasterizerDiscardEnable */
+}
+
+static inline void vn_decode_vkCmdSetDepthBiasEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetDepthBiasEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->depthBiasEnable);
+}
+
+static inline void vn_replace_vkCmdSetDepthBiasEnable_args_handle(struct vn_command_vkCmdSetDepthBiasEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->depthBiasEnable */
+}
+
+static inline void vn_encode_vkCmdSetDepthBiasEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetDepthBiasEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetDepthBiasEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->depthBiasEnable */
+}
+
+static inline void vn_decode_vkCmdSetLogicOpEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetLogicOpEXT *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkLogicOp(dec, &args->logicOp);
+}
+
+static inline void vn_replace_vkCmdSetLogicOpEXT_args_handle(struct vn_command_vkCmdSetLogicOpEXT *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->logicOp */
+}
+
+static inline void vn_encode_vkCmdSetLogicOpEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetLogicOpEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetLogicOpEXT_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->logicOp */
+}
+
+static inline void vn_decode_vkCmdSetPrimitiveRestartEnable_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetPrimitiveRestartEnable *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkBool32(dec, &args->primitiveRestartEnable);
+}
+
+static inline void vn_replace_vkCmdSetPrimitiveRestartEnable_args_handle(struct vn_command_vkCmdSetPrimitiveRestartEnable *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->primitiveRestartEnable */
+}
+
+static inline void vn_encode_vkCmdSetPrimitiveRestartEnable_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetPrimitiveRestartEnable *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetPrimitiveRestartEnable_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->primitiveRestartEnable */
+}
+
+static inline void vn_decode_vkCmdCopyBuffer2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdCopyBuffer2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pCopyBufferInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pCopyBufferInfo));
+ if (!args->pCopyBufferInfo) return;
+ vn_decode_VkCopyBufferInfo2_temp(dec, (VkCopyBufferInfo2 *)args->pCopyBufferInfo);
+ } else {
+ args->pCopyBufferInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdCopyBuffer2_args_handle(struct vn_command_vkCmdCopyBuffer2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pCopyBufferInfo)
+ vn_replace_VkCopyBufferInfo2_handle((VkCopyBufferInfo2 *)args->pCopyBufferInfo);
+}
+
+static inline void vn_encode_vkCmdCopyBuffer2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdCopyBuffer2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdCopyBuffer2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pCopyBufferInfo */
+}
+
+static inline void vn_decode_vkCmdCopyImage2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdCopyImage2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pCopyImageInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pCopyImageInfo));
+ if (!args->pCopyImageInfo) return;
+ vn_decode_VkCopyImageInfo2_temp(dec, (VkCopyImageInfo2 *)args->pCopyImageInfo);
+ } else {
+ args->pCopyImageInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdCopyImage2_args_handle(struct vn_command_vkCmdCopyImage2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pCopyImageInfo)
+ vn_replace_VkCopyImageInfo2_handle((VkCopyImageInfo2 *)args->pCopyImageInfo);
+}
+
+static inline void vn_encode_vkCmdCopyImage2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdCopyImage2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdCopyImage2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pCopyImageInfo */
+}
+
+static inline void vn_decode_vkCmdBlitImage2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdBlitImage2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pBlitImageInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pBlitImageInfo));
+ if (!args->pBlitImageInfo) return;
+ vn_decode_VkBlitImageInfo2_temp(dec, (VkBlitImageInfo2 *)args->pBlitImageInfo);
+ } else {
+ args->pBlitImageInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdBlitImage2_args_handle(struct vn_command_vkCmdBlitImage2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pBlitImageInfo)
+ vn_replace_VkBlitImageInfo2_handle((VkBlitImageInfo2 *)args->pBlitImageInfo);
+}
+
+static inline void vn_encode_vkCmdBlitImage2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdBlitImage2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdBlitImage2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pBlitImageInfo */
+}
+
+static inline void vn_decode_vkCmdCopyBufferToImage2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdCopyBufferToImage2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pCopyBufferToImageInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pCopyBufferToImageInfo));
+ if (!args->pCopyBufferToImageInfo) return;
+ vn_decode_VkCopyBufferToImageInfo2_temp(dec, (VkCopyBufferToImageInfo2 *)args->pCopyBufferToImageInfo);
+ } else {
+ args->pCopyBufferToImageInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdCopyBufferToImage2_args_handle(struct vn_command_vkCmdCopyBufferToImage2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pCopyBufferToImageInfo)
+ vn_replace_VkCopyBufferToImageInfo2_handle((VkCopyBufferToImageInfo2 *)args->pCopyBufferToImageInfo);
+}
+
+static inline void vn_encode_vkCmdCopyBufferToImage2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdCopyBufferToImage2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdCopyBufferToImage2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pCopyBufferToImageInfo */
+}
+
+static inline void vn_decode_vkCmdCopyImageToBuffer2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdCopyImageToBuffer2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pCopyImageToBufferInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pCopyImageToBufferInfo));
+ if (!args->pCopyImageToBufferInfo) return;
+ vn_decode_VkCopyImageToBufferInfo2_temp(dec, (VkCopyImageToBufferInfo2 *)args->pCopyImageToBufferInfo);
+ } else {
+ args->pCopyImageToBufferInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdCopyImageToBuffer2_args_handle(struct vn_command_vkCmdCopyImageToBuffer2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pCopyImageToBufferInfo)
+ vn_replace_VkCopyImageToBufferInfo2_handle((VkCopyImageToBufferInfo2 *)args->pCopyImageToBufferInfo);
+}
+
+static inline void vn_encode_vkCmdCopyImageToBuffer2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdCopyImageToBuffer2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdCopyImageToBuffer2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pCopyImageToBufferInfo */
+}
+
+static inline void vn_decode_vkCmdResolveImage2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdResolveImage2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pResolveImageInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pResolveImageInfo));
+ if (!args->pResolveImageInfo) return;
+ vn_decode_VkResolveImageInfo2_temp(dec, (VkResolveImageInfo2 *)args->pResolveImageInfo);
+ } else {
+ args->pResolveImageInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdResolveImage2_args_handle(struct vn_command_vkCmdResolveImage2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pResolveImageInfo)
+ vn_replace_VkResolveImageInfo2_handle((VkResolveImageInfo2 *)args->pResolveImageInfo);
+}
+
+static inline void vn_encode_vkCmdResolveImage2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdResolveImage2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdResolveImage2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pResolveImageInfo */
+}
+
+static inline void vn_decode_vkCmdSetEvent2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdSetEvent2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkEvent_lookup(dec, &args->event);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pDependencyInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pDependencyInfo));
+ if (!args->pDependencyInfo) return;
+ vn_decode_VkDependencyInfo_temp(dec, (VkDependencyInfo *)args->pDependencyInfo);
+ } else {
+ args->pDependencyInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdSetEvent2_args_handle(struct vn_command_vkCmdSetEvent2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ vn_replace_VkEvent_handle(&args->event);
+ if (args->pDependencyInfo)
+ vn_replace_VkDependencyInfo_handle((VkDependencyInfo *)args->pDependencyInfo);
+}
+
+static inline void vn_encode_vkCmdSetEvent2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdSetEvent2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdSetEvent2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->event */
+ /* skip args->pDependencyInfo */
+}
+
+static inline void vn_decode_vkCmdResetEvent2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdResetEvent2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkEvent_lookup(dec, &args->event);
+ vn_decode_VkFlags64(dec, &args->stageMask);
+}
+
+static inline void vn_replace_vkCmdResetEvent2_args_handle(struct vn_command_vkCmdResetEvent2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ vn_replace_VkEvent_handle(&args->event);
+ /* skip args->stageMask */
+}
+
+static inline void vn_encode_vkCmdResetEvent2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdResetEvent2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdResetEvent2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->event */
+ /* skip args->stageMask */
+}
+
+static inline void vn_decode_vkCmdWaitEvents2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdWaitEvents2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_uint32_t(dec, &args->eventCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->eventCount);
+ args->pEvents = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pEvents) * iter_count);
+ if (!args->pEvents) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkEvent_lookup(dec, &((VkEvent *)args->pEvents)[i]);
+ } else {
+ vn_decode_array_size(dec, args->eventCount);
+ args->pEvents = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->eventCount);
+ args->pDependencyInfos = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pDependencyInfos) * iter_count);
+ if (!args->pDependencyInfos) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkDependencyInfo_temp(dec, &((VkDependencyInfo *)args->pDependencyInfos)[i]);
+ } else {
+ vn_decode_array_size(dec, args->eventCount);
+ args->pDependencyInfos = NULL;
+ }
+}
+
+static inline void vn_replace_vkCmdWaitEvents2_args_handle(struct vn_command_vkCmdWaitEvents2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->eventCount */
+ if (args->pEvents) {
+ for (uint32_t i = 0; i < args->eventCount; i++)
+ vn_replace_VkEvent_handle(&((VkEvent *)args->pEvents)[i]);
+ }
+ if (args->pDependencyInfos) {
+ for (uint32_t i = 0; i < args->eventCount; i++)
+ vn_replace_VkDependencyInfo_handle(&((VkDependencyInfo *)args->pDependencyInfos)[i]);
+ }
+}
+
+static inline void vn_encode_vkCmdWaitEvents2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdWaitEvents2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdWaitEvents2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->eventCount */
+ /* skip args->pEvents */
+ /* skip args->pDependencyInfos */
+}
+
+static inline void vn_decode_vkCmdPipelineBarrier2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdPipelineBarrier2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pDependencyInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pDependencyInfo));
+ if (!args->pDependencyInfo) return;
+ vn_decode_VkDependencyInfo_temp(dec, (VkDependencyInfo *)args->pDependencyInfo);
+ } else {
+ args->pDependencyInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdPipelineBarrier2_args_handle(struct vn_command_vkCmdPipelineBarrier2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pDependencyInfo)
+ vn_replace_VkDependencyInfo_handle((VkDependencyInfo *)args->pDependencyInfo);
+}
+
+static inline void vn_encode_vkCmdPipelineBarrier2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdPipelineBarrier2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdPipelineBarrier2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pDependencyInfo */
+}
+
+static inline void vn_decode_vkCmdWriteTimestamp2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdWriteTimestamp2 *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ vn_decode_VkFlags64(dec, &args->stage);
+ vn_decode_VkQueryPool_lookup(dec, &args->queryPool);
+ vn_decode_uint32_t(dec, &args->query);
+}
+
+static inline void vn_replace_vkCmdWriteTimestamp2_args_handle(struct vn_command_vkCmdWriteTimestamp2 *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ /* skip args->stage */
+ vn_replace_VkQueryPool_handle(&args->queryPool);
+ /* skip args->query */
+}
+
+static inline void vn_encode_vkCmdWriteTimestamp2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdWriteTimestamp2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdWriteTimestamp2_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->stage */
+ /* skip args->queryPool */
+ /* skip args->query */
+}
+
+static inline void vn_decode_vkCmdBeginRendering_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdBeginRendering *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pRenderingInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pRenderingInfo));
+ if (!args->pRenderingInfo) return;
+ vn_decode_VkRenderingInfo_temp(dec, (VkRenderingInfo *)args->pRenderingInfo);
+ } else {
+ args->pRenderingInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCmdBeginRendering_args_handle(struct vn_command_vkCmdBeginRendering *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+ if (args->pRenderingInfo)
+ vn_replace_VkRenderingInfo_handle((VkRenderingInfo *)args->pRenderingInfo);
+}
+
+static inline void vn_encode_vkCmdBeginRendering_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdBeginRendering *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdBeginRendering_EXT});
+
+ /* skip args->commandBuffer */
+ /* skip args->pRenderingInfo */
+}
+
+static inline void vn_decode_vkCmdEndRendering_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCmdEndRendering *args)
+{
+ vn_decode_VkCommandBuffer_lookup(dec, &args->commandBuffer);
+}
+
+static inline void vn_replace_vkCmdEndRendering_args_handle(struct vn_command_vkCmdEndRendering *args)
+{
+ vn_replace_VkCommandBuffer_handle(&args->commandBuffer);
+}
+
+static inline void vn_encode_vkCmdEndRendering_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCmdEndRendering *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCmdEndRendering_EXT});
+
+ /* skip args->commandBuffer */
+}
+
static inline void vn_dispatch_vkAllocateCommandBuffers(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkAllocateCommandBuffers args;
@@ -3889,6 +6290,56 @@ static inline void vn_dispatch_vkCmdDrawIndexed(struct vn_dispatch_context *ctx,
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkCmdDrawMultiEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdDrawMultiEXT args;
+
+ if (!ctx->dispatch_vkCmdDrawMultiEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdDrawMultiEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdDrawMultiEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdDrawMultiEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdDrawMultiIndexedEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdDrawMultiIndexedEXT args;
+
+ if (!ctx->dispatch_vkCmdDrawMultiIndexedEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdDrawMultiIndexedEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdDrawMultiIndexedEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdDrawMultiIndexedEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
static inline void vn_dispatch_vkCmdDrawIndirect(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkCmdDrawIndirect args;
@@ -4414,6 +6865,56 @@ static inline void vn_dispatch_vkCmdEndQuery(struct vn_dispatch_context *ctx, Vk
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkCmdBeginConditionalRenderingEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdBeginConditionalRenderingEXT args;
+
+ if (!ctx->dispatch_vkCmdBeginConditionalRenderingEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdBeginConditionalRenderingEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdBeginConditionalRenderingEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdBeginConditionalRenderingEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdEndConditionalRenderingEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdEndConditionalRenderingEXT args;
+
+ if (!ctx->dispatch_vkCmdEndConditionalRenderingEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdEndConditionalRenderingEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdEndConditionalRenderingEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdEndConditionalRenderingEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
static inline void vn_dispatch_vkCmdResetQueryPool(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkCmdResetQueryPool args;
@@ -4614,6 +7115,31 @@ static inline void vn_dispatch_vkCmdExecuteCommands(struct vn_dispatch_context *
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkCmdPushDescriptorSetKHR(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdPushDescriptorSetKHR args;
+
+ if (!ctx->dispatch_vkCmdPushDescriptorSetKHR) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdPushDescriptorSetKHR_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdPushDescriptorSetKHR(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdPushDescriptorSetKHR_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
static inline void vn_dispatch_vkCmdSetDeviceMask(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkCmdSetDeviceMask args;
@@ -4939,6 +7465,781 @@ static inline void vn_dispatch_vkCmdDrawIndirectByteCountEXT(struct vn_dispatch_
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkCmdSetLineStippleEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetLineStippleEXT args;
+
+ if (!ctx->dispatch_vkCmdSetLineStippleEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetLineStippleEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetLineStippleEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetLineStippleEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetCullMode(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetCullMode args;
+
+ if (!ctx->dispatch_vkCmdSetCullMode) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetCullMode_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetCullMode(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetCullMode_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetFrontFace(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetFrontFace args;
+
+ if (!ctx->dispatch_vkCmdSetFrontFace) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetFrontFace_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetFrontFace(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetFrontFace_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetPrimitiveTopology(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetPrimitiveTopology args;
+
+ if (!ctx->dispatch_vkCmdSetPrimitiveTopology) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetPrimitiveTopology_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetPrimitiveTopology(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetPrimitiveTopology_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetViewportWithCount(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetViewportWithCount args;
+
+ if (!ctx->dispatch_vkCmdSetViewportWithCount) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetViewportWithCount_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetViewportWithCount(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetViewportWithCount_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetScissorWithCount(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetScissorWithCount args;
+
+ if (!ctx->dispatch_vkCmdSetScissorWithCount) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetScissorWithCount_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetScissorWithCount(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetScissorWithCount_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdBindVertexBuffers2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdBindVertexBuffers2 args;
+
+ if (!ctx->dispatch_vkCmdBindVertexBuffers2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdBindVertexBuffers2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdBindVertexBuffers2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdBindVertexBuffers2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetDepthTestEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetDepthTestEnable args;
+
+ if (!ctx->dispatch_vkCmdSetDepthTestEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetDepthTestEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetDepthTestEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetDepthTestEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetDepthWriteEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetDepthWriteEnable args;
+
+ if (!ctx->dispatch_vkCmdSetDepthWriteEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetDepthWriteEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetDepthWriteEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetDepthWriteEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetDepthCompareOp(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetDepthCompareOp args;
+
+ if (!ctx->dispatch_vkCmdSetDepthCompareOp) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetDepthCompareOp_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetDepthCompareOp(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetDepthCompareOp_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetDepthBoundsTestEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetDepthBoundsTestEnable args;
+
+ if (!ctx->dispatch_vkCmdSetDepthBoundsTestEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetDepthBoundsTestEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetDepthBoundsTestEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetDepthBoundsTestEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetStencilTestEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetStencilTestEnable args;
+
+ if (!ctx->dispatch_vkCmdSetStencilTestEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetStencilTestEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetStencilTestEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetStencilTestEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetStencilOp(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetStencilOp args;
+
+ if (!ctx->dispatch_vkCmdSetStencilOp) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetStencilOp_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetStencilOp(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetStencilOp_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetPatchControlPointsEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetPatchControlPointsEXT args;
+
+ if (!ctx->dispatch_vkCmdSetPatchControlPointsEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetPatchControlPointsEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetPatchControlPointsEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetPatchControlPointsEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetRasterizerDiscardEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetRasterizerDiscardEnable args;
+
+ if (!ctx->dispatch_vkCmdSetRasterizerDiscardEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetRasterizerDiscardEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetRasterizerDiscardEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetRasterizerDiscardEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetDepthBiasEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetDepthBiasEnable args;
+
+ if (!ctx->dispatch_vkCmdSetDepthBiasEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetDepthBiasEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetDepthBiasEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetDepthBiasEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetLogicOpEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetLogicOpEXT args;
+
+ if (!ctx->dispatch_vkCmdSetLogicOpEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetLogicOpEXT_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetLogicOpEXT(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetLogicOpEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetPrimitiveRestartEnable(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetPrimitiveRestartEnable args;
+
+ if (!ctx->dispatch_vkCmdSetPrimitiveRestartEnable) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetPrimitiveRestartEnable_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetPrimitiveRestartEnable(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetPrimitiveRestartEnable_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdCopyBuffer2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdCopyBuffer2 args;
+
+ if (!ctx->dispatch_vkCmdCopyBuffer2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdCopyBuffer2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdCopyBuffer2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdCopyBuffer2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdCopyImage2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdCopyImage2 args;
+
+ if (!ctx->dispatch_vkCmdCopyImage2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdCopyImage2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdCopyImage2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdCopyImage2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdBlitImage2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdBlitImage2 args;
+
+ if (!ctx->dispatch_vkCmdBlitImage2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdBlitImage2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdBlitImage2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdBlitImage2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdCopyBufferToImage2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdCopyBufferToImage2 args;
+
+ if (!ctx->dispatch_vkCmdCopyBufferToImage2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdCopyBufferToImage2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdCopyBufferToImage2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdCopyBufferToImage2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdCopyImageToBuffer2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdCopyImageToBuffer2 args;
+
+ if (!ctx->dispatch_vkCmdCopyImageToBuffer2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdCopyImageToBuffer2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdCopyImageToBuffer2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdCopyImageToBuffer2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdResolveImage2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdResolveImage2 args;
+
+ if (!ctx->dispatch_vkCmdResolveImage2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdResolveImage2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdResolveImage2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdResolveImage2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdSetEvent2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdSetEvent2 args;
+
+ if (!ctx->dispatch_vkCmdSetEvent2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdSetEvent2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdSetEvent2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdSetEvent2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdResetEvent2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdResetEvent2 args;
+
+ if (!ctx->dispatch_vkCmdResetEvent2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdResetEvent2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdResetEvent2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdResetEvent2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdWaitEvents2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdWaitEvents2 args;
+
+ if (!ctx->dispatch_vkCmdWaitEvents2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdWaitEvents2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdWaitEvents2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdWaitEvents2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdPipelineBarrier2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdPipelineBarrier2 args;
+
+ if (!ctx->dispatch_vkCmdPipelineBarrier2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdPipelineBarrier2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdPipelineBarrier2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdPipelineBarrier2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdWriteTimestamp2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdWriteTimestamp2 args;
+
+ if (!ctx->dispatch_vkCmdWriteTimestamp2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdWriteTimestamp2_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdWriteTimestamp2(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdWriteTimestamp2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdBeginRendering(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdBeginRendering args;
+
+ if (!ctx->dispatch_vkCmdBeginRendering) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdBeginRendering_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdBeginRendering(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdBeginRendering_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkCmdEndRendering(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCmdEndRendering args;
+
+ if (!ctx->dispatch_vkCmdEndRendering) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCmdEndRendering_args_temp(ctx->decoder, &args);
+ if (!args.commandBuffer) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCmdEndRendering(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCmdEndRendering_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_COMMAND_BUFFER_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_defines.h b/src/venus/venus-protocol/vn_protocol_renderer_defines.h
index a2410e4f..b95e5250 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_defines.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_defines.h
@@ -22,6 +22,8 @@
#define VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA ((VkStructureType)1000384001)
#define VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA ((VkStructureType)1000384002)
#define VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA ((VkStructureType)1000384003)
+#define VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_RESOURCE_INFO_100000_MESA ((VkStructureType)1000384004)
+#define VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA ((VkStructureType)1000384005)
typedef enum VkCommandTypeEXT {
VK_COMMAND_TYPE_vkCreateInstance_EXT = 0,
@@ -244,6 +246,80 @@ typedef enum VkCommandTypeEXT {
VK_COMMAND_TYPE_vkGetBufferOpaqueCaptureAddressKHR_EXT = 176,
VK_COMMAND_TYPE_vkGetDeviceMemoryOpaqueCaptureAddress_EXT = 177,
VK_COMMAND_TYPE_vkGetDeviceMemoryOpaqueCaptureAddressKHR_EXT = 177,
+ VK_COMMAND_TYPE_vkGetPhysicalDeviceToolProperties_EXT = 196,
+ VK_COMMAND_TYPE_vkGetPhysicalDeviceToolPropertiesEXT_EXT = 196,
+ VK_COMMAND_TYPE_vkCreatePrivateDataSlot_EXT = 197,
+ VK_COMMAND_TYPE_vkCreatePrivateDataSlotEXT_EXT = 197,
+ VK_COMMAND_TYPE_vkDestroyPrivateDataSlot_EXT = 198,
+ VK_COMMAND_TYPE_vkDestroyPrivateDataSlotEXT_EXT = 198,
+ VK_COMMAND_TYPE_vkSetPrivateData_EXT = 199,
+ VK_COMMAND_TYPE_vkSetPrivateDataEXT_EXT = 199,
+ VK_COMMAND_TYPE_vkGetPrivateData_EXT = 200,
+ VK_COMMAND_TYPE_vkGetPrivateDataEXT_EXT = 200,
+ VK_COMMAND_TYPE_vkCmdSetEvent2_EXT = 201,
+ VK_COMMAND_TYPE_vkCmdSetEvent2KHR_EXT = 201,
+ VK_COMMAND_TYPE_vkCmdResetEvent2_EXT = 202,
+ VK_COMMAND_TYPE_vkCmdResetEvent2KHR_EXT = 202,
+ VK_COMMAND_TYPE_vkCmdWaitEvents2_EXT = 203,
+ VK_COMMAND_TYPE_vkCmdWaitEvents2KHR_EXT = 203,
+ VK_COMMAND_TYPE_vkCmdPipelineBarrier2_EXT = 204,
+ VK_COMMAND_TYPE_vkCmdPipelineBarrier2KHR_EXT = 204,
+ VK_COMMAND_TYPE_vkCmdWriteTimestamp2_EXT = 205,
+ VK_COMMAND_TYPE_vkCmdWriteTimestamp2KHR_EXT = 205,
+ VK_COMMAND_TYPE_vkQueueSubmit2_EXT = 206,
+ VK_COMMAND_TYPE_vkQueueSubmit2KHR_EXT = 206,
+ VK_COMMAND_TYPE_vkCmdCopyBuffer2_EXT = 207,
+ VK_COMMAND_TYPE_vkCmdCopyBuffer2KHR_EXT = 207,
+ VK_COMMAND_TYPE_vkCmdCopyImage2_EXT = 208,
+ VK_COMMAND_TYPE_vkCmdCopyImage2KHR_EXT = 208,
+ VK_COMMAND_TYPE_vkCmdCopyBufferToImage2_EXT = 209,
+ VK_COMMAND_TYPE_vkCmdCopyBufferToImage2KHR_EXT = 209,
+ VK_COMMAND_TYPE_vkCmdCopyImageToBuffer2_EXT = 210,
+ VK_COMMAND_TYPE_vkCmdCopyImageToBuffer2KHR_EXT = 210,
+ VK_COMMAND_TYPE_vkCmdBlitImage2_EXT = 211,
+ VK_COMMAND_TYPE_vkCmdBlitImage2KHR_EXT = 211,
+ VK_COMMAND_TYPE_vkCmdResolveImage2_EXT = 212,
+ VK_COMMAND_TYPE_vkCmdResolveImage2KHR_EXT = 212,
+ VK_COMMAND_TYPE_vkCmdBeginRendering_EXT = 213,
+ VK_COMMAND_TYPE_vkCmdBeginRenderingKHR_EXT = 213,
+ VK_COMMAND_TYPE_vkCmdEndRendering_EXT = 214,
+ VK_COMMAND_TYPE_vkCmdEndRenderingKHR_EXT = 214,
+ VK_COMMAND_TYPE_vkCmdSetCullMode_EXT = 215,
+ VK_COMMAND_TYPE_vkCmdSetCullModeEXT_EXT = 215,
+ VK_COMMAND_TYPE_vkCmdSetFrontFace_EXT = 216,
+ VK_COMMAND_TYPE_vkCmdSetFrontFaceEXT_EXT = 216,
+ VK_COMMAND_TYPE_vkCmdSetPrimitiveTopology_EXT = 217,
+ VK_COMMAND_TYPE_vkCmdSetPrimitiveTopologyEXT_EXT = 217,
+ VK_COMMAND_TYPE_vkCmdSetViewportWithCount_EXT = 218,
+ VK_COMMAND_TYPE_vkCmdSetViewportWithCountEXT_EXT = 218,
+ VK_COMMAND_TYPE_vkCmdSetScissorWithCount_EXT = 219,
+ VK_COMMAND_TYPE_vkCmdSetScissorWithCountEXT_EXT = 219,
+ VK_COMMAND_TYPE_vkCmdBindVertexBuffers2_EXT = 220,
+ VK_COMMAND_TYPE_vkCmdBindVertexBuffers2EXT_EXT = 220,
+ VK_COMMAND_TYPE_vkCmdSetDepthTestEnable_EXT = 221,
+ VK_COMMAND_TYPE_vkCmdSetDepthTestEnableEXT_EXT = 221,
+ VK_COMMAND_TYPE_vkCmdSetDepthWriteEnable_EXT = 222,
+ VK_COMMAND_TYPE_vkCmdSetDepthWriteEnableEXT_EXT = 222,
+ VK_COMMAND_TYPE_vkCmdSetDepthCompareOp_EXT = 223,
+ VK_COMMAND_TYPE_vkCmdSetDepthCompareOpEXT_EXT = 223,
+ VK_COMMAND_TYPE_vkCmdSetDepthBoundsTestEnable_EXT = 224,
+ VK_COMMAND_TYPE_vkCmdSetDepthBoundsTestEnableEXT_EXT = 224,
+ VK_COMMAND_TYPE_vkCmdSetStencilTestEnable_EXT = 225,
+ VK_COMMAND_TYPE_vkCmdSetStencilTestEnableEXT_EXT = 225,
+ VK_COMMAND_TYPE_vkCmdSetStencilOp_EXT = 226,
+ VK_COMMAND_TYPE_vkCmdSetStencilOpEXT_EXT = 226,
+ VK_COMMAND_TYPE_vkCmdSetRasterizerDiscardEnable_EXT = 227,
+ VK_COMMAND_TYPE_vkCmdSetRasterizerDiscardEnableEXT_EXT = 227,
+ VK_COMMAND_TYPE_vkCmdSetDepthBiasEnable_EXT = 228,
+ VK_COMMAND_TYPE_vkCmdSetDepthBiasEnableEXT_EXT = 228,
+ VK_COMMAND_TYPE_vkCmdSetPrimitiveRestartEnable_EXT = 229,
+ VK_COMMAND_TYPE_vkCmdSetPrimitiveRestartEnableEXT_EXT = 229,
+ VK_COMMAND_TYPE_vkGetDeviceBufferMemoryRequirements_EXT = 230,
+ VK_COMMAND_TYPE_vkGetDeviceBufferMemoryRequirementsKHR_EXT = 230,
+ VK_COMMAND_TYPE_vkGetDeviceImageMemoryRequirements_EXT = 231,
+ VK_COMMAND_TYPE_vkGetDeviceImageMemoryRequirementsKHR_EXT = 231,
+ VK_COMMAND_TYPE_vkGetDeviceImageSparseMemoryRequirements_EXT = 232,
+ VK_COMMAND_TYPE_vkGetDeviceImageSparseMemoryRequirementsKHR_EXT = 232,
VK_COMMAND_TYPE_vkCmdBindTransformFeedbackBuffersEXT_EXT = 181,
VK_COMMAND_TYPE_vkCmdBeginTransformFeedbackEXT_EXT = 182,
VK_COMMAND_TYPE_vkCmdEndTransformFeedbackEXT_EXT = 183,
@@ -252,7 +328,22 @@ typedef enum VkCommandTypeEXT {
VK_COMMAND_TYPE_vkCmdDrawIndirectByteCountEXT_EXT = 186,
VK_COMMAND_TYPE_vkGetMemoryFdKHR_EXT = 193,
VK_COMMAND_TYPE_vkGetMemoryFdPropertiesKHR_EXT = 194,
+ VK_COMMAND_TYPE_vkImportSemaphoreFdKHR_EXT = 242,
+ VK_COMMAND_TYPE_vkGetSemaphoreFdKHR_EXT = 243,
+ VK_COMMAND_TYPE_vkCmdPushDescriptorSetKHR_EXT = 249,
+ VK_COMMAND_TYPE_vkCmdPushDescriptorSetWithTemplateKHR_EXT = 250,
+ VK_COMMAND_TYPE_vkCmdBeginConditionalRenderingEXT_EXT = 240,
+ VK_COMMAND_TYPE_vkCmdEndConditionalRenderingEXT_EXT = 241,
+ VK_COMMAND_TYPE_vkImportFenceFdKHR_EXT = 238,
+ VK_COMMAND_TYPE_vkGetFenceFdKHR_EXT = 239,
VK_COMMAND_TYPE_vkGetImageDrmFormatModifierPropertiesEXT_EXT = 187,
+ VK_COMMAND_TYPE_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_EXT = 235,
+ VK_COMMAND_TYPE_vkGetCalibratedTimestampsEXT_EXT = 236,
+ VK_COMMAND_TYPE_vkCmdSetLineStippleEXT_EXT = 237,
+ VK_COMMAND_TYPE_vkCmdSetPatchControlPointsEXT_EXT = 233,
+ VK_COMMAND_TYPE_vkCmdSetLogicOpEXT_EXT = 234,
+ VK_COMMAND_TYPE_vkCmdDrawMultiEXT_EXT = 247,
+ VK_COMMAND_TYPE_vkCmdDrawMultiIndexedEXT_EXT = 248,
VK_COMMAND_TYPE_vkSetReplyCommandStreamMESA_EXT = 178,
VK_COMMAND_TYPE_vkSeekReplyCommandStreamMESA_EXT = 179,
VK_COMMAND_TYPE_vkExecuteCommandStreamsMESA_EXT = 180,
@@ -261,6 +352,9 @@ typedef enum VkCommandTypeEXT {
VK_COMMAND_TYPE_vkNotifyRingMESA_EXT = 190,
VK_COMMAND_TYPE_vkWriteRingExtraMESA_EXT = 191,
VK_COMMAND_TYPE_vkGetMemoryResourcePropertiesMESA_EXT = 192,
+ VK_COMMAND_TYPE_vkResetFenceResource100000MESA_EXT = 244,
+ VK_COMMAND_TYPE_vkWaitSemaphoreResource100000MESA_EXT = 245,
+ VK_COMMAND_TYPE_vkImportSemaphoreResource100000MESA_EXT = 246,
VK_COMMAND_TYPE_vkGetVenusExperimentalFeatureData100000MESA_EXT = 195,
} VkCommandTypeEXT;
@@ -320,6 +414,7 @@ typedef struct VkVenusExperimentalFeatures100000MESA {
VkBool32 memoryResourceAllocationSize;
VkBool32 globalFencing;
VkBool32 largeRing;
+ VkBool32 syncFdFencing;
} VkVenusExperimentalFeatures100000MESA;
typedef struct VkMemoryResourceAllocationSizeProperties100000MESA {
@@ -328,6 +423,19 @@ typedef struct VkMemoryResourceAllocationSizeProperties100000MESA {
uint64_t allocationSize;
} VkMemoryResourceAllocationSizeProperties100000MESA;
+typedef struct VkImportSemaphoreResourceInfo100000MESA {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ uint32_t resourceId;
+} VkImportSemaphoreResourceInfo100000MESA;
+
+typedef struct VkDeviceQueueTimelineInfoMESA {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t ringIdx;
+} VkDeviceQueueTimelineInfoMESA;
+
struct vn_command_vkCreateInstance {
const VkInstanceCreateInfo* pCreateInfo;
const VkAllocationCallbacks* pAllocator;
@@ -1152,6 +1260,25 @@ struct vn_command_vkCmdDrawIndexed {
uint32_t firstInstance;
};
+struct vn_command_vkCmdDrawMultiEXT {
+ VkCommandBuffer commandBuffer;
+ uint32_t drawCount;
+ const VkMultiDrawInfoEXT* pVertexInfo;
+ uint32_t instanceCount;
+ uint32_t firstInstance;
+ uint32_t stride;
+};
+
+struct vn_command_vkCmdDrawMultiIndexedEXT {
+ VkCommandBuffer commandBuffer;
+ uint32_t drawCount;
+ const VkMultiDrawIndexedInfoEXT* pIndexInfo;
+ uint32_t instanceCount;
+ uint32_t firstInstance;
+ uint32_t stride;
+ const int32_t* pVertexOffset;
+};
+
struct vn_command_vkCmdDrawIndirect {
VkCommandBuffer commandBuffer;
VkBuffer buffer;
@@ -1332,6 +1459,15 @@ struct vn_command_vkCmdEndQuery {
uint32_t query;
};
+struct vn_command_vkCmdBeginConditionalRenderingEXT {
+ VkCommandBuffer commandBuffer;
+ const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin;
+};
+
+struct vn_command_vkCmdEndConditionalRenderingEXT {
+ VkCommandBuffer commandBuffer;
+};
+
struct vn_command_vkCmdResetQueryPool {
VkCommandBuffer commandBuffer;
VkQueryPool queryPool;
@@ -1429,6 +1565,15 @@ struct vn_command_vkGetPhysicalDeviceSparseImageFormatProperties2 {
VkSparseImageFormatProperties2* pProperties;
};
+struct vn_command_vkCmdPushDescriptorSetKHR {
+ VkCommandBuffer commandBuffer;
+ VkPipelineBindPoint pipelineBindPoint;
+ VkPipelineLayout layout;
+ uint32_t set;
+ uint32_t descriptorWriteCount;
+ const VkWriteDescriptorSet* pDescriptorWrites;
+};
+
struct vn_command_vkTrimCommandPool {
VkDevice device;
VkCommandPool commandPool;
@@ -1464,12 +1609,42 @@ struct vn_command_vkGetPhysicalDeviceExternalSemaphoreProperties {
VkExternalSemaphoreProperties* pExternalSemaphoreProperties;
};
+struct vn_command_vkGetSemaphoreFdKHR {
+ VkDevice device;
+ const VkSemaphoreGetFdInfoKHR* pGetFdInfo;
+ int* pFd;
+
+ VkResult ret;
+};
+
+struct vn_command_vkImportSemaphoreFdKHR {
+ VkDevice device;
+ const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo;
+
+ VkResult ret;
+};
+
struct vn_command_vkGetPhysicalDeviceExternalFenceProperties {
VkPhysicalDevice physicalDevice;
const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo;
VkExternalFenceProperties* pExternalFenceProperties;
};
+struct vn_command_vkGetFenceFdKHR {
+ VkDevice device;
+ const VkFenceGetFdInfoKHR* pGetFdInfo;
+ int* pFd;
+
+ VkResult ret;
+};
+
+struct vn_command_vkImportFenceFdKHR {
+ VkDevice device;
+ const VkImportFenceFdInfoKHR* pImportFenceFdInfo;
+
+ VkResult ret;
+};
+
struct vn_command_vkEnumeratePhysicalDeviceGroups {
VkInstance instance;
uint32_t* pPhysicalDeviceGroupCount;
@@ -1539,6 +1714,14 @@ struct vn_command_vkUpdateDescriptorSetWithTemplate {
const void* pData;
};
+struct vn_command_vkCmdPushDescriptorSetWithTemplateKHR {
+ VkCommandBuffer commandBuffer;
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate;
+ VkPipelineLayout layout;
+ uint32_t set;
+ const void* pData;
+};
+
struct vn_command_vkGetBufferMemoryRequirements2 {
VkDevice device;
const VkBufferMemoryRequirementsInfo2* pInfo;
@@ -1558,6 +1741,25 @@ struct vn_command_vkGetImageSparseMemoryRequirements2 {
VkSparseImageMemoryRequirements2* pSparseMemoryRequirements;
};
+struct vn_command_vkGetDeviceBufferMemoryRequirements {
+ VkDevice device;
+ const VkDeviceBufferMemoryRequirements* pInfo;
+ VkMemoryRequirements2* pMemoryRequirements;
+};
+
+struct vn_command_vkGetDeviceImageMemoryRequirements {
+ VkDevice device;
+ const VkDeviceImageMemoryRequirements* pInfo;
+ VkMemoryRequirements2* pMemoryRequirements;
+};
+
+struct vn_command_vkGetDeviceImageSparseMemoryRequirements {
+ VkDevice device;
+ const VkDeviceImageMemoryRequirements* pInfo;
+ uint32_t* pSparseMemoryRequirementCount;
+ VkSparseImageMemoryRequirements2* pSparseMemoryRequirements;
+};
+
struct vn_command_vkCreateSamplerYcbcrConversion {
VkDevice device;
const VkSamplerYcbcrConversionCreateInfo* pCreateInfo;
@@ -1585,6 +1787,24 @@ struct vn_command_vkGetDescriptorSetLayoutSupport {
VkDescriptorSetLayoutSupport* pSupport;
};
+struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT {
+ VkPhysicalDevice physicalDevice;
+ uint32_t* pTimeDomainCount;
+ VkTimeDomainEXT* pTimeDomains;
+
+ VkResult ret;
+};
+
+struct vn_command_vkGetCalibratedTimestampsEXT {
+ VkDevice device;
+ uint32_t timestampCount;
+ const VkCalibratedTimestampInfoEXT* pTimestampInfos;
+ uint64_t* pTimestamps;
+ uint64_t* pMaxDeviation;
+
+ VkResult ret;
+};
+
struct vn_command_vkCreateRenderPass2 {
VkDevice device;
const VkRenderPassCreateInfo2* pCreateInfo;
@@ -1733,6 +1953,228 @@ struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress {
uint64_t ret;
};
+struct vn_command_vkCmdSetLineStippleEXT {
+ VkCommandBuffer commandBuffer;
+ uint32_t lineStippleFactor;
+ uint16_t lineStipplePattern;
+};
+
+struct vn_command_vkGetPhysicalDeviceToolProperties {
+ VkPhysicalDevice physicalDevice;
+ uint32_t* pToolCount;
+ VkPhysicalDeviceToolProperties* pToolProperties;
+
+ VkResult ret;
+};
+
+struct vn_command_vkCmdSetCullMode {
+ VkCommandBuffer commandBuffer;
+ VkCullModeFlags cullMode;
+};
+
+struct vn_command_vkCmdSetFrontFace {
+ VkCommandBuffer commandBuffer;
+ VkFrontFace frontFace;
+};
+
+struct vn_command_vkCmdSetPrimitiveTopology {
+ VkCommandBuffer commandBuffer;
+ VkPrimitiveTopology primitiveTopology;
+};
+
+struct vn_command_vkCmdSetViewportWithCount {
+ VkCommandBuffer commandBuffer;
+ uint32_t viewportCount;
+ const VkViewport* pViewports;
+};
+
+struct vn_command_vkCmdSetScissorWithCount {
+ VkCommandBuffer commandBuffer;
+ uint32_t scissorCount;
+ const VkRect2D* pScissors;
+};
+
+struct vn_command_vkCmdBindVertexBuffers2 {
+ VkCommandBuffer commandBuffer;
+ uint32_t firstBinding;
+ uint32_t bindingCount;
+ const VkBuffer* pBuffers;
+ const VkDeviceSize* pOffsets;
+ const VkDeviceSize* pSizes;
+ const VkDeviceSize* pStrides;
+};
+
+struct vn_command_vkCmdSetDepthTestEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 depthTestEnable;
+};
+
+struct vn_command_vkCmdSetDepthWriteEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 depthWriteEnable;
+};
+
+struct vn_command_vkCmdSetDepthCompareOp {
+ VkCommandBuffer commandBuffer;
+ VkCompareOp depthCompareOp;
+};
+
+struct vn_command_vkCmdSetDepthBoundsTestEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 depthBoundsTestEnable;
+};
+
+struct vn_command_vkCmdSetStencilTestEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 stencilTestEnable;
+};
+
+struct vn_command_vkCmdSetStencilOp {
+ VkCommandBuffer commandBuffer;
+ VkStencilFaceFlags faceMask;
+ VkStencilOp failOp;
+ VkStencilOp passOp;
+ VkStencilOp depthFailOp;
+ VkCompareOp compareOp;
+};
+
+struct vn_command_vkCmdSetPatchControlPointsEXT {
+ VkCommandBuffer commandBuffer;
+ uint32_t patchControlPoints;
+};
+
+struct vn_command_vkCmdSetRasterizerDiscardEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 rasterizerDiscardEnable;
+};
+
+struct vn_command_vkCmdSetDepthBiasEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 depthBiasEnable;
+};
+
+struct vn_command_vkCmdSetLogicOpEXT {
+ VkCommandBuffer commandBuffer;
+ VkLogicOp logicOp;
+};
+
+struct vn_command_vkCmdSetPrimitiveRestartEnable {
+ VkCommandBuffer commandBuffer;
+ VkBool32 primitiveRestartEnable;
+};
+
+struct vn_command_vkCreatePrivateDataSlot {
+ VkDevice device;
+ const VkPrivateDataSlotCreateInfo* pCreateInfo;
+ const VkAllocationCallbacks* pAllocator;
+ VkPrivateDataSlot* pPrivateDataSlot;
+
+ VkResult ret;
+};
+
+struct vn_command_vkDestroyPrivateDataSlot {
+ VkDevice device;
+ VkPrivateDataSlot privateDataSlot;
+ const VkAllocationCallbacks* pAllocator;
+};
+
+struct vn_command_vkSetPrivateData {
+ VkDevice device;
+ VkObjectType objectType;
+ uint64_t objectHandle;
+ VkPrivateDataSlot privateDataSlot;
+ uint64_t data;
+
+ VkResult ret;
+};
+
+struct vn_command_vkGetPrivateData {
+ VkDevice device;
+ VkObjectType objectType;
+ uint64_t objectHandle;
+ VkPrivateDataSlot privateDataSlot;
+ uint64_t* pData;
+};
+
+struct vn_command_vkCmdCopyBuffer2 {
+ VkCommandBuffer commandBuffer;
+ const VkCopyBufferInfo2* pCopyBufferInfo;
+};
+
+struct vn_command_vkCmdCopyImage2 {
+ VkCommandBuffer commandBuffer;
+ const VkCopyImageInfo2* pCopyImageInfo;
+};
+
+struct vn_command_vkCmdBlitImage2 {
+ VkCommandBuffer commandBuffer;
+ const VkBlitImageInfo2* pBlitImageInfo;
+};
+
+struct vn_command_vkCmdCopyBufferToImage2 {
+ VkCommandBuffer commandBuffer;
+ const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo;
+};
+
+struct vn_command_vkCmdCopyImageToBuffer2 {
+ VkCommandBuffer commandBuffer;
+ const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo;
+};
+
+struct vn_command_vkCmdResolveImage2 {
+ VkCommandBuffer commandBuffer;
+ const VkResolveImageInfo2* pResolveImageInfo;
+};
+
+struct vn_command_vkCmdSetEvent2 {
+ VkCommandBuffer commandBuffer;
+ VkEvent event;
+ const VkDependencyInfo* pDependencyInfo;
+};
+
+struct vn_command_vkCmdResetEvent2 {
+ VkCommandBuffer commandBuffer;
+ VkEvent event;
+ VkPipelineStageFlags2 stageMask;
+};
+
+struct vn_command_vkCmdWaitEvents2 {
+ VkCommandBuffer commandBuffer;
+ uint32_t eventCount;
+ const VkEvent* pEvents;
+ const VkDependencyInfo* pDependencyInfos;
+};
+
+struct vn_command_vkCmdPipelineBarrier2 {
+ VkCommandBuffer commandBuffer;
+ const VkDependencyInfo* pDependencyInfo;
+};
+
+struct vn_command_vkQueueSubmit2 {
+ VkQueue queue;
+ uint32_t submitCount;
+ const VkSubmitInfo2* pSubmits;
+ VkFence fence;
+
+ VkResult ret;
+};
+
+struct vn_command_vkCmdWriteTimestamp2 {
+ VkCommandBuffer commandBuffer;
+ VkPipelineStageFlags2 stage;
+ VkQueryPool queryPool;
+ uint32_t query;
+};
+
+struct vn_command_vkCmdBeginRendering {
+ VkCommandBuffer commandBuffer;
+ const VkRenderingInfo* pRenderingInfo;
+};
+
+struct vn_command_vkCmdEndRendering {
+ VkCommandBuffer commandBuffer;
+};
+
struct vn_command_vkSetReplyCommandStreamMESA {
const VkCommandStreamDescriptionMESA* pStream;
};
@@ -1779,6 +2221,21 @@ struct vn_command_vkGetMemoryResourcePropertiesMESA {
VkResult ret;
};
+struct vn_command_vkResetFenceResource100000MESA {
+ VkDevice device;
+ VkFence fence;
+};
+
+struct vn_command_vkWaitSemaphoreResource100000MESA {
+ VkDevice device;
+ VkSemaphore semaphore;
+};
+
+struct vn_command_vkImportSemaphoreResource100000MESA {
+ VkDevice device;
+ const VkImportSemaphoreResourceInfo100000MESA* pImportSemaphoreResourceInfo;
+};
+
struct vn_command_vkGetVenusExperimentalFeatureData100000MESA {
size_t* pDataSize;
void* pData;
@@ -1901,6 +2358,8 @@ struct vn_dispatch_context {
void (*dispatch_vkCmdBindVertexBuffers)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBindVertexBuffers *args);
void (*dispatch_vkCmdDraw)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDraw *args);
void (*dispatch_vkCmdDrawIndexed)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDrawIndexed *args);
+ void (*dispatch_vkCmdDrawMultiEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDrawMultiEXT *args);
+ void (*dispatch_vkCmdDrawMultiIndexedEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDrawMultiIndexedEXT *args);
void (*dispatch_vkCmdDrawIndirect)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDrawIndirect *args);
void (*dispatch_vkCmdDrawIndexedIndirect)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDrawIndexedIndirect *args);
void (*dispatch_vkCmdDispatch)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdDispatch *args);
@@ -1922,6 +2381,8 @@ struct vn_dispatch_context {
void (*dispatch_vkCmdPipelineBarrier)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdPipelineBarrier *args);
void (*dispatch_vkCmdBeginQuery)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBeginQuery *args);
void (*dispatch_vkCmdEndQuery)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdEndQuery *args);
+ void (*dispatch_vkCmdBeginConditionalRenderingEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBeginConditionalRenderingEXT *args);
+ void (*dispatch_vkCmdEndConditionalRenderingEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdEndConditionalRenderingEXT *args);
void (*dispatch_vkCmdResetQueryPool)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdResetQueryPool *args);
void (*dispatch_vkCmdWriteTimestamp)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdWriteTimestamp *args);
void (*dispatch_vkCmdCopyQueryPoolResults)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdCopyQueryPoolResults *args);
@@ -1937,12 +2398,17 @@ struct vn_dispatch_context {
void (*dispatch_vkGetPhysicalDeviceQueueFamilyProperties2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceQueueFamilyProperties2 *args);
void (*dispatch_vkGetPhysicalDeviceMemoryProperties2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceMemoryProperties2 *args);
void (*dispatch_vkGetPhysicalDeviceSparseImageFormatProperties2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceSparseImageFormatProperties2 *args);
+ void (*dispatch_vkCmdPushDescriptorSetKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdPushDescriptorSetKHR *args);
void (*dispatch_vkTrimCommandPool)(struct vn_dispatch_context *ctx, struct vn_command_vkTrimCommandPool *args);
void (*dispatch_vkGetPhysicalDeviceExternalBufferProperties)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceExternalBufferProperties *args);
void (*dispatch_vkGetMemoryFdKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkGetMemoryFdKHR *args);
void (*dispatch_vkGetMemoryFdPropertiesKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkGetMemoryFdPropertiesKHR *args);
void (*dispatch_vkGetPhysicalDeviceExternalSemaphoreProperties)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceExternalSemaphoreProperties *args);
+ void (*dispatch_vkGetSemaphoreFdKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkGetSemaphoreFdKHR *args);
+ void (*dispatch_vkImportSemaphoreFdKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkImportSemaphoreFdKHR *args);
void (*dispatch_vkGetPhysicalDeviceExternalFenceProperties)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceExternalFenceProperties *args);
+ void (*dispatch_vkGetFenceFdKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkGetFenceFdKHR *args);
+ void (*dispatch_vkImportFenceFdKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkImportFenceFdKHR *args);
void (*dispatch_vkEnumeratePhysicalDeviceGroups)(struct vn_dispatch_context *ctx, struct vn_command_vkEnumeratePhysicalDeviceGroups *args);
void (*dispatch_vkGetDeviceGroupPeerMemoryFeatures)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDeviceGroupPeerMemoryFeatures *args);
void (*dispatch_vkBindBufferMemory2)(struct vn_dispatch_context *ctx, struct vn_command_vkBindBufferMemory2 *args);
@@ -1952,13 +2418,19 @@ struct vn_dispatch_context {
void (*dispatch_vkCreateDescriptorUpdateTemplate)(struct vn_dispatch_context *ctx, struct vn_command_vkCreateDescriptorUpdateTemplate *args);
void (*dispatch_vkDestroyDescriptorUpdateTemplate)(struct vn_dispatch_context *ctx, struct vn_command_vkDestroyDescriptorUpdateTemplate *args);
void (*dispatch_vkUpdateDescriptorSetWithTemplate)(struct vn_dispatch_context *ctx, struct vn_command_vkUpdateDescriptorSetWithTemplate *args);
+ void (*dispatch_vkCmdPushDescriptorSetWithTemplateKHR)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdPushDescriptorSetWithTemplateKHR *args);
void (*dispatch_vkGetBufferMemoryRequirements2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetBufferMemoryRequirements2 *args);
void (*dispatch_vkGetImageMemoryRequirements2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetImageMemoryRequirements2 *args);
void (*dispatch_vkGetImageSparseMemoryRequirements2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetImageSparseMemoryRequirements2 *args);
+ void (*dispatch_vkGetDeviceBufferMemoryRequirements)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDeviceBufferMemoryRequirements *args);
+ void (*dispatch_vkGetDeviceImageMemoryRequirements)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDeviceImageMemoryRequirements *args);
+ void (*dispatch_vkGetDeviceImageSparseMemoryRequirements)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDeviceImageSparseMemoryRequirements *args);
void (*dispatch_vkCreateSamplerYcbcrConversion)(struct vn_dispatch_context *ctx, struct vn_command_vkCreateSamplerYcbcrConversion *args);
void (*dispatch_vkDestroySamplerYcbcrConversion)(struct vn_dispatch_context *ctx, struct vn_command_vkDestroySamplerYcbcrConversion *args);
void (*dispatch_vkGetDeviceQueue2)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDeviceQueue2 *args);
void (*dispatch_vkGetDescriptorSetLayoutSupport)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDescriptorSetLayoutSupport *args);
+ void (*dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT *args);
+ void (*dispatch_vkGetCalibratedTimestampsEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkGetCalibratedTimestampsEXT *args);
void (*dispatch_vkCreateRenderPass2)(struct vn_dispatch_context *ctx, struct vn_command_vkCreateRenderPass2 *args);
void (*dispatch_vkCmdBeginRenderPass2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBeginRenderPass2 *args);
void (*dispatch_vkCmdNextSubpass2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdNextSubpass2 *args);
@@ -1978,6 +2450,43 @@ struct vn_dispatch_context {
void (*dispatch_vkGetBufferOpaqueCaptureAddress)(struct vn_dispatch_context *ctx, struct vn_command_vkGetBufferOpaqueCaptureAddress *args);
void (*dispatch_vkGetBufferDeviceAddress)(struct vn_dispatch_context *ctx, struct vn_command_vkGetBufferDeviceAddress *args);
void (*dispatch_vkGetDeviceMemoryOpaqueCaptureAddress)(struct vn_dispatch_context *ctx, struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress *args);
+ void (*dispatch_vkCmdSetLineStippleEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetLineStippleEXT *args);
+ void (*dispatch_vkGetPhysicalDeviceToolProperties)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPhysicalDeviceToolProperties *args);
+ void (*dispatch_vkCmdSetCullMode)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetCullMode *args);
+ void (*dispatch_vkCmdSetFrontFace)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetFrontFace *args);
+ void (*dispatch_vkCmdSetPrimitiveTopology)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetPrimitiveTopology *args);
+ void (*dispatch_vkCmdSetViewportWithCount)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetViewportWithCount *args);
+ void (*dispatch_vkCmdSetScissorWithCount)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetScissorWithCount *args);
+ void (*dispatch_vkCmdBindVertexBuffers2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBindVertexBuffers2 *args);
+ void (*dispatch_vkCmdSetDepthTestEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetDepthTestEnable *args);
+ void (*dispatch_vkCmdSetDepthWriteEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetDepthWriteEnable *args);
+ void (*dispatch_vkCmdSetDepthCompareOp)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetDepthCompareOp *args);
+ void (*dispatch_vkCmdSetDepthBoundsTestEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetDepthBoundsTestEnable *args);
+ void (*dispatch_vkCmdSetStencilTestEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetStencilTestEnable *args);
+ void (*dispatch_vkCmdSetStencilOp)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetStencilOp *args);
+ void (*dispatch_vkCmdSetPatchControlPointsEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetPatchControlPointsEXT *args);
+ void (*dispatch_vkCmdSetRasterizerDiscardEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetRasterizerDiscardEnable *args);
+ void (*dispatch_vkCmdSetDepthBiasEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetDepthBiasEnable *args);
+ void (*dispatch_vkCmdSetLogicOpEXT)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetLogicOpEXT *args);
+ void (*dispatch_vkCmdSetPrimitiveRestartEnable)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetPrimitiveRestartEnable *args);
+ void (*dispatch_vkCreatePrivateDataSlot)(struct vn_dispatch_context *ctx, struct vn_command_vkCreatePrivateDataSlot *args);
+ void (*dispatch_vkDestroyPrivateDataSlot)(struct vn_dispatch_context *ctx, struct vn_command_vkDestroyPrivateDataSlot *args);
+ void (*dispatch_vkSetPrivateData)(struct vn_dispatch_context *ctx, struct vn_command_vkSetPrivateData *args);
+ void (*dispatch_vkGetPrivateData)(struct vn_dispatch_context *ctx, struct vn_command_vkGetPrivateData *args);
+ void (*dispatch_vkCmdCopyBuffer2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdCopyBuffer2 *args);
+ void (*dispatch_vkCmdCopyImage2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdCopyImage2 *args);
+ void (*dispatch_vkCmdBlitImage2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBlitImage2 *args);
+ void (*dispatch_vkCmdCopyBufferToImage2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdCopyBufferToImage2 *args);
+ void (*dispatch_vkCmdCopyImageToBuffer2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdCopyImageToBuffer2 *args);
+ void (*dispatch_vkCmdResolveImage2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdResolveImage2 *args);
+ void (*dispatch_vkCmdSetEvent2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdSetEvent2 *args);
+ void (*dispatch_vkCmdResetEvent2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdResetEvent2 *args);
+ void (*dispatch_vkCmdWaitEvents2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdWaitEvents2 *args);
+ void (*dispatch_vkCmdPipelineBarrier2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdPipelineBarrier2 *args);
+ void (*dispatch_vkQueueSubmit2)(struct vn_dispatch_context *ctx, struct vn_command_vkQueueSubmit2 *args);
+ void (*dispatch_vkCmdWriteTimestamp2)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdWriteTimestamp2 *args);
+ void (*dispatch_vkCmdBeginRendering)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdBeginRendering *args);
+ void (*dispatch_vkCmdEndRendering)(struct vn_dispatch_context *ctx, struct vn_command_vkCmdEndRendering *args);
void (*dispatch_vkSetReplyCommandStreamMESA)(struct vn_dispatch_context *ctx, struct vn_command_vkSetReplyCommandStreamMESA *args);
void (*dispatch_vkSeekReplyCommandStreamMESA)(struct vn_dispatch_context *ctx, struct vn_command_vkSeekReplyCommandStreamMESA *args);
void (*dispatch_vkExecuteCommandStreamsMESA)(struct vn_dispatch_context *ctx, struct vn_command_vkExecuteCommandStreamsMESA *args);
@@ -1986,6 +2495,9 @@ struct vn_dispatch_context {
void (*dispatch_vkNotifyRingMESA)(struct vn_dispatch_context *ctx, struct vn_command_vkNotifyRingMESA *args);
void (*dispatch_vkWriteRingExtraMESA)(struct vn_dispatch_context *ctx, struct vn_command_vkWriteRingExtraMESA *args);
void (*dispatch_vkGetMemoryResourcePropertiesMESA)(struct vn_dispatch_context *ctx, struct vn_command_vkGetMemoryResourcePropertiesMESA *args);
+ void (*dispatch_vkResetFenceResource100000MESA)(struct vn_dispatch_context *ctx, struct vn_command_vkResetFenceResource100000MESA *args);
+ void (*dispatch_vkWaitSemaphoreResource100000MESA)(struct vn_dispatch_context *ctx, struct vn_command_vkWaitSemaphoreResource100000MESA *args);
+ void (*dispatch_vkImportSemaphoreResource100000MESA)(struct vn_dispatch_context *ctx, struct vn_command_vkImportSemaphoreResource100000MESA *args);
void (*dispatch_vkGetVenusExperimentalFeatureData100000MESA)(struct vn_dispatch_context *ctx, struct vn_command_vkGetVenusExperimentalFeatureData100000MESA *args);
};
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_descriptor_pool.h b/src/venus/venus-protocol/vn_protocol_renderer_descriptor_pool.h
index f2656bfb..34058d43 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_descriptor_pool.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_descriptor_pool.h
@@ -30,10 +30,10 @@ vn_replace_VkDescriptorPoolSize_handle(VkDescriptorPoolSize *val)
/* skip val->descriptorCount */
}
-/* struct VkDescriptorPoolCreateInfo chain */
+/* struct VkDescriptorPoolInlineUniformBlockCreateInfo chain */
static inline void *
-vn_decode_VkDescriptorPoolCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkDescriptorPoolInlineUniformBlockCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -42,6 +42,91 @@ vn_decode_VkDescriptorPoolCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
}
static inline void
+vn_decode_VkDescriptorPoolInlineUniformBlockCreateInfo_self_temp(struct vn_cs_decoder *dec, VkDescriptorPoolInlineUniformBlockCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->maxInlineUniformBlockBindings);
+}
+
+static inline void
+vn_decode_VkDescriptorPoolInlineUniformBlockCreateInfo_temp(struct vn_cs_decoder *dec, VkDescriptorPoolInlineUniformBlockCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDescriptorPoolInlineUniformBlockCreateInfo_pnext_temp(dec);
+ vn_decode_VkDescriptorPoolInlineUniformBlockCreateInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkDescriptorPoolInlineUniformBlockCreateInfo_handle_self(VkDescriptorPoolInlineUniformBlockCreateInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->maxInlineUniformBlockBindings */
+}
+
+static inline void
+vn_replace_VkDescriptorPoolInlineUniformBlockCreateInfo_handle(VkDescriptorPoolInlineUniformBlockCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO:
+ vn_replace_VkDescriptorPoolInlineUniformBlockCreateInfo_handle_self((VkDescriptorPoolInlineUniformBlockCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkDescriptorPoolCreateInfo chain */
+
+static inline void *
+vn_decode_VkDescriptorPoolCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkDescriptorPoolInlineUniformBlockCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDescriptorPoolCreateInfo_pnext_temp(dec);
+ vn_decode_VkDescriptorPoolInlineUniformBlockCreateInfo_self_temp(dec, (VkDescriptorPoolInlineUniformBlockCreateInfo *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkMutableDescriptorTypeCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDescriptorPoolCreateInfo_pnext_temp(dec);
+ vn_decode_VkMutableDescriptorTypeCreateInfoEXT_self_temp(dec, (VkMutableDescriptorTypeCreateInfoEXT *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkDescriptorPoolCreateInfo_self_temp(struct vn_cs_decoder *dec, VkDescriptorPoolCreateInfo *val)
{
/* skip val->{sType,pNext} */
@@ -97,6 +182,12 @@ vn_replace_VkDescriptorPoolCreateInfo_handle(VkDescriptorPoolCreateInfo *val)
case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO:
vn_replace_VkDescriptorPoolCreateInfo_handle_self((VkDescriptorPoolCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO:
+ vn_replace_VkDescriptorPoolInlineUniformBlockCreateInfo_handle_self((VkDescriptorPoolInlineUniformBlockCreateInfo *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
+ vn_replace_VkMutableDescriptorTypeCreateInfoEXT_handle_self((VkMutableDescriptorTypeCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set.h b/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set.h
index 9b7a164a..285c65a8 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set.h
@@ -183,149 +183,6 @@ vn_replace_VkDescriptorSetAllocateInfo_handle(VkDescriptorSetAllocateInfo *val)
} while (pnext);
}
-/* struct VkDescriptorImageInfo */
-
-static inline void
-vn_decode_VkDescriptorImageInfo_temp(struct vn_cs_decoder *dec, VkDescriptorImageInfo *val)
-{
- vn_decode_VkSampler_lookup(dec, &val->sampler);
- vn_decode_VkImageView_lookup(dec, &val->imageView);
- vn_decode_VkImageLayout(dec, &val->imageLayout);
-}
-
-static inline void
-vn_replace_VkDescriptorImageInfo_handle(VkDescriptorImageInfo *val)
-{
- vn_replace_VkSampler_handle(&val->sampler);
- vn_replace_VkImageView_handle(&val->imageView);
- /* skip val->imageLayout */
-}
-
-/* struct VkDescriptorBufferInfo */
-
-static inline void
-vn_decode_VkDescriptorBufferInfo_temp(struct vn_cs_decoder *dec, VkDescriptorBufferInfo *val)
-{
- vn_decode_VkBuffer_lookup(dec, &val->buffer);
- vn_decode_VkDeviceSize(dec, &val->offset);
- vn_decode_VkDeviceSize(dec, &val->range);
-}
-
-static inline void
-vn_replace_VkDescriptorBufferInfo_handle(VkDescriptorBufferInfo *val)
-{
- vn_replace_VkBuffer_handle(&val->buffer);
- /* skip val->offset */
- /* skip val->range */
-}
-
-/* struct VkWriteDescriptorSet chain */
-
-static inline void *
-vn_decode_VkWriteDescriptorSet_pnext_temp(struct vn_cs_decoder *dec)
-{
- /* no known/supported struct */
- if (vn_decode_simple_pointer(dec))
- vn_cs_decoder_set_fatal(dec);
- return NULL;
-}
-
-static inline void
-vn_decode_VkWriteDescriptorSet_self_temp(struct vn_cs_decoder *dec, VkWriteDescriptorSet *val)
-{
- /* skip val->{sType,pNext} */
- vn_decode_VkDescriptorSet_lookup(dec, &val->dstSet);
- vn_decode_uint32_t(dec, &val->dstBinding);
- vn_decode_uint32_t(dec, &val->dstArrayElement);
- vn_decode_uint32_t(dec, &val->descriptorCount);
- vn_decode_VkDescriptorType(dec, &val->descriptorType);
- if (vn_peek_array_size(dec)) {
- const uint32_t iter_count = vn_decode_array_size(dec, val->descriptorCount);
- val->pImageInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pImageInfo) * iter_count);
- if (!val->pImageInfo) return;
- for (uint32_t i = 0; i < iter_count; i++)
- vn_decode_VkDescriptorImageInfo_temp(dec, &((VkDescriptorImageInfo *)val->pImageInfo)[i]);
- } else {
- vn_decode_array_size_unchecked(dec);
- val->pImageInfo = NULL;
- }
- if (vn_peek_array_size(dec)) {
- const uint32_t iter_count = vn_decode_array_size(dec, val->descriptorCount);
- val->pBufferInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pBufferInfo) * iter_count);
- if (!val->pBufferInfo) return;
- for (uint32_t i = 0; i < iter_count; i++)
- vn_decode_VkDescriptorBufferInfo_temp(dec, &((VkDescriptorBufferInfo *)val->pBufferInfo)[i]);
- } else {
- vn_decode_array_size_unchecked(dec);
- val->pBufferInfo = NULL;
- }
- if (vn_peek_array_size(dec)) {
- const uint32_t iter_count = vn_decode_array_size(dec, val->descriptorCount);
- val->pTexelBufferView = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pTexelBufferView) * iter_count);
- if (!val->pTexelBufferView) return;
- for (uint32_t i = 0; i < iter_count; i++)
- vn_decode_VkBufferView_lookup(dec, &((VkBufferView *)val->pTexelBufferView)[i]);
- } else {
- vn_decode_array_size_unchecked(dec);
- val->pTexelBufferView = NULL;
- }
-}
-
-static inline void
-vn_decode_VkWriteDescriptorSet_temp(struct vn_cs_decoder *dec, VkWriteDescriptorSet *val)
-{
- VkStructureType stype;
- vn_decode_VkStructureType(dec, &stype);
- if (stype != VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)
- vn_cs_decoder_set_fatal(dec);
-
- val->sType = stype;
- val->pNext = vn_decode_VkWriteDescriptorSet_pnext_temp(dec);
- vn_decode_VkWriteDescriptorSet_self_temp(dec, val);
-}
-
-static inline void
-vn_replace_VkWriteDescriptorSet_handle_self(VkWriteDescriptorSet *val)
-{
- /* skip val->sType */
- /* skip val->pNext */
- vn_replace_VkDescriptorSet_handle(&val->dstSet);
- /* skip val->dstBinding */
- /* skip val->dstArrayElement */
- /* skip val->descriptorCount */
- /* skip val->descriptorType */
- if (val->pImageInfo) {
- for (uint32_t i = 0; i < val->descriptorCount; i++)
- vn_replace_VkDescriptorImageInfo_handle(&((VkDescriptorImageInfo *)val->pImageInfo)[i]);
- }
- if (val->pBufferInfo) {
- for (uint32_t i = 0; i < val->descriptorCount; i++)
- vn_replace_VkDescriptorBufferInfo_handle(&((VkDescriptorBufferInfo *)val->pBufferInfo)[i]);
- }
- if (val->pTexelBufferView) {
- for (uint32_t i = 0; i < val->descriptorCount; i++)
- vn_replace_VkBufferView_handle(&((VkBufferView *)val->pTexelBufferView)[i]);
- }
-}
-
-static inline void
-vn_replace_VkWriteDescriptorSet_handle(VkWriteDescriptorSet *val)
-{
- struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
-
- do {
- switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
- vn_replace_VkWriteDescriptorSet_handle_self((VkWriteDescriptorSet *)pnext);
- break;
- default:
- /* ignore unknown/unsupported struct */
- break;
- }
- pnext = pnext->pNext;
- } while (pnext);
-}
-
/* struct VkCopyDescriptorSet chain */
static inline void *
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set_layout.h b/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set_layout.h
index da976cb6..fe044242 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set_layout.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_descriptor_set_layout.h
@@ -137,6 +137,14 @@ vn_decode_VkDescriptorSetLayoutCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkDescriptorSetLayoutBindingFlagsCreateInfo_self_temp(dec, (VkDescriptorSetLayoutBindingFlagsCreateInfo *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkMutableDescriptorTypeCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDescriptorSetLayoutCreateInfo_pnext_temp(dec);
+ vn_decode_VkMutableDescriptorTypeCreateInfoEXT_self_temp(dec, (VkMutableDescriptorTypeCreateInfoEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -204,6 +212,9 @@ vn_replace_VkDescriptorSetLayoutCreateInfo_handle(VkDescriptorSetLayoutCreateInf
case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
vn_replace_VkDescriptorSetLayoutBindingFlagsCreateInfo_handle_self((VkDescriptorSetLayoutBindingFlagsCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
+ vn_replace_VkMutableDescriptorTypeCreateInfoEXT_handle_self((VkMutableDescriptorTypeCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_device.h b/src/venus/venus-protocol/vn_protocol_renderer_device.h
index 3601c74f..ebc0e532 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_device.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_device.h
@@ -741,6 +741,172 @@ vn_replace_VkDeviceQueueCreateInfo_handle(VkDeviceQueueCreateInfo *val)
} while (pnext);
}
+/* struct VkDevicePrivateDataCreateInfo chain */
+
+static inline void *
+vn_decode_VkDevicePrivateDataCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkDevicePrivateDataCreateInfo_self_temp(struct vn_cs_decoder *dec, VkDevicePrivateDataCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->privateDataSlotRequestCount);
+}
+
+static inline void
+vn_decode_VkDevicePrivateDataCreateInfo_temp(struct vn_cs_decoder *dec, VkDevicePrivateDataCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDevicePrivateDataCreateInfo_pnext_temp(dec);
+ vn_decode_VkDevicePrivateDataCreateInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkDevicePrivateDataCreateInfo_handle_self(VkDevicePrivateDataCreateInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->privateDataSlotRequestCount */
+}
+
+static inline void
+vn_replace_VkDevicePrivateDataCreateInfo_handle(VkDevicePrivateDataCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO:
+ vn_replace_VkDevicePrivateDataCreateInfo_handle_self((VkDevicePrivateDataCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDevicePrivateDataFeatures chain */
+
+static inline void
+vn_encode_VkPhysicalDevicePrivateDataFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePrivateDataFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDevicePrivateDataFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->privateData);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePrivateDataFeatures(struct vn_cs_encoder *enc, const VkPhysicalDevicePrivateDataFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES });
+ vn_encode_VkPhysicalDevicePrivateDataFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDevicePrivateDataFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePrivateDataFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrivateDataFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrivateDataFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->privateData);
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrivateDataFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrivateDataFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePrivateDataFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrivateDataFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePrivateDataFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrivateDataFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrivateDataFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->privateData */
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrivateDataFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrivateDataFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePrivateDataFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePrivateDataFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePrivateDataFeatures_handle_self(VkPhysicalDevicePrivateDataFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->privateData */
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePrivateDataFeatures_handle(VkPhysicalDevicePrivateDataFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ vn_replace_VkPhysicalDevicePrivateDataFeatures_handle_self((VkPhysicalDevicePrivateDataFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceVariablePointersFeatures chain */
static inline void
@@ -1419,6 +1585,337 @@ vn_replace_VkPhysicalDeviceProtectedMemoryFeatures_handle(VkPhysicalDeviceProtec
} while (pnext);
}
+/* struct VkPhysicalDeviceMultiDrawFeaturesEXT chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceMultiDrawFeaturesEXT chain.  Encoders emit sType, a
+ * NULL-only pNext, then the VkBool32 feature.  "_temp" decoders read the
+ * full payload; "_partial_temp" decoders skip the feature field.  Any
+ * unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceMultiDrawFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMultiDrawFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->multiDraw);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMultiDrawFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceMultiDrawFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceMultiDrawFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->multiDraw);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->multiDraw */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceMultiDrawFeaturesEXT_handle_self(VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->multiDraw */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceMultiDrawFeaturesEXT_handle(VkPhysicalDeviceMultiDrawFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceMultiDrawFeaturesEXT_handle_self((VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceInlineUniformBlockFeatures chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceInlineUniformBlockFeatures chain.  Encoders emit sType,
+ * a NULL-only pNext, then both VkBool32 features.  "_temp" decoders read
+ * the full payload; "_partial_temp" decoders skip the feature fields.
+ * Any unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceInlineUniformBlockFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceInlineUniformBlockFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->inlineUniformBlock);
+ vn_encode_VkBool32(enc, &val->descriptorBindingInlineUniformBlockUpdateAfterBind);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceInlineUniformBlockFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES });
+ vn_encode_VkPhysicalDeviceInlineUniformBlockFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceInlineUniformBlockFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->inlineUniformBlock);
+ vn_decode_VkBool32(dec, &val->descriptorBindingInlineUniformBlockUpdateAfterBind);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->inlineUniformBlock */
+ /* skip val->descriptorBindingInlineUniformBlockUpdateAfterBind */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceInlineUniformBlockFeatures_handle_self(VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->inlineUniformBlock */
+ /* skip val->descriptorBindingInlineUniformBlockUpdateAfterBind */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceInlineUniformBlockFeatures_handle(VkPhysicalDeviceInlineUniformBlockFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ vn_replace_VkPhysicalDeviceInlineUniformBlockFeatures_handle_self((VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceMaintenance4Features chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceMaintenance4Features chain.  Encoders emit sType, a
+ * NULL-only pNext, then the VkBool32 feature.  "_temp" decoders read the
+ * full payload; "_partial_temp" decoders skip the feature field.  Any
+ * unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceMaintenance4Features_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMaintenance4Features_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceMaintenance4Features *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->maintenance4);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMaintenance4Features(struct vn_cs_encoder *enc, const VkPhysicalDeviceMaintenance4Features *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES });
+ vn_encode_VkPhysicalDeviceMaintenance4Features_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceMaintenance4Features_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMaintenance4Features_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMaintenance4Features_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMaintenance4Features *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->maintenance4);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMaintenance4Features_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMaintenance4Features *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceMaintenance4Features_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Features_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMaintenance4Features_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMaintenance4Features_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMaintenance4Features *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maintenance4 */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMaintenance4Features_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMaintenance4Features *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceMaintenance4Features_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Features_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceMaintenance4Features_handle_self(VkPhysicalDeviceMaintenance4Features *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->maintenance4 */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceMaintenance4Features_handle(VkPhysicalDeviceMaintenance4Features *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ vn_replace_VkPhysicalDeviceMaintenance4Features_handle_self((VkPhysicalDeviceMaintenance4Features *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceShaderDrawParametersFeatures chain */
static inline void
@@ -2161,6 +2658,119 @@ vn_replace_VkPhysicalDevice8BitStorageFeatures_handle(VkPhysicalDevice8BitStorag
} while (pnext);
}
+/* struct VkPhysicalDeviceConditionalRenderingFeaturesEXT chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceConditionalRenderingFeaturesEXT chain.  Encoders emit
+ * sType, a NULL-only pNext, then both VkBool32 features.  "_temp" decoders
+ * read the full payload; "_partial_temp" decoders skip the feature fields.
+ * Any unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->conditionalRendering);
+ vn_encode_VkBool32(enc, &val->inheritedConditionalRendering);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceConditionalRenderingFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->conditionalRendering);
+ vn_decode_VkBool32(dec, &val->inheritedConditionalRendering);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->conditionalRendering */
+ /* skip val->inheritedConditionalRendering */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceConditionalRenderingFeaturesEXT_handle_self(VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->conditionalRendering */
+ /* skip val->inheritedConditionalRendering */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceConditionalRenderingFeaturesEXT_handle(VkPhysicalDeviceConditionalRenderingFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceConditionalRenderingFeaturesEXT_handle_self((VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceVulkanMemoryModelFeatures chain */
static inline void
@@ -2391,6 +3001,119 @@ vn_replace_VkPhysicalDeviceShaderAtomicInt64Features_handle(VkPhysicalDeviceShad
} while (pnext);
}
+/* struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT chain.  Encoders emit
+ * sType, a NULL-only pNext, then both VkBool32 features.  "_temp" decoders
+ * read the full payload; "_partial_temp" decoders skip the feature fields.
+ * Any unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->vertexAttributeInstanceRateDivisor);
+ vn_encode_VkBool32(enc, &val->vertexAttributeInstanceRateZeroDivisor);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->vertexAttributeInstanceRateDivisor);
+ vn_decode_VkBool32(dec, &val->vertexAttributeInstanceRateZeroDivisor);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->vertexAttributeInstanceRateDivisor */
+ /* skip val->vertexAttributeInstanceRateZeroDivisor */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_handle_self(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->vertexAttributeInstanceRateDivisor */
+ /* skip val->vertexAttributeInstanceRateZeroDivisor */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_handle(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_handle_self((VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceTransformFeedbackFeaturesEXT chain */
static inline void
@@ -2722,6 +3445,115 @@ vn_replace_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_handle(VkPhysical
} while (pnext);
}
+/* struct VkPhysicalDeviceDepthClipEnableFeaturesEXT chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceDepthClipEnableFeaturesEXT chain.  Encoders emit sType,
+ * a NULL-only pNext, then the VkBool32 feature.  "_temp" decoders read the
+ * full payload; "_partial_temp" decoders skip the feature field.  Any
+ * unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->depthClipEnable);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceDepthClipEnableFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->depthClipEnable);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->depthClipEnable */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceDepthClipEnableFeaturesEXT_handle_self(VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->depthClipEnable */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceDepthClipEnableFeaturesEXT_handle(VkPhysicalDeviceDepthClipEnableFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceDepthClipEnableFeaturesEXT_handle_self((VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceBufferDeviceAddressFeatures chain */
static inline void
@@ -2948,6 +3780,224 @@ vn_replace_VkPhysicalDeviceImagelessFramebufferFeatures_handle(VkPhysicalDeviceI
} while (pnext);
}
+/* struct VkPhysicalDeviceTextureCompressionASTCHDRFeatures chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceTextureCompressionASTCHDRFeatures chain.  Encoders emit
+ * sType, a NULL-only pNext, then the VkBool32 feature.  "_temp" decoders
+ * read the full payload; "_partial_temp" decoders skip the feature field.
+ * Any unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->textureCompressionASTC_HDR);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES });
+ vn_encode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->textureCompressionASTC_HDR);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->textureCompressionASTC_HDR */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_handle_self(VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->textureCompressionASTC_HDR */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_handle(VkPhysicalDeviceTextureCompressionASTCHDRFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ vn_replace_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_handle_self((VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT chain */
+
+/* NOTE(review): machine-generated venus wire helpers for the
+ * VkPhysicalDeviceIndexTypeUint8FeaturesEXT chain.  Encoders emit sType,
+ * a NULL-only pNext, then the VkBool32 feature.  "_temp" decoders read the
+ * full payload; "_partial_temp" decoders skip the feature field.  Any
+ * unexpected sType or non-NULL encoded pNext marks the decoder fatal.
+ * "_handle" walks the pNext chain; no Vulkan handles to replace here. */
+static inline void
+vn_encode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->indexTypeUint8);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->indexTypeUint8);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->indexTypeUint8 */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_handle_self(VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->indexTypeUint8 */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_handle(VkPhysicalDeviceIndexTypeUint8FeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_handle_self((VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures chain */
static inline void
@@ -3057,6 +4107,688 @@ vn_replace_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_handle(VkPhysical
} while (pnext);
}
+/* struct VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->primitiveTopologyListRestart);
+ vn_encode_VkBool32(enc, &val->primitiveTopologyPatchListRestart);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT });
+ vn_encode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->primitiveTopologyListRestart);
+ vn_decode_VkBool32(dec, &val->primitiveTopologyPatchListRestart);
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->primitiveTopologyListRestart */
+ /* skip val->primitiveTopologyPatchListRestart */
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_handle_self(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->primitiveTopologyListRestart */
+ /* skip val->primitiveTopologyPatchListRestart */
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_handle(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ vn_replace_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_handle_self((VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->shaderDemoteToHelperInvocation);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES });
+ vn_encode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->shaderDemoteToHelperInvocation);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->shaderDemoteToHelperInvocation */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_handle_self(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->shaderDemoteToHelperInvocation */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_handle(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_handle_self((VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->texelBufferAlignment);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->texelBufferAlignment);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->texelBufferAlignment */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_handle_self(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->texelBufferAlignment */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_handle(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_handle_self((VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceSubgroupSizeControlFeatures chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceSubgroupSizeControlFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceSubgroupSizeControlFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->subgroupSizeControl);
+ vn_encode_VkBool32(enc, &val->computeFullSubgroups);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceSubgroupSizeControlFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES });
+ vn_encode_VkPhysicalDeviceSubgroupSizeControlFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceSubgroupSizeControlFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->subgroupSizeControl);
+ vn_decode_VkBool32(dec, &val->computeFullSubgroups);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->subgroupSizeControl */
+ /* skip val->computeFullSubgroups */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceSubgroupSizeControlFeatures_handle_self(VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->subgroupSizeControl */
+ /* skip val->computeFullSubgroups */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceSubgroupSizeControlFeatures_handle(VkPhysicalDeviceSubgroupSizeControlFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ vn_replace_VkPhysicalDeviceSubgroupSizeControlFeatures_handle_self((VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceLineRasterizationFeaturesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceLineRasterizationFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->rectangularLines);
+ vn_encode_VkBool32(enc, &val->bresenhamLines);
+ vn_encode_VkBool32(enc, &val->smoothLines);
+ vn_encode_VkBool32(enc, &val->stippledRectangularLines);
+ vn_encode_VkBool32(enc, &val->stippledBresenhamLines);
+ vn_encode_VkBool32(enc, &val->stippledSmoothLines);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceLineRasterizationFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceLineRasterizationFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->rectangularLines);
+ vn_decode_VkBool32(dec, &val->bresenhamLines);
+ vn_decode_VkBool32(dec, &val->smoothLines);
+ vn_decode_VkBool32(dec, &val->stippledRectangularLines);
+ vn_decode_VkBool32(dec, &val->stippledBresenhamLines);
+ vn_decode_VkBool32(dec, &val->stippledSmoothLines);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->rectangularLines */
+ /* skip val->bresenhamLines */
+ /* skip val->smoothLines */
+ /* skip val->stippledRectangularLines */
+ /* skip val->stippledBresenhamLines */
+ /* skip val->stippledSmoothLines */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceLineRasterizationFeaturesEXT_handle_self(VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->rectangularLines */
+ /* skip val->bresenhamLines */
+ /* skip val->smoothLines */
+ /* skip val->stippledRectangularLines */
+ /* skip val->stippledBresenhamLines */
+ /* skip val->stippledSmoothLines */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceLineRasterizationFeaturesEXT_handle(VkPhysicalDeviceLineRasterizationFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceLineRasterizationFeaturesEXT_handle_self((VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDevicePipelineCreationCacheControlFeatures chain */
+
+static inline void
+vn_encode_VkPhysicalDevicePipelineCreationCacheControlFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->pipelineCreationCacheControl);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePipelineCreationCacheControlFeatures(struct vn_cs_encoder *enc, const VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES });
+ vn_encode_VkPhysicalDevicePipelineCreationCacheControlFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->pipelineCreationCacheControl);
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->pipelineCreationCacheControl */
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePipelineCreationCacheControlFeatures_handle_self(VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->pipelineCreationCacheControl */
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePipelineCreationCacheControlFeatures_handle(VkPhysicalDevicePipelineCreationCacheControlFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ vn_replace_VkPhysicalDevicePipelineCreationCacheControlFeatures_handle_self((VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceVulkan11Features chain */
static inline void
@@ -3503,6 +5235,2060 @@ vn_replace_VkPhysicalDeviceVulkan12Features_handle(VkPhysicalDeviceVulkan12Featu
} while (pnext);
}
+/* struct VkPhysicalDeviceVulkan13Features chain */
+/* Generated-style Venus serializers for VkPhysicalDeviceVulkan13Features.
+ * The sequence of encode/decode calls below is the wire format; encoder and
+ * decoder field order must stay identical. */
+
+/* No extension structs are supported in this chain's pNext; always encodes a
+ * NULL pointer marker. */
+static inline void
+vn_encode_VkPhysicalDeviceVulkan13Features_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+/* Encodes the 15 Vulkan 1.3 feature booleans in fixed wire order (sType and
+ * pNext are handled by the caller). */
+static inline void
+vn_encode_VkPhysicalDeviceVulkan13Features_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceVulkan13Features *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->robustImageAccess);
+ vn_encode_VkBool32(enc, &val->inlineUniformBlock);
+ vn_encode_VkBool32(enc, &val->descriptorBindingInlineUniformBlockUpdateAfterBind);
+ vn_encode_VkBool32(enc, &val->pipelineCreationCacheControl);
+ vn_encode_VkBool32(enc, &val->privateData);
+ vn_encode_VkBool32(enc, &val->shaderDemoteToHelperInvocation);
+ vn_encode_VkBool32(enc, &val->shaderTerminateInvocation);
+ vn_encode_VkBool32(enc, &val->subgroupSizeControl);
+ vn_encode_VkBool32(enc, &val->computeFullSubgroups);
+ vn_encode_VkBool32(enc, &val->synchronization2);
+ vn_encode_VkBool32(enc, &val->textureCompressionASTC_HDR);
+ vn_encode_VkBool32(enc, &val->shaderZeroInitializeWorkgroupMemory);
+ vn_encode_VkBool32(enc, &val->dynamicRendering);
+ vn_encode_VkBool32(enc, &val->shaderIntegerDotProduct);
+ vn_encode_VkBool32(enc, &val->maintenance4);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceVulkan13Features(struct vn_cs_encoder *enc, const VkPhysicalDeviceVulkan13Features *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES });
+ vn_encode_VkPhysicalDeviceVulkan13Features_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceVulkan13Features_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceVulkan13Features_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVulkan13Features_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVulkan13Features *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->robustImageAccess);
+ vn_decode_VkBool32(dec, &val->inlineUniformBlock);
+ vn_decode_VkBool32(dec, &val->descriptorBindingInlineUniformBlockUpdateAfterBind);
+ vn_decode_VkBool32(dec, &val->pipelineCreationCacheControl);
+ vn_decode_VkBool32(dec, &val->privateData);
+ vn_decode_VkBool32(dec, &val->shaderDemoteToHelperInvocation);
+ vn_decode_VkBool32(dec, &val->shaderTerminateInvocation);
+ vn_decode_VkBool32(dec, &val->subgroupSizeControl);
+ vn_decode_VkBool32(dec, &val->computeFullSubgroups);
+ vn_decode_VkBool32(dec, &val->synchronization2);
+ vn_decode_VkBool32(dec, &val->textureCompressionASTC_HDR);
+ vn_decode_VkBool32(dec, &val->shaderZeroInitializeWorkgroupMemory);
+ vn_decode_VkBool32(dec, &val->dynamicRendering);
+ vn_decode_VkBool32(dec, &val->shaderIntegerDotProduct);
+ vn_decode_VkBool32(dec, &val->maintenance4);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early; the remaining fields are still consumed. */
+static inline void
+vn_decode_VkPhysicalDeviceVulkan13Features_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVulkan13Features *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceVulkan13Features_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Features_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceVulkan13Features_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: field payloads are not read (framing only). */
+static inline void
+vn_decode_VkPhysicalDeviceVulkan13Features_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVulkan13Features *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->robustImageAccess */
+ /* skip val->inlineUniformBlock */
+ /* skip val->descriptorBindingInlineUniformBlockUpdateAfterBind */
+ /* skip val->pipelineCreationCacheControl */
+ /* skip val->privateData */
+ /* skip val->shaderDemoteToHelperInvocation */
+ /* skip val->shaderTerminateInvocation */
+ /* skip val->subgroupSizeControl */
+ /* skip val->computeFullSubgroups */
+ /* skip val->synchronization2 */
+ /* skip val->textureCompressionASTC_HDR */
+ /* skip val->shaderZeroInitializeWorkgroupMemory */
+ /* skip val->dynamicRendering */
+ /* skip val->shaderIntegerDotProduct */
+ /* skip val->maintenance4 */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVulkan13Features_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVulkan13Features *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceVulkan13Features_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Features_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing in this struct requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceVulkan13Features_handle_self(VkPhysicalDeviceVulkan13Features *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->robustImageAccess */
+ /* skip val->inlineUniformBlock */
+ /* skip val->descriptorBindingInlineUniformBlockUpdateAfterBind */
+ /* skip val->pipelineCreationCacheControl */
+ /* skip val->privateData */
+ /* skip val->shaderDemoteToHelperInvocation */
+ /* skip val->shaderTerminateInvocation */
+ /* skip val->subgroupSizeControl */
+ /* skip val->computeFullSubgroups */
+ /* skip val->synchronization2 */
+ /* skip val->textureCompressionASTC_HDR */
+ /* skip val->shaderZeroInitializeWorkgroupMemory */
+ /* skip val->dynamicRendering */
+ /* skip val->shaderIntegerDotProduct */
+ /* skip val->maintenance4 */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceVulkan13Features_handle(VkPhysicalDeviceVulkan13Features *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ vn_replace_VkPhysicalDeviceVulkan13Features_handle_self((VkPhysicalDeviceVulkan13Features *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceCustomBorderColorFeaturesEXT chain */
+/* Generated-style Venus serializers for
+ * VkPhysicalDeviceCustomBorderColorFeaturesEXT; encode/decode field order
+ * defines the wire layout and must stay in sync with the generator. */
+
+static inline void
+vn_encode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->customBorderColors);
+ vn_encode_VkBool32(enc, &val->customBorderColorWithoutFormat);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceCustomBorderColorFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->customBorderColors);
+ vn_decode_VkBool32(dec, &val->customBorderColorWithoutFormat);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: framing only, field payloads are not read. */
+static inline void
+vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->customBorderColors */
+ /* skip val->customBorderColorWithoutFormat */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceCustomBorderColorFeaturesEXT_handle_self(VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->customBorderColors */
+ /* skip val->customBorderColorWithoutFormat */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceCustomBorderColorFeaturesEXT_handle(VkPhysicalDeviceCustomBorderColorFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceCustomBorderColorFeaturesEXT_handle_self((VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceExtendedDynamicStateFeaturesEXT chain */
+/* Generated-style Venus serializers for
+ * VkPhysicalDeviceExtendedDynamicStateFeaturesEXT; call order below is the
+ * wire layout. */
+
+static inline void
+vn_encode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->extendedDynamicState);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->extendedDynamicState);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: framing only, field payload is not read. */
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->extendedDynamicState */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_handle_self(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->extendedDynamicState */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_handle(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_handle_self((VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceExtendedDynamicState2FeaturesEXT chain */
+/* Generated-style Venus serializers for
+ * VkPhysicalDeviceExtendedDynamicState2FeaturesEXT; call order below is the
+ * wire layout. */
+
+static inline void
+vn_encode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->extendedDynamicState2);
+ vn_encode_VkBool32(enc, &val->extendedDynamicState2LogicOp);
+ vn_encode_VkBool32(enc, &val->extendedDynamicState2PatchControlPoints);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->extendedDynamicState2);
+ vn_decode_VkBool32(dec, &val->extendedDynamicState2LogicOp);
+ vn_decode_VkBool32(dec, &val->extendedDynamicState2PatchControlPoints);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: framing only, field payloads are not read. */
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->extendedDynamicState2 */
+ /* skip val->extendedDynamicState2LogicOp */
+ /* skip val->extendedDynamicState2PatchControlPoints */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_handle_self(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->extendedDynamicState2 */
+ /* skip val->extendedDynamicState2LogicOp */
+ /* skip val->extendedDynamicState2PatchControlPoints */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_handle(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_handle_self((VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures chain */
+/* Generated-style Venus serializers for
+ * VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures; call order below is
+ * the wire layout. */
+
+static inline void
+vn_encode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->shaderZeroInitializeWorkgroupMemory);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES });
+ vn_encode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->shaderZeroInitializeWorkgroupMemory);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: framing only, field payload is not read. */
+static inline void
+vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->shaderZeroInitializeWorkgroupMemory */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_handle_self(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->shaderZeroInitializeWorkgroupMemory */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_handle(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ vn_replace_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_handle_self((VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceRobustness2FeaturesEXT chain */
+/* Generated-style Venus serializers for VkPhysicalDeviceRobustness2FeaturesEXT;
+ * call order below is the wire layout. */
+
+static inline void
+vn_encode_VkPhysicalDeviceRobustness2FeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceRobustness2FeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->robustBufferAccess2);
+ vn_encode_VkBool32(enc, &val->robustImageAccess2);
+ vn_encode_VkBool32(enc, &val->nullDescriptor);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceRobustness2FeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceRobustness2FeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceRobustness2FeaturesEXT_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->robustBufferAccess2);
+ vn_decode_VkBool32(dec, &val->robustImageAccess2);
+ vn_decode_VkBool32(dec, &val->nullDescriptor);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: framing only, field payloads are not read. */
+static inline void
+vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->robustBufferAccess2 */
+ /* skip val->robustImageAccess2 */
+ /* skip val->nullDescriptor */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceRobustness2FeaturesEXT_handle_self(VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->robustBufferAccess2 */
+ /* skip val->robustImageAccess2 */
+ /* skip val->nullDescriptor */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceRobustness2FeaturesEXT_handle(VkPhysicalDeviceRobustness2FeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceRobustness2FeaturesEXT_handle_self((VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceImageRobustnessFeatures chain */
+/* Generated-style Venus serializers for VkPhysicalDeviceImageRobustnessFeatures;
+ * call order below is the wire layout. */
+
+static inline void
+vn_encode_VkPhysicalDeviceImageRobustnessFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceImageRobustnessFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->robustImageAccess);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceImageRobustnessFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES });
+ vn_encode_VkPhysicalDeviceImageRobustnessFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceImageRobustnessFeatures_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDeviceImageRobustnessFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->robustImageAccess);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDeviceImageRobustnessFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceImageRobustnessFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceImageRobustnessFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: framing only, field payload is not read. */
+static inline void
+vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->robustImageAccess */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceImageRobustnessFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceImageRobustnessFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_partial_temp(dec, val);
+}
+
+/* All fields are skipped — nothing requires replacement. */
+static inline void
+vn_replace_VkPhysicalDeviceImageRobustnessFeatures_handle_self(VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->robustImageAccess */
+}
+
+/* Applies _handle_self to each recognized struct along the pNext chain. */
+static inline void
+vn_replace_VkPhysicalDeviceImageRobustnessFeatures_handle(VkPhysicalDeviceImageRobustnessFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ vn_replace_VkPhysicalDeviceImageRobustnessFeatures_handle_self((VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDevice4444FormatsFeaturesEXT chain */
+/* Generated-style Venus serializers for VkPhysicalDevice4444FormatsFeaturesEXT;
+ * call order below is the wire layout. */
+
+static inline void
+vn_encode_VkPhysicalDevice4444FormatsFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevice4444FormatsFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->formatA4R4G4B4);
+ vn_encode_VkBool32(enc, &val->formatA4B4G4R4);
+}
+
+static inline void
+vn_encode_VkPhysicalDevice4444FormatsFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT });
+ vn_encode_VkPhysicalDevice4444FormatsFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDevice4444FormatsFeaturesEXT_self(enc, val);
+}
+
+/* Rejects any non-NULL incoming pNext (no supported extension structs). */
+static inline void *
+vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->formatA4R4G4B4);
+ vn_decode_VkBool32(dec, &val->formatA4B4G4R4);
+}
+
+/* Full decode: sType mismatch flags the decoder fatal but does not return
+ * early. */
+static inline void
+vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->formatA4R4G4B4 */
+ /* skip val->formatA4B4G4R4 */
+}
+
+static inline void
+vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDevice4444FormatsFeaturesEXT_handle_self(VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->formatA4R4G4B4 */
+ /* skip val->formatA4B4G4R4 */
+}
+
+static inline void
+vn_replace_VkPhysicalDevice4444FormatsFeaturesEXT_handle(VkPhysicalDevice4444FormatsFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ vn_replace_VkPhysicalDevice4444FormatsFeaturesEXT_handle_self((VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceShaderTerminateInvocationFeatures chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderTerminateInvocationFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->shaderTerminateInvocation);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderTerminateInvocationFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES });
+   vn_encode_VkPhysicalDeviceShaderTerminateInvocationFeatures_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->shaderTerminateInvocation);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_pnext_temp(dec);
+   vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->shaderTerminateInvocation */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceShaderTerminateInvocationFeatures_handle_self(VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->shaderTerminateInvocation */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceShaderTerminateInvocationFeatures_handle(VkPhysicalDeviceShaderTerminateInvocationFeatures *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+         vn_replace_VkPhysicalDeviceShaderTerminateInvocationFeatures_handle_self((VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->mutableDescriptorType);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT });
+   vn_encode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->mutableDescriptorType);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_pnext_temp(dec);
+   vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->mutableDescriptorType */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_handle_self(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->mutableDescriptorType */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_handle(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+         vn_replace_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_handle_self((VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDeviceDepthClipControlFeaturesEXT chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDeviceDepthClipControlFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->depthClipControl);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceDepthClipControlFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT });
+   vn_encode_VkPhysicalDeviceDepthClipControlFeaturesEXT_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->depthClipControl);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_pnext_temp(dec);
+   vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->depthClipControl */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceDepthClipControlFeaturesEXT_handle_self(VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->depthClipControl */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceDepthClipControlFeaturesEXT_handle(VkPhysicalDeviceDepthClipControlFeaturesEXT *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+         vn_replace_VkPhysicalDeviceDepthClipControlFeaturesEXT_handle_self((VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDeviceSynchronization2Features chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDeviceSynchronization2Features_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceSynchronization2Features_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceSynchronization2Features *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->synchronization2);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceSynchronization2Features(struct vn_cs_encoder *enc, const VkPhysicalDeviceSynchronization2Features *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES });
+   vn_encode_VkPhysicalDeviceSynchronization2Features_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDeviceSynchronization2Features_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceSynchronization2Features_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSynchronization2Features_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSynchronization2Features *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->synchronization2);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSynchronization2Features_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSynchronization2Features *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceSynchronization2Features_pnext_temp(dec);
+   vn_decode_VkPhysicalDeviceSynchronization2Features_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceSynchronization2Features_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSynchronization2Features_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSynchronization2Features *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->synchronization2 */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSynchronization2Features_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSynchronization2Features *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceSynchronization2Features_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDeviceSynchronization2Features_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceSynchronization2Features_handle_self(VkPhysicalDeviceSynchronization2Features *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->synchronization2 */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceSynchronization2Features_handle(VkPhysicalDeviceSynchronization2Features *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+         vn_replace_VkPhysicalDeviceSynchronization2Features_handle_self((VkPhysicalDeviceSynchronization2Features *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->primitivesGeneratedQuery);
+   vn_encode_VkBool32(enc, &val->primitivesGeneratedQueryWithRasterizerDiscard);
+   vn_encode_VkBool32(enc, &val->primitivesGeneratedQueryWithNonZeroStreams);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT });
+   vn_encode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->primitivesGeneratedQuery);
+   vn_decode_VkBool32(dec, &val->primitivesGeneratedQueryWithRasterizerDiscard);
+   vn_decode_VkBool32(dec, &val->primitivesGeneratedQueryWithNonZeroStreams);
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_pnext_temp(dec);
+   vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->primitivesGeneratedQuery */
+   /* skip val->primitivesGeneratedQueryWithRasterizerDiscard */
+   /* skip val->primitivesGeneratedQueryWithNonZeroStreams */
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_handle_self(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->primitivesGeneratedQuery */
+   /* skip val->primitivesGeneratedQueryWithRasterizerDiscard */
+   /* skip val->primitivesGeneratedQueryWithNonZeroStreams */
+}
+
+static inline void
+vn_replace_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_handle(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+         vn_replace_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_handle_self((VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->ycbcr2plane444Formats);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT });
+   vn_encode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->ycbcr2plane444Formats);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_pnext_temp(dec);
+   vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->ycbcr2plane444Formats */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_handle_self(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->ycbcr2plane444Formats */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_handle(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+         vn_replace_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_handle_self((VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDeviceProvokingVertexFeaturesEXT chain */
+
+/*
+ * NOTE(review): appears machine-generated (venus serialization helpers).
+ * encode_* writes sType/pNext/members to the stream; decode_*_temp reads
+ * them back (wrong sType or a non-NULL pNext marks the decoder fatal);
+ * decode_*_partial_temp consumes the encoding but skips member values;
+ * replace_*_handle walks the pNext chain to patch handles (none here).
+ * Statement order defines the wire format - do not reorder.
+ */
+
+static inline void
+vn_encode_VkPhysicalDeviceProvokingVertexFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+   /* no known/supported struct */
+   vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_encode_VkBool32(enc, &val->provokingVertexLast);
+   vn_encode_VkBool32(enc, &val->transformFeedbackPreservesProvokingVertex);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceProvokingVertexFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT);
+   vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT });
+   vn_encode_VkPhysicalDeviceProvokingVertexFeaturesEXT_pnext(enc, val->pNext);
+   vn_encode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   vn_decode_VkBool32(dec, &val->provokingVertexLast);
+   vn_decode_VkBool32(dec, &val->transformFeedbackPreservesProvokingVertex);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec); /* no early return: decoding continues on the poisoned decoder */
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_pnext_temp(dec);
+   vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+   /* no known/supported struct */
+   if (vn_decode_simple_pointer(dec))
+      vn_cs_decoder_set_fatal(dec);
+   return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   /* skip val->{sType,pNext} */
+   /* skip val->provokingVertexLast */
+   /* skip val->transformFeedbackPreservesProvokingVertex */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   VkStructureType stype;
+   vn_decode_VkStructureType(dec, &stype);
+   if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT)
+      vn_cs_decoder_set_fatal(dec);
+
+   val->sType = stype;
+   val->pNext = vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_pnext_partial_temp(dec);
+   vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceProvokingVertexFeaturesEXT_handle_self(VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   /* skip val->sType */
+   /* skip val->pNext */
+   /* skip val->provokingVertexLast */
+   /* skip val->transformFeedbackPreservesProvokingVertex */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceProvokingVertexFeaturesEXT_handle(VkPhysicalDeviceProvokingVertexFeaturesEXT *val)
+{
+   struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+   do {
+      switch ((int32_t)pnext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+         vn_replace_VkPhysicalDeviceProvokingVertexFeaturesEXT_handle_self((VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+         break;
+      default:
+         /* ignore unknown/unsupported struct */
+         break;
+      }
+      pnext = pnext->pNext;
+   } while (pnext);
+}
+
+/* struct VkPhysicalDeviceShaderIntegerDotProductFeatures chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderIntegerDotProductFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->shaderIntegerDotProduct);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceShaderIntegerDotProductFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES });
+ vn_encode_VkPhysicalDeviceShaderIntegerDotProductFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->shaderIntegerDotProduct);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->shaderIntegerDotProduct */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceShaderIntegerDotProductFeatures_handle_self(VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->shaderIntegerDotProduct */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceShaderIntegerDotProductFeatures_handle(VkPhysicalDeviceShaderIntegerDotProductFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderIntegerDotProductFeatures_handle_self((VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceDynamicRenderingFeatures chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceDynamicRenderingFeatures_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceDynamicRenderingFeatures_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->dynamicRendering);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceDynamicRenderingFeatures(struct vn_cs_encoder *enc, const VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES });
+ vn_encode_VkPhysicalDeviceDynamicRenderingFeatures_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceDynamicRenderingFeatures_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->dynamicRendering);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->dynamicRendering */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceDynamicRenderingFeatures_handle_self(VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->dynamicRendering */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceDynamicRenderingFeatures_handle(VkPhysicalDeviceDynamicRenderingFeatures *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ vn_replace_VkPhysicalDeviceDynamicRenderingFeatures_handle_self((VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceImageViewMinLodFeaturesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->minLod);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceImageViewMinLodFeaturesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT });
+ vn_encode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->minLod);
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_temp(dec, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->minLod */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_partial_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceImageViewMinLodFeaturesEXT_handle_self(VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->minLod */
+}
+
+static inline void
+vn_replace_VkPhysicalDeviceImageViewMinLodFeaturesEXT_handle(VkPhysicalDeviceImageViewMinLodFeaturesEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceImageViewMinLodFeaturesEXT_handle_self((VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPhysicalDeviceFeatures2 chain */
static inline void
@@ -3512,6 +7298,12 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
while (pnext) {
switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevicePrivateDataFeatures_self(enc, (const VkPhysicalDevicePrivateDataFeatures *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -3548,6 +7340,24 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceProtectedMemoryFeatures_self(enc, (const VkPhysicalDeviceProtectedMemoryFeatures *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceMultiDrawFeaturesEXT_self(enc, (const VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceInlineUniformBlockFeatures_self(enc, (const VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceMaintenance4Features_self(enc, (const VkPhysicalDeviceMaintenance4Features *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -3584,6 +7394,12 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDevice8BitStorageFeatures_self(enc, (const VkPhysicalDevice8BitStorageFeatures *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self(enc, (const VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -3596,6 +7412,12 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceShaderAtomicInt64Features_self(enc, (const VkPhysicalDeviceShaderAtomicInt64Features *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self(enc, (const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -3614,6 +7436,12 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_self(enc, (const VkPhysicalDeviceUniformBufferStandardLayoutFeatures *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self(enc, (const VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -3626,12 +7454,60 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceImagelessFramebufferFeatures_self(enc, (const VkPhysicalDeviceImagelessFramebufferFeatures *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self(enc, (const VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self(enc, (const VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_self(enc, (const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self(enc, (const VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self(enc, (const VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self(enc, (const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceSubgroupSizeControlFeatures_self(enc, (const VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self(enc, (const VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self(enc, (const VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -3644,6 +7520,114 @@ vn_encode_VkPhysicalDeviceFeatures2_pnext(struct vn_cs_encoder *enc, const void
vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceVulkan12Features_self(enc, (const VkPhysicalDeviceVulkan12Features *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceVulkan13Features_self(enc, (const VkPhysicalDeviceVulkan13Features *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self(enc, (const VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self(enc, (const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self(enc, (const VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self(enc, (const VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceRobustness2FeaturesEXT_self(enc, (const VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceImageRobustnessFeatures_self(enc, (const VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevice4444FormatsFeaturesEXT_self(enc, (const VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self(enc, (const VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self(enc, (const VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self(enc, (const VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceSynchronization2Features_self(enc, (const VkPhysicalDeviceSynchronization2Features *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self(enc, (const VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self(enc, (const VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self(enc, (const VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self(enc, (const VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceDynamicRenderingFeatures_self(enc, (const VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceFeatures2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self(enc, (const VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ return;
default:
/* ignore unknown/unsupported struct */
break;
@@ -3681,6 +7665,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkStructureType(dec, &stype);
switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrivateDataFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrivateDataFeatures_self_temp(dec, (VkPhysicalDevicePrivateDataFeatures *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVariablePointersFeatures));
if (pnext) {
@@ -3729,6 +7721,30 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceProtectedMemoryFeatures_self_temp(dec, (VkPhysicalDeviceProtectedMemoryFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMultiDrawFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_temp(dec, (VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceInlineUniformBlockFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_temp(dec, (VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMaintenance4Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Features_self_temp(dec, (VkPhysicalDeviceMaintenance4Features *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderDrawParametersFeatures));
if (pnext) {
@@ -3777,6 +7793,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDevice8BitStorageFeatures_self_temp(dec, (VkPhysicalDevice8BitStorageFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceConditionalRenderingFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_temp(dec, (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkanMemoryModelFeatures));
if (pnext) {
@@ -3793,6 +7817,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceShaderAtomicInt64Features_self_temp(dec, (VkPhysicalDeviceShaderAtomicInt64Features *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_temp(dec, (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTransformFeedbackFeaturesEXT));
if (pnext) {
@@ -3817,6 +7849,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_self_temp(dec, (VkPhysicalDeviceUniformBufferStandardLayoutFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthClipEnableFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_temp(dec, (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceBufferDeviceAddressFeatures));
if (pnext) {
@@ -3833,6 +7873,22 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceImagelessFramebufferFeatures_self_temp(dec, (VkPhysicalDeviceImagelessFramebufferFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTextureCompressionASTCHDRFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_temp(dec, (VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceIndexTypeUint8FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_temp(dec, (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures));
if (pnext) {
@@ -3841,6 +7897,54 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_self_temp(dec, (VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_temp(dec, (VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_temp(dec, (VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_temp(dec, (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSubgroupSizeControlFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_temp(dec, (VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceLineRasterizationFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_temp(dec, (VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePipelineCreationCacheControlFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_temp(dec, (VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan11Features));
if (pnext) {
@@ -3857,6 +7961,150 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceVulkan12Features_self_temp(dec, (VkPhysicalDeviceVulkan12Features *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan13Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Features_self_temp(dec, (VkPhysicalDeviceVulkan13Features *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceCustomBorderColorFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_temp(dec, (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_temp(dec, (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_temp(dec, (VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_temp(dec, (VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceRobustness2FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_temp(dec, (VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceImageRobustnessFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_temp(dec, (VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevice4444FormatsFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_temp(dec, (VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderTerminateInvocationFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_temp(dec, (VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_temp(dec, (VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthClipControlFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_temp(dec, (VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSynchronization2Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceSynchronization2Features_self_temp(dec, (VkPhysicalDeviceSynchronization2Features *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_temp(dec, (VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_temp(dec, (VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceProvokingVertexFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_temp(dec, (VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderIntegerDotProductFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_temp(dec, (VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDynamicRenderingFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_temp(dec, (VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceImageViewMinLodFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_temp(dec, (VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -3898,6 +8146,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkStructureType(dec, &stype);
switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrivateDataFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePrivateDataFeatures_self_partial_temp(dec, (VkPhysicalDevicePrivateDataFeatures *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVariablePointersFeatures));
if (pnext) {
@@ -3946,6 +8202,30 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceProtectedMemoryFeatures_self_partial_temp(dec, (VkPhysicalDeviceProtectedMemoryFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMultiDrawFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceInlineUniformBlockFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_partial_temp(dec, (VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMaintenance4Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Features_self_partial_temp(dec, (VkPhysicalDeviceMaintenance4Features *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderDrawParametersFeatures));
if (pnext) {
@@ -3994,6 +8274,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDevice8BitStorageFeatures_self_partial_temp(dec, (VkPhysicalDevice8BitStorageFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceConditionalRenderingFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkanMemoryModelFeatures));
if (pnext) {
@@ -4010,6 +8298,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceShaderAtomicInt64Features_self_partial_temp(dec, (VkPhysicalDeviceShaderAtomicInt64Features *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTransformFeedbackFeaturesEXT));
if (pnext) {
@@ -4034,6 +8330,14 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_self_partial_temp(dec, (VkPhysicalDeviceUniformBufferStandardLayoutFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthClipEnableFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceBufferDeviceAddressFeatures));
if (pnext) {
@@ -4050,6 +8354,22 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceImagelessFramebufferFeatures_self_partial_temp(dec, (VkPhysicalDeviceImagelessFramebufferFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTextureCompressionASTCHDRFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_partial_temp(dec, (VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceIndexTypeUint8FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures));
if (pnext) {
@@ -4058,6 +8378,54 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_self_partial_temp(dec, (VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_partial_temp(dec, (VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_partial_temp(dec, (VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSubgroupSizeControlFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_partial_temp(dec, (VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceLineRasterizationFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePipelineCreationCacheControlFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_partial_temp(dec, (VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan11Features));
if (pnext) {
@@ -4074,6 +8442,150 @@ vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceVulkan12Features_self_partial_temp(dec, (VkPhysicalDeviceVulkan12Features *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan13Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Features_self_partial_temp(dec, (VkPhysicalDeviceVulkan13Features *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceCustomBorderColorFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_partial_temp(dec, (VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceRobustness2FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceImageRobustnessFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_partial_temp(dec, (VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevice4444FormatsFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_partial_temp(dec, (VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderTerminateInvocationFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_partial_temp(dec, (VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthClipControlFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSynchronization2Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceSynchronization2Features_self_partial_temp(dec, (VkPhysicalDeviceSynchronization2Features *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_partial_temp(dec, (VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceProvokingVertexFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderIntegerDotProductFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_partial_temp(dec, (VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDynamicRenderingFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_partial_temp(dec, (VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceImageViewMinLodFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceFeatures2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_partial_temp(dec, (VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -4122,6 +8634,9 @@ vn_replace_VkPhysicalDeviceFeatures2_handle(VkPhysicalDeviceFeatures2 *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
vn_replace_VkPhysicalDeviceFeatures2_handle_self((VkPhysicalDeviceFeatures2 *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ vn_replace_VkPhysicalDevicePrivateDataFeatures_handle_self((VkPhysicalDevicePrivateDataFeatures *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
vn_replace_VkPhysicalDeviceVariablePointersFeatures_handle_self((VkPhysicalDeviceVariablePointersFeatures *)pnext);
break;
@@ -4140,6 +8655,15 @@ vn_replace_VkPhysicalDeviceFeatures2_handle(VkPhysicalDeviceFeatures2 *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
vn_replace_VkPhysicalDeviceProtectedMemoryFeatures_handle_self((VkPhysicalDeviceProtectedMemoryFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceMultiDrawFeaturesEXT_handle_self((VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ vn_replace_VkPhysicalDeviceInlineUniformBlockFeatures_handle_self((VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ vn_replace_VkPhysicalDeviceMaintenance4Features_handle_self((VkPhysicalDeviceMaintenance4Features *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
vn_replace_VkPhysicalDeviceShaderDrawParametersFeatures_handle_self((VkPhysicalDeviceShaderDrawParametersFeatures *)pnext);
break;
@@ -4158,12 +8682,18 @@ vn_replace_VkPhysicalDeviceFeatures2_handle(VkPhysicalDeviceFeatures2 *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
vn_replace_VkPhysicalDevice8BitStorageFeatures_handle_self((VkPhysicalDevice8BitStorageFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceConditionalRenderingFeaturesEXT_handle_self((VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
vn_replace_VkPhysicalDeviceVulkanMemoryModelFeatures_handle_self((VkPhysicalDeviceVulkanMemoryModelFeatures *)pnext);
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
vn_replace_VkPhysicalDeviceShaderAtomicInt64Features_handle_self((VkPhysicalDeviceShaderAtomicInt64Features *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_handle_self((VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
vn_replace_VkPhysicalDeviceTransformFeedbackFeaturesEXT_handle_self((VkPhysicalDeviceTransformFeedbackFeaturesEXT *)pnext);
break;
@@ -4173,21 +8703,102 @@ vn_replace_VkPhysicalDeviceFeatures2_handle(VkPhysicalDeviceFeatures2 *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
vn_replace_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_handle_self((VkPhysicalDeviceUniformBufferStandardLayoutFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceDepthClipEnableFeaturesEXT_handle_self((VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
vn_replace_VkPhysicalDeviceBufferDeviceAddressFeatures_handle_self((VkPhysicalDeviceBufferDeviceAddressFeatures *)pnext);
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
vn_replace_VkPhysicalDeviceImagelessFramebufferFeatures_handle_self((VkPhysicalDeviceImagelessFramebufferFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ vn_replace_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_handle_self((VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_handle_self((VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
vn_replace_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_handle_self((VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ vn_replace_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_handle_self((VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_handle_self((VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_handle_self((VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ vn_replace_VkPhysicalDeviceSubgroupSizeControlFeatures_handle_self((VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceLineRasterizationFeaturesEXT_handle_self((VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ vn_replace_VkPhysicalDevicePipelineCreationCacheControlFeatures_handle_self((VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
vn_replace_VkPhysicalDeviceVulkan11Features_handle_self((VkPhysicalDeviceVulkan11Features *)pnext);
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
vn_replace_VkPhysicalDeviceVulkan12Features_handle_self((VkPhysicalDeviceVulkan12Features *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ vn_replace_VkPhysicalDeviceVulkan13Features_handle_self((VkPhysicalDeviceVulkan13Features *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceCustomBorderColorFeaturesEXT_handle_self((VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_handle_self((VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_handle_self((VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ vn_replace_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_handle_self((VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceRobustness2FeaturesEXT_handle_self((VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ vn_replace_VkPhysicalDeviceImageRobustnessFeatures_handle_self((VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ vn_replace_VkPhysicalDevice4444FormatsFeaturesEXT_handle_self((VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderTerminateInvocationFeatures_handle_self((VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_handle_self((VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceDepthClipControlFeaturesEXT_handle_self((VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ vn_replace_VkPhysicalDeviceSynchronization2Features_handle_self((VkPhysicalDeviceSynchronization2Features *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+ vn_replace_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_handle_self((VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_handle_self((VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceProvokingVertexFeaturesEXT_handle_self((VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderIntegerDotProductFeatures_handle_self((VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ vn_replace_VkPhysicalDeviceDynamicRenderingFeatures_handle_self((VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceImageViewMinLodFeaturesEXT_handle_self((VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -4280,6 +8891,22 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkStructureType(dec, &stype);
switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkDevicePrivateDataCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkDevicePrivateDataCreateInfo_self_temp(dec, (VkDevicePrivateDataCreateInfo *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrivateDataFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrivateDataFeatures_self_temp(dec, (VkPhysicalDevicePrivateDataFeatures *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceFeatures2));
if (pnext) {
@@ -4344,6 +8971,30 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceProtectedMemoryFeatures_self_temp(dec, (VkPhysicalDeviceProtectedMemoryFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMultiDrawFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawFeaturesEXT_self_temp(dec, (VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceInlineUniformBlockFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockFeatures_self_temp(dec, (VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMaintenance4Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Features_self_temp(dec, (VkPhysicalDeviceMaintenance4Features *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderDrawParametersFeatures));
if (pnext) {
@@ -4392,6 +9043,14 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDevice8BitStorageFeatures_self_temp(dec, (VkPhysicalDevice8BitStorageFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceConditionalRenderingFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceConditionalRenderingFeaturesEXT_self_temp(dec, (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkanMemoryModelFeatures));
if (pnext) {
@@ -4408,6 +9067,14 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceShaderAtomicInt64Features_self_temp(dec, (VkPhysicalDeviceShaderAtomicInt64Features *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_self_temp(dec, (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTransformFeedbackFeaturesEXT));
if (pnext) {
@@ -4432,6 +9099,14 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_self_temp(dec, (VkPhysicalDeviceUniformBufferStandardLayoutFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthClipEnableFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipEnableFeaturesEXT_self_temp(dec, (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceBufferDeviceAddressFeatures));
if (pnext) {
@@ -4448,6 +9123,22 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceImagelessFramebufferFeatures_self_temp(dec, (VkPhysicalDeviceImagelessFramebufferFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTextureCompressionASTCHDRFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_self_temp(dec, (VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceIndexTypeUint8FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_self_temp(dec, (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures));
if (pnext) {
@@ -4456,6 +9147,54 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_self_temp(dec, (VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_self_temp(dec, (VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_self_temp(dec, (VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_self_temp(dec, (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSubgroupSizeControlFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlFeatures_self_temp(dec, (VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceLineRasterizationFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationFeaturesEXT_self_temp(dec, (VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePipelineCreationCacheControlFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePipelineCreationCacheControlFeatures_self_temp(dec, (VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan11Features));
if (pnext) {
@@ -4472,6 +9211,150 @@ vn_decode_VkDeviceCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkPhysicalDeviceVulkan12Features_self_temp(dec, (VkPhysicalDeviceVulkan12Features *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan13Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Features_self_temp(dec, (VkPhysicalDeviceVulkan13Features *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceCustomBorderColorFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorFeaturesEXT_self_temp(dec, (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_self_temp(dec, (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceExtendedDynamicState2FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_self_temp(dec, (VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_self_temp(dec, (VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceRobustness2FeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2FeaturesEXT_self_temp(dec, (VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceImageRobustnessFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceImageRobustnessFeatures_self_temp(dec, (VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevice4444FormatsFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDevice4444FormatsFeaturesEXT_self_temp(dec, (VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderTerminateInvocationFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderTerminateInvocationFeatures_self_temp(dec, (VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_self_temp(dec, (VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthClipControlFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDepthClipControlFeaturesEXT_self_temp(dec, (VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSynchronization2Features));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceSynchronization2Features_self_temp(dec, (VkPhysicalDeviceSynchronization2Features *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_self_temp(dec, (VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_self_temp(dec, (VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceProvokingVertexFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceProvokingVertexFeaturesEXT_self_temp(dec, (VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderIntegerDotProductFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductFeatures_self_temp(dec, (VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDynamicRenderingFeatures));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceDynamicRenderingFeatures_self_temp(dec, (VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceImageViewMinLodFeaturesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceCreateInfo_pnext_temp(dec);
+ vn_decode_VkPhysicalDeviceImageViewMinLodFeaturesEXT_self_temp(dec, (VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -4579,6 +9462,12 @@ vn_replace_VkDeviceCreateInfo_handle(VkDeviceCreateInfo *val)
case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
vn_replace_VkDeviceCreateInfo_handle_self((VkDeviceCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO:
+ vn_replace_VkDevicePrivateDataCreateInfo_handle_self((VkDevicePrivateDataCreateInfo *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES:
+ vn_replace_VkPhysicalDevicePrivateDataFeatures_handle_self((VkPhysicalDevicePrivateDataFeatures *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
vn_replace_VkPhysicalDeviceFeatures2_handle_self((VkPhysicalDeviceFeatures2 *)pnext);
break;
@@ -4603,6 +9492,15 @@ vn_replace_VkDeviceCreateInfo_handle(VkDeviceCreateInfo *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
vn_replace_VkPhysicalDeviceProtectedMemoryFeatures_handle_self((VkPhysicalDeviceProtectedMemoryFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceMultiDrawFeaturesEXT_handle_self((VkPhysicalDeviceMultiDrawFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES:
+ vn_replace_VkPhysicalDeviceInlineUniformBlockFeatures_handle_self((VkPhysicalDeviceInlineUniformBlockFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES:
+ vn_replace_VkPhysicalDeviceMaintenance4Features_handle_self((VkPhysicalDeviceMaintenance4Features *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
vn_replace_VkPhysicalDeviceShaderDrawParametersFeatures_handle_self((VkPhysicalDeviceShaderDrawParametersFeatures *)pnext);
break;
@@ -4621,12 +9519,18 @@ vn_replace_VkDeviceCreateInfo_handle(VkDeviceCreateInfo *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
vn_replace_VkPhysicalDevice8BitStorageFeatures_handle_self((VkPhysicalDevice8BitStorageFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceConditionalRenderingFeaturesEXT_handle_self((VkPhysicalDeviceConditionalRenderingFeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
vn_replace_VkPhysicalDeviceVulkanMemoryModelFeatures_handle_self((VkPhysicalDeviceVulkanMemoryModelFeatures *)pnext);
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
vn_replace_VkPhysicalDeviceShaderAtomicInt64Features_handle_self((VkPhysicalDeviceShaderAtomicInt64Features *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT_handle_self((VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
vn_replace_VkPhysicalDeviceTransformFeedbackFeaturesEXT_handle_self((VkPhysicalDeviceTransformFeedbackFeaturesEXT *)pnext);
break;
@@ -4636,21 +9540,102 @@ vn_replace_VkDeviceCreateInfo_handle(VkDeviceCreateInfo *val)
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
vn_replace_VkPhysicalDeviceUniformBufferStandardLayoutFeatures_handle_self((VkPhysicalDeviceUniformBufferStandardLayoutFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceDepthClipEnableFeaturesEXT_handle_self((VkPhysicalDeviceDepthClipEnableFeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
vn_replace_VkPhysicalDeviceBufferDeviceAddressFeatures_handle_self((VkPhysicalDeviceBufferDeviceAddressFeatures *)pnext);
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
vn_replace_VkPhysicalDeviceImagelessFramebufferFeatures_handle_self((VkPhysicalDeviceImagelessFramebufferFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES:
+ vn_replace_VkPhysicalDeviceTextureCompressionASTCHDRFeatures_handle_self((VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceIndexTypeUint8FeaturesEXT_handle_self((VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
vn_replace_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures_handle_self((VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT:
+ vn_replace_VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT_handle_self((VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures_handle_self((VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT_handle_self((VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES:
+ vn_replace_VkPhysicalDeviceSubgroupSizeControlFeatures_handle_self((VkPhysicalDeviceSubgroupSizeControlFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceLineRasterizationFeaturesEXT_handle_self((VkPhysicalDeviceLineRasterizationFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES:
+ vn_replace_VkPhysicalDevicePipelineCreationCacheControlFeatures_handle_self((VkPhysicalDevicePipelineCreationCacheControlFeatures *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
vn_replace_VkPhysicalDeviceVulkan11Features_handle_self((VkPhysicalDeviceVulkan11Features *)pnext);
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
vn_replace_VkPhysicalDeviceVulkan12Features_handle_self((VkPhysicalDeviceVulkan12Features *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES:
+ vn_replace_VkPhysicalDeviceVulkan13Features_handle_self((VkPhysicalDeviceVulkan13Features *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceCustomBorderColorFeaturesEXT_handle_self((VkPhysicalDeviceCustomBorderColorFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT_handle_self((VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceExtendedDynamicState2FeaturesEXT_handle_self((VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES:
+ vn_replace_VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures_handle_self((VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceRobustness2FeaturesEXT_handle_self((VkPhysicalDeviceRobustness2FeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES:
+ vn_replace_VkPhysicalDeviceImageRobustnessFeatures_handle_self((VkPhysicalDeviceImageRobustnessFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ vn_replace_VkPhysicalDevice4444FormatsFeaturesEXT_handle_self((VkPhysicalDevice4444FormatsFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderTerminateInvocationFeatures_handle_self((VkPhysicalDeviceShaderTerminateInvocationFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT_handle_self((VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceDepthClipControlFeaturesEXT_handle_self((VkPhysicalDeviceDepthClipControlFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ vn_replace_VkPhysicalDeviceSynchronization2Features_handle_self((VkPhysicalDeviceSynchronization2Features *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT:
+ vn_replace_VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT_handle_self((VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT_handle_self((VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceProvokingVertexFeaturesEXT_handle_self((VkPhysicalDeviceProvokingVertexFeaturesEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES:
+ vn_replace_VkPhysicalDeviceShaderIntegerDotProductFeatures_handle_self((VkPhysicalDeviceShaderIntegerDotProductFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES:
+ vn_replace_VkPhysicalDeviceDynamicRenderingFeatures_handle_self((VkPhysicalDeviceDynamicRenderingFeatures *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT:
+ vn_replace_VkPhysicalDeviceImageViewMinLodFeaturesEXT_handle_self((VkPhysicalDeviceImageViewMinLodFeaturesEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -4659,6 +9644,114 @@ vn_replace_VkDeviceCreateInfo_handle(VkDeviceCreateInfo *val)
} while (pnext);
}
+/* struct VkPhysicalDeviceMultiDrawPropertiesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceMultiDrawPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMultiDrawPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceMultiDrawPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->maxMultiDrawCount);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMultiDrawPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceMultiDrawPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceMultiDrawPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceMultiDrawPropertiesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMultiDrawPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMultiDrawPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMultiDrawPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maxMultiDrawCount */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMultiDrawPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMultiDrawPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceMultiDrawPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawPropertiesEXT_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDevicePushDescriptorPropertiesKHR chain */
+
+static inline void
+vn_encode_VkPhysicalDevicePushDescriptorPropertiesKHR_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePushDescriptorPropertiesKHR_self(struct vn_cs_encoder *enc, const VkPhysicalDevicePushDescriptorPropertiesKHR *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->maxPushDescriptors);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePushDescriptorPropertiesKHR(struct vn_cs_encoder *enc, const VkPhysicalDevicePushDescriptorPropertiesKHR *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR });
+ vn_encode_VkPhysicalDevicePushDescriptorPropertiesKHR_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDevicePushDescriptorPropertiesKHR_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePushDescriptorPropertiesKHR_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePushDescriptorPropertiesKHR_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePushDescriptorPropertiesKHR *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maxPushDescriptors */
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePushDescriptorPropertiesKHR_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePushDescriptorPropertiesKHR *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePushDescriptorPropertiesKHR_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePushDescriptorPropertiesKHR_self_partial_temp(dec, val);
+}
+
/* struct VkConformanceVersion */
static inline void
@@ -5086,6 +10179,68 @@ vn_decode_VkPhysicalDeviceSamplerFilterMinmaxProperties_partial_temp(struct vn_c
vn_decode_VkPhysicalDeviceSamplerFilterMinmaxProperties_self_partial_temp(dec, val);
}
+/* struct VkPhysicalDeviceInlineUniformBlockProperties chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceInlineUniformBlockProperties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceInlineUniformBlockProperties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceInlineUniformBlockProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->maxInlineUniformBlockSize);
+ vn_encode_uint32_t(enc, &val->maxPerStageDescriptorInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxDescriptorSetInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceInlineUniformBlockProperties(struct vn_cs_encoder *enc, const VkPhysicalDeviceInlineUniformBlockProperties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES });
+ vn_encode_VkPhysicalDeviceInlineUniformBlockProperties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceInlineUniformBlockProperties_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceInlineUniformBlockProperties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceInlineUniformBlockProperties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceInlineUniformBlockProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maxInlineUniformBlockSize */
+ /* skip val->maxPerStageDescriptorInlineUniformBlocks */
+ /* skip val->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks */
+ /* skip val->maxDescriptorSetInlineUniformBlocks */
+ /* skip val->maxDescriptorSetUpdateAfterBindInlineUniformBlocks */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceInlineUniformBlockProperties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceInlineUniformBlockProperties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceInlineUniformBlockProperties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockProperties_self_partial_temp(dec, val);
+}
+
/* struct VkPhysicalDeviceMaintenance3Properties chain */
static inline void
@@ -5142,6 +10297,60 @@ vn_decode_VkPhysicalDeviceMaintenance3Properties_partial_temp(struct vn_cs_decod
vn_decode_VkPhysicalDeviceMaintenance3Properties_self_partial_temp(dec, val);
}
+/* struct VkPhysicalDeviceMaintenance4Properties chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceMaintenance4Properties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMaintenance4Properties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceMaintenance4Properties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkDeviceSize(enc, &val->maxBufferSize);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceMaintenance4Properties(struct vn_cs_encoder *enc, const VkPhysicalDeviceMaintenance4Properties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES });
+ vn_encode_VkPhysicalDeviceMaintenance4Properties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceMaintenance4Properties_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceMaintenance4Properties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMaintenance4Properties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMaintenance4Properties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maxBufferSize */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceMaintenance4Properties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceMaintenance4Properties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceMaintenance4Properties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Properties_self_partial_temp(dec, val);
+}
+
/* struct VkPhysicalDeviceFloatControlsProperties chain */
static inline void
@@ -5228,6 +10437,76 @@ vn_decode_VkPhysicalDeviceFloatControlsProperties_partial_temp(struct vn_cs_deco
vn_decode_VkPhysicalDeviceFloatControlsProperties_self_partial_temp(dec, val);
}
+/* struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceConservativeRasterizationPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_float(enc, &val->primitiveOverestimationSize);
+ vn_encode_float(enc, &val->maxExtraPrimitiveOverestimationSize);
+ vn_encode_float(enc, &val->extraPrimitiveOverestimationSizeGranularity);
+ vn_encode_VkBool32(enc, &val->primitiveUnderestimation);
+ vn_encode_VkBool32(enc, &val->conservativePointAndLineRasterization);
+ vn_encode_VkBool32(enc, &val->degenerateTrianglesRasterized);
+ vn_encode_VkBool32(enc, &val->degenerateLinesRasterized);
+ vn_encode_VkBool32(enc, &val->fullyCoveredFragmentShaderInputVariable);
+ vn_encode_VkBool32(enc, &val->conservativeRasterizationPostDepthCoverage);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceConservativeRasterizationPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceConservativeRasterizationPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->primitiveOverestimationSize */
+ /* skip val->maxExtraPrimitiveOverestimationSize */
+ /* skip val->extraPrimitiveOverestimationSizeGranularity */
+ /* skip val->primitiveUnderestimation */
+ /* skip val->conservativePointAndLineRasterization */
+ /* skip val->degenerateTrianglesRasterized */
+ /* skip val->degenerateLinesRasterized */
+ /* skip val->fullyCoveredFragmentShaderInputVariable */
+ /* skip val->conservativeRasterizationPostDepthCoverage */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceConservativeRasterizationPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_self_partial_temp(dec, val);
+}
+
/* struct VkPhysicalDeviceDescriptorIndexingProperties chain */
static inline void
@@ -5380,6 +10659,120 @@ vn_decode_VkPhysicalDeviceTimelineSemaphoreProperties_partial_temp(struct vn_cs_
vn_decode_VkPhysicalDeviceTimelineSemaphoreProperties_self_partial_temp(dec, val);
}
+/* struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->maxVertexAttribDivisor);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maxVertexAttribDivisor */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDevicePCIBusInfoPropertiesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDevicePCIBusInfoPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePCIBusInfoPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDevicePCIBusInfoPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->pciDomain);
+ vn_encode_uint32_t(enc, &val->pciBus);
+ vn_encode_uint32_t(enc, &val->pciDevice);
+ vn_encode_uint32_t(enc, &val->pciFunction);
+}
+
+static inline void
+vn_encode_VkPhysicalDevicePCIBusInfoPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDevicePCIBusInfoPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDevicePCIBusInfoPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDevicePCIBusInfoPropertiesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDevicePCIBusInfoPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePCIBusInfoPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePCIBusInfoPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->pciDomain */
+ /* skip val->pciBus */
+ /* skip val->pciDevice */
+ /* skip val->pciFunction */
+}
+
+static inline void
+vn_decode_VkPhysicalDevicePCIBusInfoPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDevicePCIBusInfoPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDevicePCIBusInfoPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePCIBusInfoPropertiesEXT_self_partial_temp(dec, val);
+}
+
/* struct VkPhysicalDeviceDepthStencilResolveProperties chain */
static inline void
@@ -5512,6 +10905,180 @@ vn_decode_VkPhysicalDeviceTransformFeedbackPropertiesEXT_partial_temp(struct vn_
vn_decode_VkPhysicalDeviceTransformFeedbackPropertiesEXT_self_partial_temp(dec, val);
}
+/* struct VkPhysicalDeviceTexelBufferAlignmentProperties chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceTexelBufferAlignmentProperties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceTexelBufferAlignmentProperties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceTexelBufferAlignmentProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkDeviceSize(enc, &val->storageTexelBufferOffsetAlignmentBytes);
+ vn_encode_VkBool32(enc, &val->storageTexelBufferOffsetSingleTexelAlignment);
+ vn_encode_VkDeviceSize(enc, &val->uniformTexelBufferOffsetAlignmentBytes);
+ vn_encode_VkBool32(enc, &val->uniformTexelBufferOffsetSingleTexelAlignment);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceTexelBufferAlignmentProperties(struct vn_cs_encoder *enc, const VkPhysicalDeviceTexelBufferAlignmentProperties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES });
+ vn_encode_VkPhysicalDeviceTexelBufferAlignmentProperties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceTexelBufferAlignmentProperties_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentProperties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentProperties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTexelBufferAlignmentProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->storageTexelBufferOffsetAlignmentBytes */
+ /* skip val->storageTexelBufferOffsetSingleTexelAlignment */
+ /* skip val->uniformTexelBufferOffsetAlignmentBytes */
+ /* skip val->uniformTexelBufferOffsetSingleTexelAlignment */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceTexelBufferAlignmentProperties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceTexelBufferAlignmentProperties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceTexelBufferAlignmentProperties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentProperties_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDeviceSubgroupSizeControlProperties chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceSubgroupSizeControlProperties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceSubgroupSizeControlProperties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceSubgroupSizeControlProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->minSubgroupSize);
+ vn_encode_uint32_t(enc, &val->maxSubgroupSize);
+ vn_encode_uint32_t(enc, &val->maxComputeWorkgroupSubgroups);
+ vn_encode_VkFlags(enc, &val->requiredSubgroupSizeStages);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceSubgroupSizeControlProperties(struct vn_cs_encoder *enc, const VkPhysicalDeviceSubgroupSizeControlProperties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES });
+ vn_encode_VkPhysicalDeviceSubgroupSizeControlProperties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceSubgroupSizeControlProperties_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceSubgroupSizeControlProperties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSubgroupSizeControlProperties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSubgroupSizeControlProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->minSubgroupSize */
+ /* skip val->maxSubgroupSize */
+ /* skip val->maxComputeWorkgroupSubgroups */
+ /* skip val->requiredSubgroupSizeStages */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceSubgroupSizeControlProperties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceSubgroupSizeControlProperties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceSubgroupSizeControlProperties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlProperties_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDeviceLineRasterizationPropertiesEXT chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceLineRasterizationPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceLineRasterizationPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceLineRasterizationPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->lineSubPixelPrecisionBits);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceLineRasterizationPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceLineRasterizationPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceLineRasterizationPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceLineRasterizationPropertiesEXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceLineRasterizationPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceLineRasterizationPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceLineRasterizationPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->lineSubPixelPrecisionBits */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceLineRasterizationPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceLineRasterizationPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceLineRasterizationPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationPropertiesEXT_self_partial_temp(dec, val);
+}
+
/* struct VkPhysicalDeviceVulkan11Properties chain */
static inline void
@@ -5755,6 +11322,426 @@ vn_decode_VkPhysicalDeviceVulkan12Properties_partial_temp(struct vn_cs_decoder *
vn_decode_VkPhysicalDeviceVulkan12Properties_self_partial_temp(dec, val);
}
+/* struct VkPhysicalDeviceVulkan13Properties chain */
+
+/* The promoted VK 1.3 properties struct carries no supported pNext extensions. */
+static inline void
+vn_encode_VkPhysicalDeviceVulkan13Properties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+/* Encodes every VK 1.3 property member in declaration order; sType/pNext are
+ * written by the caller. The order here must match the decoder exactly. */
+static inline void
+vn_encode_VkPhysicalDeviceVulkan13Properties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceVulkan13Properties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->minSubgroupSize);
+ vn_encode_uint32_t(enc, &val->maxSubgroupSize);
+ vn_encode_uint32_t(enc, &val->maxComputeWorkgroupSubgroups);
+ vn_encode_VkFlags(enc, &val->requiredSubgroupSizeStages);
+ vn_encode_uint32_t(enc, &val->maxInlineUniformBlockSize);
+ vn_encode_uint32_t(enc, &val->maxPerStageDescriptorInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxDescriptorSetInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
+ vn_encode_uint32_t(enc, &val->maxInlineUniformTotalSize);
+ vn_encode_VkBool32(enc, &val->integerDotProduct8BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct8BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct8BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct4x8BitPackedUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct4x8BitPackedSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct4x8BitPackedMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct16BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct16BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct16BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct32BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct32BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct32BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct64BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct64BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct64BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating8BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating8BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating16BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating16BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating32BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating32BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating64BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating64BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated);
+ vn_encode_VkDeviceSize(enc, &val->storageTexelBufferOffsetAlignmentBytes);
+ vn_encode_VkBool32(enc, &val->storageTexelBufferOffsetSingleTexelAlignment);
+ vn_encode_VkDeviceSize(enc, &val->uniformTexelBufferOffsetAlignmentBytes);
+ vn_encode_VkBool32(enc, &val->uniformTexelBufferOffsetSingleTexelAlignment);
+ vn_encode_VkDeviceSize(enc, &val->maxBufferSize);
+}
+
+/* Encodes the full struct: sType tag, pNext chain, then the member payload. */
+static inline void
+vn_encode_VkPhysicalDeviceVulkan13Properties(struct vn_cs_encoder *enc, const VkPhysicalDeviceVulkan13Properties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES });
+ vn_encode_VkPhysicalDeviceVulkan13Properties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceVulkan13Properties_self(enc, val);
+}
+
+/* Marks the decoder fatal on any non-NULL pNext: no extension struct is supported. */
+static inline void *
+vn_decode_VkPhysicalDeviceVulkan13Properties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: no member bytes are consumed here (presumably these are
+ * output-only fields the host fills before replying — confirm with generator). */
+static inline void
+vn_decode_VkPhysicalDeviceVulkan13Properties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVulkan13Properties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->minSubgroupSize */
+ /* skip val->maxSubgroupSize */
+ /* skip val->maxComputeWorkgroupSubgroups */
+ /* skip val->requiredSubgroupSizeStages */
+ /* skip val->maxInlineUniformBlockSize */
+ /* skip val->maxPerStageDescriptorInlineUniformBlocks */
+ /* skip val->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks */
+ /* skip val->maxDescriptorSetInlineUniformBlocks */
+ /* skip val->maxDescriptorSetUpdateAfterBindInlineUniformBlocks */
+ /* skip val->maxInlineUniformTotalSize */
+ /* skip val->integerDotProduct8BitUnsignedAccelerated */
+ /* skip val->integerDotProduct8BitSignedAccelerated */
+ /* skip val->integerDotProduct8BitMixedSignednessAccelerated */
+ /* skip val->integerDotProduct4x8BitPackedUnsignedAccelerated */
+ /* skip val->integerDotProduct4x8BitPackedSignedAccelerated */
+ /* skip val->integerDotProduct4x8BitPackedMixedSignednessAccelerated */
+ /* skip val->integerDotProduct16BitUnsignedAccelerated */
+ /* skip val->integerDotProduct16BitSignedAccelerated */
+ /* skip val->integerDotProduct16BitMixedSignednessAccelerated */
+ /* skip val->integerDotProduct32BitUnsignedAccelerated */
+ /* skip val->integerDotProduct32BitSignedAccelerated */
+ /* skip val->integerDotProduct32BitMixedSignednessAccelerated */
+ /* skip val->integerDotProduct64BitUnsignedAccelerated */
+ /* skip val->integerDotProduct64BitSignedAccelerated */
+ /* skip val->integerDotProduct64BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating8BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating8BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating16BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating16BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating32BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating32BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating64BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating64BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated */
+ /* skip val->storageTexelBufferOffsetAlignmentBytes */
+ /* skip val->storageTexelBufferOffsetSingleTexelAlignment */
+ /* skip val->uniformTexelBufferOffsetAlignmentBytes */
+ /* skip val->uniformTexelBufferOffsetSingleTexelAlignment */
+ /* skip val->maxBufferSize */
+}
+
+/* Decodes the sType tag (fatal on mismatch) and pNext chain, then the partial body. */
+static inline void
+vn_decode_VkPhysicalDeviceVulkan13Properties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceVulkan13Properties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceVulkan13Properties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Properties_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDeviceCustomBorderColorPropertiesEXT chain */
+
+/* This struct carries no supported pNext extensions; always encodes NULL. */
+static inline void
+vn_encode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+/* Encodes only this struct's own members; sType/pNext are written by the caller. */
+static inline void
+vn_encode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceCustomBorderColorPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->maxCustomBorderColorSamplers);
+}
+
+/* Encodes the full struct: sType tag, pNext chain, then the member payload. */
+static inline void
+vn_encode_VkPhysicalDeviceCustomBorderColorPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceCustomBorderColorPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_self(enc, val);
+}
+
+/* Marks the decoder fatal on any non-NULL pNext: no extension struct is supported. */
+static inline void *
+vn_decode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: no member bytes are consumed here (presumably output-only
+ * fields the host fills before replying — confirm with generator). */
+static inline void
+vn_decode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceCustomBorderColorPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->maxCustomBorderColorSamplers */
+}
+
+/* Decodes the sType tag (fatal on mismatch) and pNext chain, then the partial body. */
+static inline void
+vn_decode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceCustomBorderColorPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDeviceRobustness2PropertiesEXT chain */
+
+/* This struct carries no supported pNext extensions; always encodes NULL. */
+static inline void
+vn_encode_VkPhysicalDeviceRobustness2PropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+/* Encodes only this struct's own members; sType/pNext are written by the caller. */
+static inline void
+vn_encode_VkPhysicalDeviceRobustness2PropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceRobustness2PropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkDeviceSize(enc, &val->robustStorageBufferAccessSizeAlignment);
+ vn_encode_VkDeviceSize(enc, &val->robustUniformBufferAccessSizeAlignment);
+}
+
+/* Encodes the full struct: sType tag, pNext chain, then the member payload. */
+static inline void
+vn_encode_VkPhysicalDeviceRobustness2PropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceRobustness2PropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceRobustness2PropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceRobustness2PropertiesEXT_self(enc, val);
+}
+
+/* Marks the decoder fatal on any non-NULL pNext: no extension struct is supported. */
+static inline void *
+vn_decode_VkPhysicalDeviceRobustness2PropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: no member bytes are consumed here (presumably output-only
+ * fields the host fills before replying — confirm with generator). */
+static inline void
+vn_decode_VkPhysicalDeviceRobustness2PropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceRobustness2PropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->robustStorageBufferAccessSizeAlignment */
+ /* skip val->robustUniformBufferAccessSizeAlignment */
+}
+
+/* Decodes the sType tag (fatal on mismatch) and pNext chain, then the partial body. */
+static inline void
+vn_decode_VkPhysicalDeviceRobustness2PropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceRobustness2PropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceRobustness2PropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2PropertiesEXT_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDeviceProvokingVertexPropertiesEXT chain */
+
+/* This struct carries no supported pNext extensions; always encodes NULL. */
+static inline void
+vn_encode_VkPhysicalDeviceProvokingVertexPropertiesEXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+/* Encodes only this struct's own members; sType/pNext are written by the caller. */
+static inline void
+vn_encode_VkPhysicalDeviceProvokingVertexPropertiesEXT_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceProvokingVertexPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->provokingVertexModePerPipeline);
+ vn_encode_VkBool32(enc, &val->transformFeedbackPreservesTriangleFanProvokingVertex);
+}
+
+/* Encodes the full struct: sType tag, pNext chain, then the member payload. */
+static inline void
+vn_encode_VkPhysicalDeviceProvokingVertexPropertiesEXT(struct vn_cs_encoder *enc, const VkPhysicalDeviceProvokingVertexPropertiesEXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT });
+ vn_encode_VkPhysicalDeviceProvokingVertexPropertiesEXT_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceProvokingVertexPropertiesEXT_self(enc, val);
+}
+
+/* Marks the decoder fatal on any non-NULL pNext: no extension struct is supported. */
+static inline void *
+vn_decode_VkPhysicalDeviceProvokingVertexPropertiesEXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: no member bytes are consumed here (presumably output-only
+ * fields the host fills before replying — confirm with generator). */
+static inline void
+vn_decode_VkPhysicalDeviceProvokingVertexPropertiesEXT_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceProvokingVertexPropertiesEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->provokingVertexModePerPipeline */
+ /* skip val->transformFeedbackPreservesTriangleFanProvokingVertex */
+}
+
+/* Decodes the sType tag (fatal on mismatch) and pNext chain, then the partial body. */
+static inline void
+vn_decode_VkPhysicalDeviceProvokingVertexPropertiesEXT_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceProvokingVertexPropertiesEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceProvokingVertexPropertiesEXT_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceProvokingVertexPropertiesEXT_self_partial_temp(dec, val);
+}
+
+/* struct VkPhysicalDeviceShaderIntegerDotProductProperties chain */
+
+/* This struct carries no supported pNext extensions; always encodes NULL. */
+static inline void
+vn_encode_VkPhysicalDeviceShaderIntegerDotProductProperties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+/* Encodes every member in declaration order; sType/pNext are written by the
+ * caller. The order here must match the decoder exactly. */
+static inline void
+vn_encode_VkPhysicalDeviceShaderIntegerDotProductProperties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderIntegerDotProductProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkBool32(enc, &val->integerDotProduct8BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct8BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct8BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct4x8BitPackedUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct4x8BitPackedSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct4x8BitPackedMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct16BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct16BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct16BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct32BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct32BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct32BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct64BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct64BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProduct64BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating8BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating8BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating16BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating16BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating32BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating32BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating64BitUnsignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating64BitSignedAccelerated);
+ vn_encode_VkBool32(enc, &val->integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated);
+}
+
+/* Encodes the full struct: sType tag, pNext chain, then the member payload. */
+static inline void
+vn_encode_VkPhysicalDeviceShaderIntegerDotProductProperties(struct vn_cs_encoder *enc, const VkPhysicalDeviceShaderIntegerDotProductProperties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES });
+ vn_encode_VkPhysicalDeviceShaderIntegerDotProductProperties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceShaderIntegerDotProductProperties_self(enc, val);
+}
+
+/* Marks the decoder fatal on any non-NULL pNext: no extension struct is supported. */
+static inline void *
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductProperties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+/* Partial decode: no member bytes are consumed here (presumably output-only
+ * fields the host fills before replying — confirm with generator). */
+static inline void
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductProperties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderIntegerDotProductProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->integerDotProduct8BitUnsignedAccelerated */
+ /* skip val->integerDotProduct8BitSignedAccelerated */
+ /* skip val->integerDotProduct8BitMixedSignednessAccelerated */
+ /* skip val->integerDotProduct4x8BitPackedUnsignedAccelerated */
+ /* skip val->integerDotProduct4x8BitPackedSignedAccelerated */
+ /* skip val->integerDotProduct4x8BitPackedMixedSignednessAccelerated */
+ /* skip val->integerDotProduct16BitUnsignedAccelerated */
+ /* skip val->integerDotProduct16BitSignedAccelerated */
+ /* skip val->integerDotProduct16BitMixedSignednessAccelerated */
+ /* skip val->integerDotProduct32BitUnsignedAccelerated */
+ /* skip val->integerDotProduct32BitSignedAccelerated */
+ /* skip val->integerDotProduct32BitMixedSignednessAccelerated */
+ /* skip val->integerDotProduct64BitUnsignedAccelerated */
+ /* skip val->integerDotProduct64BitSignedAccelerated */
+ /* skip val->integerDotProduct64BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating8BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating8BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating16BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating16BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating32BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating32BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating64BitUnsignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating64BitSignedAccelerated */
+ /* skip val->integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated */
+}
+
+/* Decodes the sType tag (fatal on mismatch) and pNext chain, then the partial body. */
+static inline void
+vn_decode_VkPhysicalDeviceShaderIntegerDotProductProperties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceShaderIntegerDotProductProperties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceShaderIntegerDotProductProperties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductProperties_self_partial_temp(dec, val);
+}
+
/* struct VkPhysicalDeviceProperties2 chain */
static inline void
@@ -5764,6 +11751,18 @@ vn_encode_VkPhysicalDeviceProperties2_pnext(struct vn_cs_encoder *enc, const voi
while (pnext) {
switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceMultiDrawPropertiesEXT_self(enc, (const VkPhysicalDeviceMultiDrawPropertiesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevicePushDescriptorPropertiesKHR_self(enc, (const VkPhysicalDevicePushDescriptorPropertiesKHR *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -5806,18 +11805,36 @@ vn_encode_VkPhysicalDeviceProperties2_pnext(struct vn_cs_encoder *enc, const voi
vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceSamplerFilterMinmaxProperties_self(enc, (const VkPhysicalDeviceSamplerFilterMinmaxProperties *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceInlineUniformBlockProperties_self(enc, (const VkPhysicalDeviceInlineUniformBlockProperties *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceMaintenance3Properties_self(enc, (const VkPhysicalDeviceMaintenance3Properties *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceMaintenance4Properties_self(enc, (const VkPhysicalDeviceMaintenance4Properties *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceFloatControlsProperties_self(enc, (const VkPhysicalDeviceFloatControlsProperties *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_self(enc, (const VkPhysicalDeviceConservativeRasterizationPropertiesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -5830,6 +11847,18 @@ vn_encode_VkPhysicalDeviceProperties2_pnext(struct vn_cs_encoder *enc, const voi
vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceTimelineSemaphoreProperties_self(enc, (const VkPhysicalDeviceTimelineSemaphoreProperties *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_self(enc, (const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDevicePCIBusInfoPropertiesEXT_self(enc, (const VkPhysicalDevicePCIBusInfoPropertiesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -5842,6 +11871,24 @@ vn_encode_VkPhysicalDeviceProperties2_pnext(struct vn_cs_encoder *enc, const voi
vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceTransformFeedbackPropertiesEXT_self(enc, (const VkPhysicalDeviceTransformFeedbackPropertiesEXT *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceTexelBufferAlignmentProperties_self(enc, (const VkPhysicalDeviceTexelBufferAlignmentProperties *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceSubgroupSizeControlProperties_self(enc, (const VkPhysicalDeviceSubgroupSizeControlProperties *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceLineRasterizationPropertiesEXT_self(enc, (const VkPhysicalDeviceLineRasterizationPropertiesEXT *)pnext);
+ return;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
vn_encode_simple_pointer(enc, pnext);
vn_encode_VkStructureType(enc, &pnext->sType);
@@ -5854,6 +11901,36 @@ vn_encode_VkPhysicalDeviceProperties2_pnext(struct vn_cs_encoder *enc, const voi
vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
vn_encode_VkPhysicalDeviceVulkan12Properties_self(enc, (const VkPhysicalDeviceVulkan12Properties *)pnext);
return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceVulkan13Properties_self(enc, (const VkPhysicalDeviceVulkan13Properties *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_self(enc, (const VkPhysicalDeviceCustomBorderColorPropertiesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceRobustness2PropertiesEXT_self(enc, (const VkPhysicalDeviceRobustness2PropertiesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceProvokingVertexPropertiesEXT_self(enc, (const VkPhysicalDeviceProvokingVertexPropertiesEXT *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkPhysicalDeviceProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkPhysicalDeviceShaderIntegerDotProductProperties_self(enc, (const VkPhysicalDeviceShaderIntegerDotProductProperties *)pnext);
+ return;
default:
/* ignore unknown/unsupported struct */
break;
@@ -5891,6 +11968,22 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkStructureType(dec, &stype);
switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMultiDrawPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMultiDrawPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceMultiDrawPropertiesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePushDescriptorPropertiesKHR));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePushDescriptorPropertiesKHR_self_partial_temp(dec, (VkPhysicalDevicePushDescriptorPropertiesKHR *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDriverProperties));
if (pnext) {
@@ -5947,6 +12040,14 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkPhysicalDeviceSamplerFilterMinmaxProperties_self_partial_temp(dec, (VkPhysicalDeviceSamplerFilterMinmaxProperties *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceInlineUniformBlockProperties));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceInlineUniformBlockProperties_self_partial_temp(dec, (VkPhysicalDeviceInlineUniformBlockProperties *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMaintenance3Properties));
if (pnext) {
@@ -5955,6 +12056,14 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkPhysicalDeviceMaintenance3Properties_self_partial_temp(dec, (VkPhysicalDeviceMaintenance3Properties *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceMaintenance4Properties));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceMaintenance4Properties_self_partial_temp(dec, (VkPhysicalDeviceMaintenance4Properties *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceFloatControlsProperties));
if (pnext) {
@@ -5963,6 +12072,14 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkPhysicalDeviceFloatControlsProperties_self_partial_temp(dec, (VkPhysicalDeviceFloatControlsProperties *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceConservativeRasterizationPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceConservativeRasterizationPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceConservativeRasterizationPropertiesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDescriptorIndexingProperties));
if (pnext) {
@@ -5979,6 +12096,22 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkPhysicalDeviceTimelineSemaphoreProperties_self_partial_temp(dec, (VkPhysicalDeviceTimelineSemaphoreProperties *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDevicePCIBusInfoPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDevicePCIBusInfoPropertiesEXT_self_partial_temp(dec, (VkPhysicalDevicePCIBusInfoPropertiesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceDepthStencilResolveProperties));
if (pnext) {
@@ -5995,6 +12128,30 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkPhysicalDeviceTransformFeedbackPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceTexelBufferAlignmentProperties));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceTexelBufferAlignmentProperties_self_partial_temp(dec, (VkPhysicalDeviceTexelBufferAlignmentProperties *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceSubgroupSizeControlProperties));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceSubgroupSizeControlProperties_self_partial_temp(dec, (VkPhysicalDeviceSubgroupSizeControlProperties *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceLineRasterizationPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceLineRasterizationPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceLineRasterizationPropertiesEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan11Properties));
if (pnext) {
@@ -6011,6 +12168,46 @@ vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(struct vn_cs_decoder *d
vn_decode_VkPhysicalDeviceVulkan12Properties_self_partial_temp(dec, (VkPhysicalDeviceVulkan12Properties *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceVulkan13Properties));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceVulkan13Properties_self_partial_temp(dec, (VkPhysicalDeviceVulkan13Properties *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceCustomBorderColorPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceCustomBorderColorPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceCustomBorderColorPropertiesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceRobustness2PropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceRobustness2PropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceRobustness2PropertiesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceProvokingVertexPropertiesEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceProvokingVertexPropertiesEXT_self_partial_temp(dec, (VkPhysicalDeviceProvokingVertexPropertiesEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPhysicalDeviceShaderIntegerDotProductProperties));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPhysicalDeviceProperties2_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceShaderIntegerDotProductProperties_self_partial_temp(dec, (VkPhysicalDeviceShaderIntegerDotProductProperties *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -6130,6 +12327,153 @@ vn_decode_VkDrmFormatModifierPropertiesListEXT_partial_temp(struct vn_cs_decoder
vn_decode_VkDrmFormatModifierPropertiesListEXT_self_partial_temp(dec, val);
}
+/* struct VkFormatProperties3 chain */
+
+static inline void
+vn_encode_VkFormatProperties3_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkFormatProperties3_self(struct vn_cs_encoder *enc, const VkFormatProperties3 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_VkFlags64(enc, &val->linearTilingFeatures);
+ vn_encode_VkFlags64(enc, &val->optimalTilingFeatures);
+ vn_encode_VkFlags64(enc, &val->bufferFeatures);
+}
+
+static inline void
+vn_encode_VkFormatProperties3(struct vn_cs_encoder *enc, const VkFormatProperties3 *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3 });
+ vn_encode_VkFormatProperties3_pnext(enc, val->pNext);
+ vn_encode_VkFormatProperties3_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkFormatProperties3_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkFormatProperties3_self_partial_temp(struct vn_cs_decoder *dec, VkFormatProperties3 *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->linearTilingFeatures */
+ /* skip val->optimalTilingFeatures */
+ /* skip val->bufferFeatures */
+}
+
+static inline void
+vn_decode_VkFormatProperties3_partial_temp(struct vn_cs_decoder *dec, VkFormatProperties3 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkFormatProperties3_pnext_partial_temp(dec);
+ vn_decode_VkFormatProperties3_self_partial_temp(dec, val);
+}
+
+/* struct VkDrmFormatModifierProperties2EXT */
+
+static inline void
+vn_encode_VkDrmFormatModifierProperties2EXT(struct vn_cs_encoder *enc, const VkDrmFormatModifierProperties2EXT *val)
+{
+ vn_encode_uint64_t(enc, &val->drmFormatModifier);
+ vn_encode_uint32_t(enc, &val->drmFormatModifierPlaneCount);
+ vn_encode_VkFlags64(enc, &val->drmFormatModifierTilingFeatures);
+}
+
+static inline void
+vn_decode_VkDrmFormatModifierProperties2EXT_partial_temp(struct vn_cs_decoder *dec, VkDrmFormatModifierProperties2EXT *val)
+{
+ /* skip val->drmFormatModifier */
+ /* skip val->drmFormatModifierPlaneCount */
+ /* skip val->drmFormatModifierTilingFeatures */
+}
+
+/* struct VkDrmFormatModifierPropertiesList2EXT chain */
+
+static inline void
+vn_encode_VkDrmFormatModifierPropertiesList2EXT_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkDrmFormatModifierPropertiesList2EXT_self(struct vn_cs_encoder *enc, const VkDrmFormatModifierPropertiesList2EXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->drmFormatModifierCount);
+ if (val->pDrmFormatModifierProperties) {
+ vn_encode_array_size(enc, val->drmFormatModifierCount);
+ for (uint32_t i = 0; i < val->drmFormatModifierCount; i++)
+ vn_encode_VkDrmFormatModifierProperties2EXT(enc, &val->pDrmFormatModifierProperties[i]);
+ } else {
+ vn_encode_array_size(enc, 0);
+ }
+}
+
+static inline void
+vn_encode_VkDrmFormatModifierPropertiesList2EXT(struct vn_cs_encoder *enc, const VkDrmFormatModifierPropertiesList2EXT *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT });
+ vn_encode_VkDrmFormatModifierPropertiesList2EXT_pnext(enc, val->pNext);
+ vn_encode_VkDrmFormatModifierPropertiesList2EXT_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkDrmFormatModifierPropertiesList2EXT_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkDrmFormatModifierPropertiesList2EXT_self_partial_temp(struct vn_cs_decoder *dec, VkDrmFormatModifierPropertiesList2EXT *val)
+{
+ /* skip val->{sType,pNext} */
+ /* WA1 */ val->drmFormatModifierCount = vn_peek_array_size(dec);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->drmFormatModifierCount);
+ val->pDrmFormatModifierProperties = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pDrmFormatModifierProperties) * iter_count);
+ if (!val->pDrmFormatModifierProperties) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkDrmFormatModifierProperties2EXT_partial_temp(dec, &val->pDrmFormatModifierProperties[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ val->pDrmFormatModifierProperties = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkDrmFormatModifierPropertiesList2EXT_partial_temp(struct vn_cs_decoder *dec, VkDrmFormatModifierPropertiesList2EXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDrmFormatModifierPropertiesList2EXT_pnext_partial_temp(dec);
+ vn_decode_VkDrmFormatModifierPropertiesList2EXT_self_partial_temp(dec, val);
+}
+
/* struct VkFormatProperties2 chain */
static inline void
@@ -6145,6 +12489,18 @@ vn_encode_VkFormatProperties2_pnext(struct vn_cs_encoder *enc, const void *val)
vn_encode_VkFormatProperties2_pnext(enc, pnext->pNext);
vn_encode_VkDrmFormatModifierPropertiesListEXT_self(enc, (const VkDrmFormatModifierPropertiesListEXT *)pnext);
return;
+ case VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkFormatProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkFormatProperties3_self(enc, (const VkFormatProperties3 *)pnext);
+ return;
+ case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkFormatProperties2_pnext(enc, pnext->pNext);
+ vn_encode_VkDrmFormatModifierPropertiesList2EXT_self(enc, (const VkDrmFormatModifierPropertiesList2EXT *)pnext);
+ return;
default:
/* ignore unknown/unsupported struct */
break;
@@ -6190,6 +12546,22 @@ vn_decode_VkFormatProperties2_pnext_partial_temp(struct vn_cs_decoder *dec)
vn_decode_VkDrmFormatModifierPropertiesListEXT_self_partial_temp(dec, (VkDrmFormatModifierPropertiesListEXT *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkFormatProperties3));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkFormatProperties2_pnext_partial_temp(dec);
+ vn_decode_VkFormatProperties3_self_partial_temp(dec, (VkFormatProperties3 *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkDrmFormatModifierPropertiesList2EXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkFormatProperties2_pnext_partial_temp(dec);
+ vn_decode_VkDrmFormatModifierPropertiesList2EXT_self_partial_temp(dec, (VkDrmFormatModifierPropertiesList2EXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -7358,10 +13730,10 @@ vn_decode_VkPhysicalDeviceGroupProperties_partial_temp(struct vn_cs_decoder *dec
vn_decode_VkPhysicalDeviceGroupProperties_self_partial_temp(dec, val);
}
-/* struct VkDeviceQueueInfo2 chain */
+/* struct VkDeviceQueueTimelineInfoMESA chain */
static inline void *
-vn_decode_VkDeviceQueueInfo2_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkDeviceQueueTimelineInfoMESA_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -7370,6 +13742,83 @@ vn_decode_VkDeviceQueueInfo2_pnext_temp(struct vn_cs_decoder *dec)
}
static inline void
+vn_decode_VkDeviceQueueTimelineInfoMESA_self_temp(struct vn_cs_decoder *dec, VkDeviceQueueTimelineInfoMESA *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->ringIdx);
+}
+
+static inline void
+vn_decode_VkDeviceQueueTimelineInfoMESA_temp(struct vn_cs_decoder *dec, VkDeviceQueueTimelineInfoMESA *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDeviceQueueTimelineInfoMESA_pnext_temp(dec);
+ vn_decode_VkDeviceQueueTimelineInfoMESA_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkDeviceQueueTimelineInfoMESA_handle_self(VkDeviceQueueTimelineInfoMESA *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->ringIdx */
+}
+
+static inline void
+vn_replace_VkDeviceQueueTimelineInfoMESA_handle(VkDeviceQueueTimelineInfoMESA *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA:
+ vn_replace_VkDeviceQueueTimelineInfoMESA_handle_self((VkDeviceQueueTimelineInfoMESA *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkDeviceQueueInfo2 chain */
+
+static inline void *
+vn_decode_VkDeviceQueueInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkDeviceQueueTimelineInfoMESA));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkDeviceQueueInfo2_pnext_temp(dec);
+ vn_decode_VkDeviceQueueTimelineInfoMESA_self_temp(dec, (VkDeviceQueueTimelineInfoMESA *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkDeviceQueueInfo2_self_temp(struct vn_cs_decoder *dec, VkDeviceQueueInfo2 *val)
{
/* skip val->{sType,pNext} */
@@ -7411,6 +13860,9 @@ vn_replace_VkDeviceQueueInfo2_handle(VkDeviceQueueInfo2 *val)
case VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2:
vn_replace_VkDeviceQueueInfo2_handle_self((VkDeviceQueueInfo2 *)pnext);
break;
+ case VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA:
+ vn_replace_VkDeviceQueueTimelineInfoMESA_handle_self((VkDeviceQueueTimelineInfoMESA *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -7419,6 +13871,129 @@ vn_replace_VkDeviceQueueInfo2_handle(VkDeviceQueueInfo2 *val)
} while (pnext);
}
+/* struct VkCalibratedTimestampInfoEXT chain */
+
+static inline void *
+vn_decode_VkCalibratedTimestampInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkCalibratedTimestampInfoEXT_self_temp(struct vn_cs_decoder *dec, VkCalibratedTimestampInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkTimeDomainEXT(dec, &val->timeDomain);
+}
+
+static inline void
+vn_decode_VkCalibratedTimestampInfoEXT_temp(struct vn_cs_decoder *dec, VkCalibratedTimestampInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCalibratedTimestampInfoEXT_pnext_temp(dec);
+ vn_decode_VkCalibratedTimestampInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCalibratedTimestampInfoEXT_handle_self(VkCalibratedTimestampInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->timeDomain */
+}
+
+static inline void
+vn_replace_VkCalibratedTimestampInfoEXT_handle(VkCalibratedTimestampInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT:
+ vn_replace_VkCalibratedTimestampInfoEXT_handle_self((VkCalibratedTimestampInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPhysicalDeviceToolProperties chain */
+
+static inline void
+vn_encode_VkPhysicalDeviceToolProperties_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceToolProperties_self(struct vn_cs_encoder *enc, const VkPhysicalDeviceToolProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_array_size(enc, VK_MAX_EXTENSION_NAME_SIZE);
+ vn_encode_char_array(enc, val->name, VK_MAX_EXTENSION_NAME_SIZE);
+ vn_encode_array_size(enc, VK_MAX_EXTENSION_NAME_SIZE);
+ vn_encode_char_array(enc, val->version, VK_MAX_EXTENSION_NAME_SIZE);
+ vn_encode_VkFlags(enc, &val->purposes);
+ vn_encode_array_size(enc, VK_MAX_DESCRIPTION_SIZE);
+ vn_encode_char_array(enc, val->description, VK_MAX_DESCRIPTION_SIZE);
+ vn_encode_array_size(enc, VK_MAX_EXTENSION_NAME_SIZE);
+ vn_encode_char_array(enc, val->layer, VK_MAX_EXTENSION_NAME_SIZE);
+}
+
+static inline void
+vn_encode_VkPhysicalDeviceToolProperties(struct vn_cs_encoder *enc, const VkPhysicalDeviceToolProperties *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES });
+ vn_encode_VkPhysicalDeviceToolProperties_pnext(enc, val->pNext);
+ vn_encode_VkPhysicalDeviceToolProperties_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkPhysicalDeviceToolProperties_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceToolProperties_self_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceToolProperties *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->name */
+ /* skip val->version */
+ /* skip val->purposes */
+ /* skip val->description */
+ /* skip val->layer */
+}
+
+static inline void
+vn_decode_VkPhysicalDeviceToolProperties_partial_temp(struct vn_cs_decoder *dec, VkPhysicalDeviceToolProperties *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPhysicalDeviceToolProperties_pnext_partial_temp(dec);
+ vn_decode_VkPhysicalDeviceToolProperties_self_partial_temp(dec, val);
+}
+
static inline void vn_decode_vkEnumeratePhysicalDevices_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkEnumeratePhysicalDevices *args)
{
vn_decode_VkInstance_lookup(dec, &args->instance);
@@ -8457,6 +15032,158 @@ static inline void vn_encode_vkGetDeviceQueue2_reply(struct vn_cs_encoder *enc,
vn_encode_VkQueue(enc, args->pQueue);
}
+static inline void vn_decode_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT *args)
+{
+ vn_decode_VkPhysicalDevice_lookup(dec, &args->physicalDevice);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pTimeDomainCount = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pTimeDomainCount));
+ if (!args->pTimeDomainCount) return;
+ vn_decode_uint32_t(dec, args->pTimeDomainCount);
+ } else {
+ args->pTimeDomainCount = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, (args->pTimeDomainCount ? *args->pTimeDomainCount : 0));
+ args->pTimeDomains = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pTimeDomains) * array_size);
+ if (!args->pTimeDomains) return;
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pTimeDomains = NULL;
+ }
+}
+
+static inline void vn_replace_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_args_handle(struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT *args)
+{
+ vn_replace_VkPhysicalDevice_handle(&args->physicalDevice);
+ /* skip args->pTimeDomainCount */
+ /* skip args->pTimeDomains */
+}
+
+static inline void vn_encode_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->physicalDevice */
+ if (vn_encode_simple_pointer(enc, args->pTimeDomainCount))
+ vn_encode_uint32_t(enc, args->pTimeDomainCount);
+ if (args->pTimeDomains) {
+ vn_encode_array_size(enc, (args->pTimeDomainCount ? *args->pTimeDomainCount : 0));
+ vn_encode_VkTimeDomainEXT_array(enc, args->pTimeDomains, (args->pTimeDomainCount ? *args->pTimeDomainCount : 0));
+ } else {
+ vn_encode_array_size(enc, 0);
+ }
+}
+
+static inline void vn_decode_vkGetCalibratedTimestampsEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetCalibratedTimestampsEXT *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_uint32_t(dec, &args->timestampCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->timestampCount);
+ args->pTimestampInfos = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pTimestampInfos) * iter_count);
+ if (!args->pTimestampInfos) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkCalibratedTimestampInfoEXT_temp(dec, &((VkCalibratedTimestampInfoEXT *)args->pTimestampInfos)[i]);
+ } else {
+ vn_decode_array_size(dec, args->timestampCount);
+ args->pTimestampInfos = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, args->timestampCount);
+ args->pTimestamps = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pTimestamps) * array_size);
+ if (!args->pTimestamps) return;
+ } else {
+ vn_decode_array_size(dec, args->timestampCount);
+ args->pTimestamps = NULL;
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ args->pMaxDeviation = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pMaxDeviation));
+ if (!args->pMaxDeviation) return;
+ } else {
+ args->pMaxDeviation = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkGetCalibratedTimestampsEXT_args_handle(struct vn_command_vkGetCalibratedTimestampsEXT *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ /* skip args->timestampCount */
+ if (args->pTimestampInfos) {
+ for (uint32_t i = 0; i < args->timestampCount; i++)
+ vn_replace_VkCalibratedTimestampInfoEXT_handle(&((VkCalibratedTimestampInfoEXT *)args->pTimestampInfos)[i]);
+ }
+ /* skip args->pTimestamps */
+ /* skip args->pMaxDeviation */
+}
+
+static inline void vn_encode_vkGetCalibratedTimestampsEXT_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetCalibratedTimestampsEXT *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetCalibratedTimestampsEXT_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->device */
+ /* skip args->timestampCount */
+ /* skip args->pTimestampInfos */
+ if (args->pTimestamps) {
+ vn_encode_array_size(enc, args->timestampCount);
+ vn_encode_uint64_t_array(enc, args->pTimestamps, args->timestampCount);
+ } else {
+ vn_encode_array_size(enc, 0);
+ }
+ if (vn_encode_simple_pointer(enc, args->pMaxDeviation))
+ vn_encode_uint64_t(enc, args->pMaxDeviation);
+}
+
+static inline void vn_decode_vkGetPhysicalDeviceToolProperties_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetPhysicalDeviceToolProperties *args)
+{
+ vn_decode_VkPhysicalDevice_lookup(dec, &args->physicalDevice);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pToolCount = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pToolCount));
+ if (!args->pToolCount) return;
+ vn_decode_uint32_t(dec, args->pToolCount);
+ } else {
+ args->pToolCount = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, (args->pToolCount ? *args->pToolCount : 0));
+ args->pToolProperties = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pToolProperties) * iter_count);
+ if (!args->pToolProperties) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkPhysicalDeviceToolProperties_partial_temp(dec, &args->pToolProperties[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pToolProperties = NULL;
+ }
+}
+
+static inline void vn_replace_vkGetPhysicalDeviceToolProperties_args_handle(struct vn_command_vkGetPhysicalDeviceToolProperties *args)
+{
+ vn_replace_VkPhysicalDevice_handle(&args->physicalDevice);
+ /* skip args->pToolCount */
+ /* skip args->pToolProperties */
+}
+
+static inline void vn_encode_vkGetPhysicalDeviceToolProperties_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetPhysicalDeviceToolProperties *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetPhysicalDeviceToolProperties_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->physicalDevice */
+ if (vn_encode_simple_pointer(enc, args->pToolCount))
+ vn_encode_uint32_t(enc, args->pToolCount);
+ if (args->pToolProperties) {
+ vn_encode_array_size(enc, (args->pToolCount ? *args->pToolCount : 0));
+ for (uint32_t i = 0; i < (args->pToolCount ? *args->pToolCount : 0); i++)
+ vn_encode_VkPhysicalDeviceToolProperties(enc, &args->pToolProperties[i]);
+ } else {
+ vn_encode_array_size(enc, 0);
+ }
+}
+
static inline void vn_dispatch_vkEnumeratePhysicalDevices(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkEnumeratePhysicalDevices args;
@@ -9164,6 +15891,93 @@ static inline void vn_dispatch_vkGetDeviceQueue2(struct vn_dispatch_context *ctx
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT args;
+
+ if (!ctx->dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_args_temp(ctx->decoder, &args);
+ if (!args.physicalDevice) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkGetCalibratedTimestampsEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetCalibratedTimestampsEXT args;
+
+ if (!ctx->dispatch_vkGetCalibratedTimestampsEXT) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetCalibratedTimestampsEXT_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetCalibratedTimestampsEXT(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkGetCalibratedTimestampsEXT returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetCalibratedTimestampsEXT_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkGetPhysicalDeviceToolProperties(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetPhysicalDeviceToolProperties args;
+
+ if (!ctx->dispatch_vkGetPhysicalDeviceToolProperties) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetPhysicalDeviceToolProperties_args_temp(ctx->decoder, &args);
+ if (!args.physicalDevice) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetPhysicalDeviceToolProperties(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkGetPhysicalDeviceToolProperties returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetPhysicalDeviceToolProperties_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_DEVICE_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_device_memory.h b/src/venus/venus-protocol/vn_protocol_renderer_device_memory.h
index d7d8c071..e549f09f 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_device_memory.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_device_memory.h
@@ -19,6 +19,8 @@
*
* VkImportMemoryFdInfoKHR
* vkMapMemory
+ * vkGetMemoryFdKHR
+ * vkGetMemoryFdPropertiesKHR
*/
/* struct VkExportMemoryAllocateInfo chain */
@@ -440,31 +442,6 @@ vn_replace_VkMemoryAllocateInfo_handle(VkMemoryAllocateInfo *val)
/* struct VkMappedMemoryRange chain */
-static inline void
-vn_encode_VkMappedMemoryRange_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkMappedMemoryRange_self(struct vn_cs_encoder *enc, const VkMappedMemoryRange *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_VkDeviceMemory(enc, &val->memory);
- vn_encode_VkDeviceSize(enc, &val->offset);
- vn_encode_VkDeviceSize(enc, &val->size);
-}
-
-static inline void
-vn_encode_VkMappedMemoryRange(struct vn_cs_encoder *enc, const VkMappedMemoryRange *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE });
- vn_encode_VkMappedMemoryRange_pnext(enc, val->pNext);
- vn_encode_VkMappedMemoryRange_self(enc, val);
-}
-
static inline void *
vn_decode_VkMappedMemoryRange_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -526,29 +503,6 @@ vn_replace_VkMappedMemoryRange_handle(VkMappedMemoryRange *val)
/* struct VkDeviceMemoryOpaqueCaptureAddressInfo chain */
-static inline void
-vn_encode_VkDeviceMemoryOpaqueCaptureAddressInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkDeviceMemoryOpaqueCaptureAddressInfo_self(struct vn_cs_encoder *enc, const VkDeviceMemoryOpaqueCaptureAddressInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_VkDeviceMemory(enc, &val->memory);
-}
-
-static inline void
-vn_encode_VkDeviceMemoryOpaqueCaptureAddressInfo(struct vn_cs_encoder *enc, const VkDeviceMemoryOpaqueCaptureAddressInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO });
- vn_encode_VkDeviceMemoryOpaqueCaptureAddressInfo_pnext(enc, val->pNext);
- vn_encode_VkDeviceMemoryOpaqueCaptureAddressInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkDeviceMemoryOpaqueCaptureAddressInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -604,6 +558,150 @@ vn_replace_VkDeviceMemoryOpaqueCaptureAddressInfo_handle(VkDeviceMemoryOpaqueCap
} while (pnext);
}
+/* struct VkMemoryResourceAllocationSizeProperties100000MESA chain */
+
+static inline void
+vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ /* no known/supported struct */
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_self(struct vn_cs_encoder *enc, const VkMemoryResourceAllocationSizeProperties100000MESA *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint64_t(enc, &val->allocationSize);
+}
+
+static inline void
+vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA(struct vn_cs_encoder *enc, const VkMemoryResourceAllocationSizeProperties100000MESA *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA });
+ vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext(enc, val->pNext);
+ vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_self_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourceAllocationSizeProperties100000MESA *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->allocationSize */
+}
+
+static inline void
+vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourceAllocationSizeProperties100000MESA *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext_partial_temp(dec);
+ vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_self_partial_temp(dec, val);
+}
+
+/* struct VkMemoryResourcePropertiesMESA chain */
+
+static inline void
+vn_encode_VkMemoryResourcePropertiesMESA_pnext(struct vn_cs_encoder *enc, const void *val)
+{
+ const VkBaseInStructure *pnext = val;
+
+ while (pnext) {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA:
+ vn_encode_simple_pointer(enc, pnext);
+ vn_encode_VkStructureType(enc, &pnext->sType);
+ vn_encode_VkMemoryResourcePropertiesMESA_pnext(enc, pnext->pNext);
+ vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_self(enc, (const VkMemoryResourceAllocationSizeProperties100000MESA *)pnext);
+ return;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ }
+
+ vn_encode_simple_pointer(enc, NULL);
+}
+
+static inline void
+vn_encode_VkMemoryResourcePropertiesMESA_self(struct vn_cs_encoder *enc, const VkMemoryResourcePropertiesMESA *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_encode_uint32_t(enc, &val->memoryTypeBits);
+}
+
+static inline void
+vn_encode_VkMemoryResourcePropertiesMESA(struct vn_cs_encoder *enc, const VkMemoryResourcePropertiesMESA *val)
+{
+ assert(val->sType == VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA);
+ vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA });
+ vn_encode_VkMemoryResourcePropertiesMESA_pnext(enc, val->pNext);
+ vn_encode_VkMemoryResourcePropertiesMESA_self(enc, val);
+}
+
+static inline void *
+vn_decode_VkMemoryResourcePropertiesMESA_pnext_partial_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkMemoryResourceAllocationSizeProperties100000MESA));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkMemoryResourcePropertiesMESA_pnext_partial_temp(dec);
+ vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_self_partial_temp(dec, (VkMemoryResourceAllocationSizeProperties100000MESA *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
+vn_decode_VkMemoryResourcePropertiesMESA_self_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourcePropertiesMESA *val)
+{
+ /* skip val->{sType,pNext} */
+ /* skip val->memoryTypeBits */
+}
+
+static inline void
+vn_decode_VkMemoryResourcePropertiesMESA_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourcePropertiesMESA *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkMemoryResourcePropertiesMESA_pnext_partial_temp(dec);
+ vn_decode_VkMemoryResourcePropertiesMESA_self_partial_temp(dec, val);
+}
+
static inline void vn_decode_vkAllocateMemory_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkAllocateMemory *args)
{
vn_decode_VkDevice_lookup(dec, &args->device);
@@ -829,6 +927,38 @@ static inline void vn_encode_vkGetDeviceMemoryOpaqueCaptureAddress_reply(struct
/* skip args->pInfo */
}
+static inline void vn_decode_vkGetMemoryResourcePropertiesMESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_uint32_t(dec, &args->resourceId);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pMemoryResourceProperties = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pMemoryResourceProperties));
+ if (!args->pMemoryResourceProperties) return;
+ vn_decode_VkMemoryResourcePropertiesMESA_partial_temp(dec, args->pMemoryResourceProperties);
+ } else {
+ args->pMemoryResourceProperties = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkGetMemoryResourcePropertiesMESA_args_handle(struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ /* skip args->resourceId */
+ /* skip args->pMemoryResourceProperties */
+}
+
+static inline void vn_encode_vkGetMemoryResourcePropertiesMESA_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetMemoryResourcePropertiesMESA_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->device */
+ /* skip args->resourceId */
+ if (vn_encode_simple_pointer(enc, args->pMemoryResourceProperties))
+ vn_encode_VkMemoryResourcePropertiesMESA(enc, args->pMemoryResourceProperties);
+}
+
static inline void vn_dispatch_vkAllocateMemory(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkAllocateMemory args;
@@ -1016,6 +1146,35 @@ static inline void vn_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_d
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkGetMemoryResourcePropertiesMESA(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetMemoryResourcePropertiesMESA args;
+
+ if (!ctx->dispatch_vkGetMemoryResourcePropertiesMESA) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetMemoryResourcePropertiesMESA_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetMemoryResourcePropertiesMESA(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkGetMemoryResourcePropertiesMESA returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetMemoryResourcePropertiesMESA_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_DEVICE_MEMORY_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_dispatches.h b/src/venus/venus-protocol/vn_protocol_renderer_dispatches.h
index d45fdc4e..725a0e79 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_dispatches.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_dispatches.h
@@ -38,6 +38,7 @@
#include "vn_protocol_renderer_pipeline_cache.h"
#include "vn_protocol_renderer_command_pool.h"
#include "vn_protocol_renderer_command_buffer.h"
+#include "vn_protocol_renderer_private_data_slot.h"
static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
{
@@ -149,6 +150,8 @@ static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
case VK_COMMAND_TYPE_vkCmdBindVertexBuffers_EXT: return "vkCmdBindVertexBuffers";
case VK_COMMAND_TYPE_vkCmdDraw_EXT: return "vkCmdDraw";
case VK_COMMAND_TYPE_vkCmdDrawIndexed_EXT: return "vkCmdDrawIndexed";
+ case VK_COMMAND_TYPE_vkCmdDrawMultiEXT_EXT: return "vkCmdDrawMultiEXT";
+ case VK_COMMAND_TYPE_vkCmdDrawMultiIndexedEXT_EXT: return "vkCmdDrawMultiIndexedEXT";
case VK_COMMAND_TYPE_vkCmdDrawIndirect_EXT: return "vkCmdDrawIndirect";
case VK_COMMAND_TYPE_vkCmdDrawIndexedIndirect_EXT: return "vkCmdDrawIndexedIndirect";
case VK_COMMAND_TYPE_vkCmdDispatch_EXT: return "vkCmdDispatch";
@@ -170,6 +173,8 @@ static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
case VK_COMMAND_TYPE_vkCmdPipelineBarrier_EXT: return "vkCmdPipelineBarrier";
case VK_COMMAND_TYPE_vkCmdBeginQuery_EXT: return "vkCmdBeginQuery";
case VK_COMMAND_TYPE_vkCmdEndQuery_EXT: return "vkCmdEndQuery";
+ case VK_COMMAND_TYPE_vkCmdBeginConditionalRenderingEXT_EXT: return "vkCmdBeginConditionalRenderingEXT";
+ case VK_COMMAND_TYPE_vkCmdEndConditionalRenderingEXT_EXT: return "vkCmdEndConditionalRenderingEXT";
case VK_COMMAND_TYPE_vkCmdResetQueryPool_EXT: return "vkCmdResetQueryPool";
case VK_COMMAND_TYPE_vkCmdWriteTimestamp_EXT: return "vkCmdWriteTimestamp";
case VK_COMMAND_TYPE_vkCmdCopyQueryPoolResults_EXT: return "vkCmdCopyQueryPoolResults";
@@ -185,6 +190,7 @@ static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
case VK_COMMAND_TYPE_vkGetPhysicalDeviceQueueFamilyProperties2_EXT: return "vkGetPhysicalDeviceQueueFamilyProperties2";
case VK_COMMAND_TYPE_vkGetPhysicalDeviceMemoryProperties2_EXT: return "vkGetPhysicalDeviceMemoryProperties2";
case VK_COMMAND_TYPE_vkGetPhysicalDeviceSparseImageFormatProperties2_EXT: return "vkGetPhysicalDeviceSparseImageFormatProperties2";
+ case VK_COMMAND_TYPE_vkCmdPushDescriptorSetKHR_EXT: return "vkCmdPushDescriptorSetKHR";
case VK_COMMAND_TYPE_vkTrimCommandPool_EXT: return "vkTrimCommandPool";
case VK_COMMAND_TYPE_vkGetPhysicalDeviceExternalBufferProperties_EXT: return "vkGetPhysicalDeviceExternalBufferProperties";
case VK_COMMAND_TYPE_vkGetPhysicalDeviceExternalSemaphoreProperties_EXT: return "vkGetPhysicalDeviceExternalSemaphoreProperties";
@@ -200,10 +206,15 @@ static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
case VK_COMMAND_TYPE_vkGetBufferMemoryRequirements2_EXT: return "vkGetBufferMemoryRequirements2";
case VK_COMMAND_TYPE_vkGetImageMemoryRequirements2_EXT: return "vkGetImageMemoryRequirements2";
case VK_COMMAND_TYPE_vkGetImageSparseMemoryRequirements2_EXT: return "vkGetImageSparseMemoryRequirements2";
+ case VK_COMMAND_TYPE_vkGetDeviceBufferMemoryRequirements_EXT: return "vkGetDeviceBufferMemoryRequirements";
+ case VK_COMMAND_TYPE_vkGetDeviceImageMemoryRequirements_EXT: return "vkGetDeviceImageMemoryRequirements";
+ case VK_COMMAND_TYPE_vkGetDeviceImageSparseMemoryRequirements_EXT: return "vkGetDeviceImageSparseMemoryRequirements";
case VK_COMMAND_TYPE_vkCreateSamplerYcbcrConversion_EXT: return "vkCreateSamplerYcbcrConversion";
case VK_COMMAND_TYPE_vkDestroySamplerYcbcrConversion_EXT: return "vkDestroySamplerYcbcrConversion";
case VK_COMMAND_TYPE_vkGetDeviceQueue2_EXT: return "vkGetDeviceQueue2";
case VK_COMMAND_TYPE_vkGetDescriptorSetLayoutSupport_EXT: return "vkGetDescriptorSetLayoutSupport";
+ case VK_COMMAND_TYPE_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_EXT: return "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT";
+ case VK_COMMAND_TYPE_vkGetCalibratedTimestampsEXT_EXT: return "vkGetCalibratedTimestampsEXT";
case VK_COMMAND_TYPE_vkCreateRenderPass2_EXT: return "vkCreateRenderPass2";
case VK_COMMAND_TYPE_vkCmdBeginRenderPass2_EXT: return "vkCmdBeginRenderPass2";
case VK_COMMAND_TYPE_vkCmdNextSubpass2_EXT: return "vkCmdNextSubpass2";
@@ -223,6 +234,43 @@ static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
case VK_COMMAND_TYPE_vkGetBufferOpaqueCaptureAddress_EXT: return "vkGetBufferOpaqueCaptureAddress";
case VK_COMMAND_TYPE_vkGetBufferDeviceAddress_EXT: return "vkGetBufferDeviceAddress";
case VK_COMMAND_TYPE_vkGetDeviceMemoryOpaqueCaptureAddress_EXT: return "vkGetDeviceMemoryOpaqueCaptureAddress";
+ case VK_COMMAND_TYPE_vkCmdSetLineStippleEXT_EXT: return "vkCmdSetLineStippleEXT";
+ case VK_COMMAND_TYPE_vkGetPhysicalDeviceToolProperties_EXT: return "vkGetPhysicalDeviceToolProperties";
+ case VK_COMMAND_TYPE_vkCmdSetCullMode_EXT: return "vkCmdSetCullMode";
+ case VK_COMMAND_TYPE_vkCmdSetFrontFace_EXT: return "vkCmdSetFrontFace";
+ case VK_COMMAND_TYPE_vkCmdSetPrimitiveTopology_EXT: return "vkCmdSetPrimitiveTopology";
+ case VK_COMMAND_TYPE_vkCmdSetViewportWithCount_EXT: return "vkCmdSetViewportWithCount";
+ case VK_COMMAND_TYPE_vkCmdSetScissorWithCount_EXT: return "vkCmdSetScissorWithCount";
+ case VK_COMMAND_TYPE_vkCmdBindVertexBuffers2_EXT: return "vkCmdBindVertexBuffers2";
+ case VK_COMMAND_TYPE_vkCmdSetDepthTestEnable_EXT: return "vkCmdSetDepthTestEnable";
+ case VK_COMMAND_TYPE_vkCmdSetDepthWriteEnable_EXT: return "vkCmdSetDepthWriteEnable";
+ case VK_COMMAND_TYPE_vkCmdSetDepthCompareOp_EXT: return "vkCmdSetDepthCompareOp";
+ case VK_COMMAND_TYPE_vkCmdSetDepthBoundsTestEnable_EXT: return "vkCmdSetDepthBoundsTestEnable";
+ case VK_COMMAND_TYPE_vkCmdSetStencilTestEnable_EXT: return "vkCmdSetStencilTestEnable";
+ case VK_COMMAND_TYPE_vkCmdSetStencilOp_EXT: return "vkCmdSetStencilOp";
+ case VK_COMMAND_TYPE_vkCmdSetPatchControlPointsEXT_EXT: return "vkCmdSetPatchControlPointsEXT";
+ case VK_COMMAND_TYPE_vkCmdSetRasterizerDiscardEnable_EXT: return "vkCmdSetRasterizerDiscardEnable";
+ case VK_COMMAND_TYPE_vkCmdSetDepthBiasEnable_EXT: return "vkCmdSetDepthBiasEnable";
+ case VK_COMMAND_TYPE_vkCmdSetLogicOpEXT_EXT: return "vkCmdSetLogicOpEXT";
+ case VK_COMMAND_TYPE_vkCmdSetPrimitiveRestartEnable_EXT: return "vkCmdSetPrimitiveRestartEnable";
+ case VK_COMMAND_TYPE_vkCreatePrivateDataSlot_EXT: return "vkCreatePrivateDataSlot";
+ case VK_COMMAND_TYPE_vkDestroyPrivateDataSlot_EXT: return "vkDestroyPrivateDataSlot";
+ case VK_COMMAND_TYPE_vkSetPrivateData_EXT: return "vkSetPrivateData";
+ case VK_COMMAND_TYPE_vkGetPrivateData_EXT: return "vkGetPrivateData";
+ case VK_COMMAND_TYPE_vkCmdCopyBuffer2_EXT: return "vkCmdCopyBuffer2";
+ case VK_COMMAND_TYPE_vkCmdCopyImage2_EXT: return "vkCmdCopyImage2";
+ case VK_COMMAND_TYPE_vkCmdBlitImage2_EXT: return "vkCmdBlitImage2";
+ case VK_COMMAND_TYPE_vkCmdCopyBufferToImage2_EXT: return "vkCmdCopyBufferToImage2";
+ case VK_COMMAND_TYPE_vkCmdCopyImageToBuffer2_EXT: return "vkCmdCopyImageToBuffer2";
+ case VK_COMMAND_TYPE_vkCmdResolveImage2_EXT: return "vkCmdResolveImage2";
+ case VK_COMMAND_TYPE_vkCmdSetEvent2_EXT: return "vkCmdSetEvent2";
+ case VK_COMMAND_TYPE_vkCmdResetEvent2_EXT: return "vkCmdResetEvent2";
+ case VK_COMMAND_TYPE_vkCmdWaitEvents2_EXT: return "vkCmdWaitEvents2";
+ case VK_COMMAND_TYPE_vkCmdPipelineBarrier2_EXT: return "vkCmdPipelineBarrier2";
+ case VK_COMMAND_TYPE_vkQueueSubmit2_EXT: return "vkQueueSubmit2";
+ case VK_COMMAND_TYPE_vkCmdWriteTimestamp2_EXT: return "vkCmdWriteTimestamp2";
+ case VK_COMMAND_TYPE_vkCmdBeginRendering_EXT: return "vkCmdBeginRendering";
+ case VK_COMMAND_TYPE_vkCmdEndRendering_EXT: return "vkCmdEndRendering";
case VK_COMMAND_TYPE_vkSetReplyCommandStreamMESA_EXT: return "vkSetReplyCommandStreamMESA";
case VK_COMMAND_TYPE_vkSeekReplyCommandStreamMESA_EXT: return "vkSeekReplyCommandStreamMESA";
case VK_COMMAND_TYPE_vkExecuteCommandStreamsMESA_EXT: return "vkExecuteCommandStreamsMESA";
@@ -231,18 +279,26 @@ static inline const char *vn_dispatch_command_name(VkCommandTypeEXT type)
case VK_COMMAND_TYPE_vkNotifyRingMESA_EXT: return "vkNotifyRingMESA";
case VK_COMMAND_TYPE_vkWriteRingExtraMESA_EXT: return "vkWriteRingExtraMESA";
case VK_COMMAND_TYPE_vkGetMemoryResourcePropertiesMESA_EXT: return "vkGetMemoryResourcePropertiesMESA";
+ case VK_COMMAND_TYPE_vkResetFenceResource100000MESA_EXT: return "vkResetFenceResource100000MESA";
+ case VK_COMMAND_TYPE_vkWaitSemaphoreResource100000MESA_EXT: return "vkWaitSemaphoreResource100000MESA";
+ case VK_COMMAND_TYPE_vkImportSemaphoreResource100000MESA_EXT: return "vkImportSemaphoreResource100000MESA";
case VK_COMMAND_TYPE_vkGetVenusExperimentalFeatureData100000MESA_EXT: return "vkGetVenusExperimentalFeatureData100000MESA";
case VK_COMMAND_TYPE_vkGetDeviceProcAddr_EXT: return "vkGetDeviceProcAddr";
case VK_COMMAND_TYPE_vkGetInstanceProcAddr_EXT: return "vkGetInstanceProcAddr";
case VK_COMMAND_TYPE_vkMapMemory_EXT: return "vkMapMemory";
case VK_COMMAND_TYPE_vkGetMemoryFdKHR_EXT: return "vkGetMemoryFdKHR";
case VK_COMMAND_TYPE_vkGetMemoryFdPropertiesKHR_EXT: return "vkGetMemoryFdPropertiesKHR";
+ case VK_COMMAND_TYPE_vkGetSemaphoreFdKHR_EXT: return "vkGetSemaphoreFdKHR";
+ case VK_COMMAND_TYPE_vkImportSemaphoreFdKHR_EXT: return "vkImportSemaphoreFdKHR";
+ case VK_COMMAND_TYPE_vkGetFenceFdKHR_EXT: return "vkGetFenceFdKHR";
+ case VK_COMMAND_TYPE_vkImportFenceFdKHR_EXT: return "vkImportFenceFdKHR";
case VK_COMMAND_TYPE_vkUpdateDescriptorSetWithTemplate_EXT: return "vkUpdateDescriptorSetWithTemplate";
+ case VK_COMMAND_TYPE_vkCmdPushDescriptorSetWithTemplateKHR_EXT: return "vkCmdPushDescriptorSetWithTemplateKHR";
default: return "unknown";
}
}
-static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags) = {
+static void (*const vn_dispatch_table[251])(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags) = {
[VK_COMMAND_TYPE_vkCreateInstance_EXT] = vn_dispatch_vkCreateInstance,
[VK_COMMAND_TYPE_vkDestroyInstance_EXT] = vn_dispatch_vkDestroyInstance,
[VK_COMMAND_TYPE_vkEnumeratePhysicalDevices_EXT] = vn_dispatch_vkEnumeratePhysicalDevices,
@@ -350,6 +406,8 @@ static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkC
[VK_COMMAND_TYPE_vkCmdBindVertexBuffers_EXT] = vn_dispatch_vkCmdBindVertexBuffers,
[VK_COMMAND_TYPE_vkCmdDraw_EXT] = vn_dispatch_vkCmdDraw,
[VK_COMMAND_TYPE_vkCmdDrawIndexed_EXT] = vn_dispatch_vkCmdDrawIndexed,
+ [VK_COMMAND_TYPE_vkCmdDrawMultiEXT_EXT] = vn_dispatch_vkCmdDrawMultiEXT,
+ [VK_COMMAND_TYPE_vkCmdDrawMultiIndexedEXT_EXT] = vn_dispatch_vkCmdDrawMultiIndexedEXT,
[VK_COMMAND_TYPE_vkCmdDrawIndirect_EXT] = vn_dispatch_vkCmdDrawIndirect,
[VK_COMMAND_TYPE_vkCmdDrawIndexedIndirect_EXT] = vn_dispatch_vkCmdDrawIndexedIndirect,
[VK_COMMAND_TYPE_vkCmdDispatch_EXT] = vn_dispatch_vkCmdDispatch,
@@ -371,6 +429,8 @@ static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkC
[VK_COMMAND_TYPE_vkCmdPipelineBarrier_EXT] = vn_dispatch_vkCmdPipelineBarrier,
[VK_COMMAND_TYPE_vkCmdBeginQuery_EXT] = vn_dispatch_vkCmdBeginQuery,
[VK_COMMAND_TYPE_vkCmdEndQuery_EXT] = vn_dispatch_vkCmdEndQuery,
+ [VK_COMMAND_TYPE_vkCmdBeginConditionalRenderingEXT_EXT] = vn_dispatch_vkCmdBeginConditionalRenderingEXT,
+ [VK_COMMAND_TYPE_vkCmdEndConditionalRenderingEXT_EXT] = vn_dispatch_vkCmdEndConditionalRenderingEXT,
[VK_COMMAND_TYPE_vkCmdResetQueryPool_EXT] = vn_dispatch_vkCmdResetQueryPool,
[VK_COMMAND_TYPE_vkCmdWriteTimestamp_EXT] = vn_dispatch_vkCmdWriteTimestamp,
[VK_COMMAND_TYPE_vkCmdCopyQueryPoolResults_EXT] = vn_dispatch_vkCmdCopyQueryPoolResults,
@@ -386,6 +446,7 @@ static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkC
[VK_COMMAND_TYPE_vkGetPhysicalDeviceQueueFamilyProperties2_EXT] = vn_dispatch_vkGetPhysicalDeviceQueueFamilyProperties2,
[VK_COMMAND_TYPE_vkGetPhysicalDeviceMemoryProperties2_EXT] = vn_dispatch_vkGetPhysicalDeviceMemoryProperties2,
[VK_COMMAND_TYPE_vkGetPhysicalDeviceSparseImageFormatProperties2_EXT] = vn_dispatch_vkGetPhysicalDeviceSparseImageFormatProperties2,
+ [VK_COMMAND_TYPE_vkCmdPushDescriptorSetKHR_EXT] = vn_dispatch_vkCmdPushDescriptorSetKHR,
[VK_COMMAND_TYPE_vkTrimCommandPool_EXT] = vn_dispatch_vkTrimCommandPool,
[VK_COMMAND_TYPE_vkGetPhysicalDeviceExternalBufferProperties_EXT] = vn_dispatch_vkGetPhysicalDeviceExternalBufferProperties,
[VK_COMMAND_TYPE_vkGetPhysicalDeviceExternalSemaphoreProperties_EXT] = vn_dispatch_vkGetPhysicalDeviceExternalSemaphoreProperties,
@@ -401,10 +462,15 @@ static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkC
[VK_COMMAND_TYPE_vkGetBufferMemoryRequirements2_EXT] = vn_dispatch_vkGetBufferMemoryRequirements2,
[VK_COMMAND_TYPE_vkGetImageMemoryRequirements2_EXT] = vn_dispatch_vkGetImageMemoryRequirements2,
[VK_COMMAND_TYPE_vkGetImageSparseMemoryRequirements2_EXT] = vn_dispatch_vkGetImageSparseMemoryRequirements2,
+ [VK_COMMAND_TYPE_vkGetDeviceBufferMemoryRequirements_EXT] = vn_dispatch_vkGetDeviceBufferMemoryRequirements,
+ [VK_COMMAND_TYPE_vkGetDeviceImageMemoryRequirements_EXT] = vn_dispatch_vkGetDeviceImageMemoryRequirements,
+ [VK_COMMAND_TYPE_vkGetDeviceImageSparseMemoryRequirements_EXT] = vn_dispatch_vkGetDeviceImageSparseMemoryRequirements,
[VK_COMMAND_TYPE_vkCreateSamplerYcbcrConversion_EXT] = vn_dispatch_vkCreateSamplerYcbcrConversion,
[VK_COMMAND_TYPE_vkDestroySamplerYcbcrConversion_EXT] = vn_dispatch_vkDestroySamplerYcbcrConversion,
[VK_COMMAND_TYPE_vkGetDeviceQueue2_EXT] = vn_dispatch_vkGetDeviceQueue2,
[VK_COMMAND_TYPE_vkGetDescriptorSetLayoutSupport_EXT] = vn_dispatch_vkGetDescriptorSetLayoutSupport,
+ [VK_COMMAND_TYPE_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_EXT] = vn_dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT,
+ [VK_COMMAND_TYPE_vkGetCalibratedTimestampsEXT_EXT] = vn_dispatch_vkGetCalibratedTimestampsEXT,
[VK_COMMAND_TYPE_vkCreateRenderPass2_EXT] = vn_dispatch_vkCreateRenderPass2,
[VK_COMMAND_TYPE_vkCmdBeginRenderPass2_EXT] = vn_dispatch_vkCmdBeginRenderPass2,
[VK_COMMAND_TYPE_vkCmdNextSubpass2_EXT] = vn_dispatch_vkCmdNextSubpass2,
@@ -424,6 +490,43 @@ static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkC
[VK_COMMAND_TYPE_vkGetBufferOpaqueCaptureAddress_EXT] = vn_dispatch_vkGetBufferOpaqueCaptureAddress,
[VK_COMMAND_TYPE_vkGetBufferDeviceAddress_EXT] = vn_dispatch_vkGetBufferDeviceAddress,
[VK_COMMAND_TYPE_vkGetDeviceMemoryOpaqueCaptureAddress_EXT] = vn_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress,
+ [VK_COMMAND_TYPE_vkCmdSetLineStippleEXT_EXT] = vn_dispatch_vkCmdSetLineStippleEXT,
+ [VK_COMMAND_TYPE_vkGetPhysicalDeviceToolProperties_EXT] = vn_dispatch_vkGetPhysicalDeviceToolProperties,
+ [VK_COMMAND_TYPE_vkCmdSetCullMode_EXT] = vn_dispatch_vkCmdSetCullMode,
+ [VK_COMMAND_TYPE_vkCmdSetFrontFace_EXT] = vn_dispatch_vkCmdSetFrontFace,
+ [VK_COMMAND_TYPE_vkCmdSetPrimitiveTopology_EXT] = vn_dispatch_vkCmdSetPrimitiveTopology,
+ [VK_COMMAND_TYPE_vkCmdSetViewportWithCount_EXT] = vn_dispatch_vkCmdSetViewportWithCount,
+ [VK_COMMAND_TYPE_vkCmdSetScissorWithCount_EXT] = vn_dispatch_vkCmdSetScissorWithCount,
+ [VK_COMMAND_TYPE_vkCmdBindVertexBuffers2_EXT] = vn_dispatch_vkCmdBindVertexBuffers2,
+ [VK_COMMAND_TYPE_vkCmdSetDepthTestEnable_EXT] = vn_dispatch_vkCmdSetDepthTestEnable,
+ [VK_COMMAND_TYPE_vkCmdSetDepthWriteEnable_EXT] = vn_dispatch_vkCmdSetDepthWriteEnable,
+ [VK_COMMAND_TYPE_vkCmdSetDepthCompareOp_EXT] = vn_dispatch_vkCmdSetDepthCompareOp,
+ [VK_COMMAND_TYPE_vkCmdSetDepthBoundsTestEnable_EXT] = vn_dispatch_vkCmdSetDepthBoundsTestEnable,
+ [VK_COMMAND_TYPE_vkCmdSetStencilTestEnable_EXT] = vn_dispatch_vkCmdSetStencilTestEnable,
+ [VK_COMMAND_TYPE_vkCmdSetStencilOp_EXT] = vn_dispatch_vkCmdSetStencilOp,
+ [VK_COMMAND_TYPE_vkCmdSetPatchControlPointsEXT_EXT] = vn_dispatch_vkCmdSetPatchControlPointsEXT,
+ [VK_COMMAND_TYPE_vkCmdSetRasterizerDiscardEnable_EXT] = vn_dispatch_vkCmdSetRasterizerDiscardEnable,
+ [VK_COMMAND_TYPE_vkCmdSetDepthBiasEnable_EXT] = vn_dispatch_vkCmdSetDepthBiasEnable,
+ [VK_COMMAND_TYPE_vkCmdSetLogicOpEXT_EXT] = vn_dispatch_vkCmdSetLogicOpEXT,
+ [VK_COMMAND_TYPE_vkCmdSetPrimitiveRestartEnable_EXT] = vn_dispatch_vkCmdSetPrimitiveRestartEnable,
+ [VK_COMMAND_TYPE_vkCreatePrivateDataSlot_EXT] = vn_dispatch_vkCreatePrivateDataSlot,
+ [VK_COMMAND_TYPE_vkDestroyPrivateDataSlot_EXT] = vn_dispatch_vkDestroyPrivateDataSlot,
+ [VK_COMMAND_TYPE_vkSetPrivateData_EXT] = vn_dispatch_vkSetPrivateData,
+ [VK_COMMAND_TYPE_vkGetPrivateData_EXT] = vn_dispatch_vkGetPrivateData,
+ [VK_COMMAND_TYPE_vkCmdCopyBuffer2_EXT] = vn_dispatch_vkCmdCopyBuffer2,
+ [VK_COMMAND_TYPE_vkCmdCopyImage2_EXT] = vn_dispatch_vkCmdCopyImage2,
+ [VK_COMMAND_TYPE_vkCmdBlitImage2_EXT] = vn_dispatch_vkCmdBlitImage2,
+ [VK_COMMAND_TYPE_vkCmdCopyBufferToImage2_EXT] = vn_dispatch_vkCmdCopyBufferToImage2,
+ [VK_COMMAND_TYPE_vkCmdCopyImageToBuffer2_EXT] = vn_dispatch_vkCmdCopyImageToBuffer2,
+ [VK_COMMAND_TYPE_vkCmdResolveImage2_EXT] = vn_dispatch_vkCmdResolveImage2,
+ [VK_COMMAND_TYPE_vkCmdSetEvent2_EXT] = vn_dispatch_vkCmdSetEvent2,
+ [VK_COMMAND_TYPE_vkCmdResetEvent2_EXT] = vn_dispatch_vkCmdResetEvent2,
+ [VK_COMMAND_TYPE_vkCmdWaitEvents2_EXT] = vn_dispatch_vkCmdWaitEvents2,
+ [VK_COMMAND_TYPE_vkCmdPipelineBarrier2_EXT] = vn_dispatch_vkCmdPipelineBarrier2,
+ [VK_COMMAND_TYPE_vkQueueSubmit2_EXT] = vn_dispatch_vkQueueSubmit2,
+ [VK_COMMAND_TYPE_vkCmdWriteTimestamp2_EXT] = vn_dispatch_vkCmdWriteTimestamp2,
+ [VK_COMMAND_TYPE_vkCmdBeginRendering_EXT] = vn_dispatch_vkCmdBeginRendering,
+ [VK_COMMAND_TYPE_vkCmdEndRendering_EXT] = vn_dispatch_vkCmdEndRendering,
[VK_COMMAND_TYPE_vkSetReplyCommandStreamMESA_EXT] = vn_dispatch_vkSetReplyCommandStreamMESA,
[VK_COMMAND_TYPE_vkSeekReplyCommandStreamMESA_EXT] = vn_dispatch_vkSeekReplyCommandStreamMESA,
[VK_COMMAND_TYPE_vkExecuteCommandStreamsMESA_EXT] = vn_dispatch_vkExecuteCommandStreamsMESA,
@@ -432,6 +535,9 @@ static void (*const vn_dispatch_table[196])(struct vn_dispatch_context *ctx, VkC
[VK_COMMAND_TYPE_vkNotifyRingMESA_EXT] = vn_dispatch_vkNotifyRingMESA,
[VK_COMMAND_TYPE_vkWriteRingExtraMESA_EXT] = vn_dispatch_vkWriteRingExtraMESA,
[VK_COMMAND_TYPE_vkGetMemoryResourcePropertiesMESA_EXT] = vn_dispatch_vkGetMemoryResourcePropertiesMESA,
+ [VK_COMMAND_TYPE_vkResetFenceResource100000MESA_EXT] = vn_dispatch_vkResetFenceResource100000MESA,
+ [VK_COMMAND_TYPE_vkWaitSemaphoreResource100000MESA_EXT] = vn_dispatch_vkWaitSemaphoreResource100000MESA,
+ [VK_COMMAND_TYPE_vkImportSemaphoreResource100000MESA_EXT] = vn_dispatch_vkImportSemaphoreResource100000MESA,
[VK_COMMAND_TYPE_vkGetVenusExperimentalFeatureData100000MESA_EXT] = vn_dispatch_vkGetVenusExperimentalFeatureData100000MESA,
};
@@ -447,7 +553,7 @@ static inline void vn_dispatch_command(struct vn_dispatch_context *ctx)
#ifdef DEBUG
TRACE_SCOPE_SLOW(vn_dispatch_command_name(cmd_type));
#endif
- if (cmd_type < 196 && vn_dispatch_table[cmd_type])
+ if (cmd_type < 251 && vn_dispatch_table[cmd_type])
vn_dispatch_table[cmd_type](ctx, cmd_flags);
else
vn_cs_decoder_set_fatal(ctx->decoder);
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_fence.h b/src/venus/venus-protocol/vn_protocol_renderer_fence.h
index fb94b0ca..6677bef7 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_fence.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_fence.h
@@ -14,6 +14,13 @@
#pragma GCC diagnostic ignored "-Wpointer-arith"
#pragma GCC diagnostic ignored "-Wunused-parameter"
+/*
+ * These structs/unions/commands are not included
+ *
+ * vkGetFenceFdKHR
+ * vkImportFenceFdKHR
+ */
+
/* struct VkExportFenceCreateInfo chain */
static inline void *
@@ -324,6 +331,26 @@ static inline void vn_encode_vkWaitForFences_reply(struct vn_cs_encoder *enc, co
/* skip args->timeout */
}
+static inline void vn_decode_vkResetFenceResource100000MESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkResetFenceResource100000MESA *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_VkFence_lookup(dec, &args->fence);
+}
+
+static inline void vn_replace_vkResetFenceResource100000MESA_args_handle(struct vn_command_vkResetFenceResource100000MESA *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ vn_replace_VkFence_handle(&args->fence);
+}
+
+static inline void vn_encode_vkResetFenceResource100000MESA_reply(struct vn_cs_encoder *enc, const struct vn_command_vkResetFenceResource100000MESA *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkResetFenceResource100000MESA_EXT});
+
+ /* skip args->device */
+ /* skip args->fence */
+}
+
static inline void vn_dispatch_vkCreateFence(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkCreateFence args;
@@ -465,6 +492,31 @@ static inline void vn_dispatch_vkWaitForFences(struct vn_dispatch_context *ctx,
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkResetFenceResource100000MESA(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkResetFenceResource100000MESA args;
+
+ if (!ctx->dispatch_vkResetFenceResource100000MESA) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkResetFenceResource100000MESA_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkResetFenceResource100000MESA(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkResetFenceResource100000MESA_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_FENCE_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_handles.h b/src/venus/venus-protocol/vn_protocol_renderer_handles.h
index f9563891..e0beea18 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_handles.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_handles.h
@@ -26,7 +26,7 @@ vn_decode_VkInstance_temp(struct vn_cs_decoder *dec, VkInstance *val)
vn_decode_uint64_t(dec, &id);
if (vn_cs_handle_indirect_id(VK_OBJECT_TYPE_INSTANCE)) {
*val = vn_cs_decoder_alloc_temp(dec, sizeof(vn_object_id));
- if (!val)
+ if (!*val)
return;
}
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_INSTANCE);
@@ -62,7 +62,7 @@ vn_decode_VkPhysicalDevice_temp(struct vn_cs_decoder *dec, VkPhysicalDevice *val
vn_decode_uint64_t(dec, &id);
if (vn_cs_handle_indirect_id(VK_OBJECT_TYPE_PHYSICAL_DEVICE)) {
*val = vn_cs_decoder_alloc_temp(dec, sizeof(vn_object_id));
- if (!val)
+ if (!*val)
return;
}
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
@@ -98,7 +98,7 @@ vn_decode_VkDevice_temp(struct vn_cs_decoder *dec, VkDevice *val)
vn_decode_uint64_t(dec, &id);
if (vn_cs_handle_indirect_id(VK_OBJECT_TYPE_DEVICE)) {
*val = vn_cs_decoder_alloc_temp(dec, sizeof(vn_object_id));
- if (!val)
+ if (!*val)
return;
}
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_DEVICE);
@@ -134,7 +134,7 @@ vn_decode_VkQueue_temp(struct vn_cs_decoder *dec, VkQueue *val)
vn_decode_uint64_t(dec, &id);
if (vn_cs_handle_indirect_id(VK_OBJECT_TYPE_QUEUE)) {
*val = vn_cs_decoder_alloc_temp(dec, sizeof(vn_object_id));
- if (!val)
+ if (!*val)
return;
}
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_QUEUE);
@@ -170,7 +170,7 @@ vn_decode_VkCommandBuffer_temp(struct vn_cs_decoder *dec, VkCommandBuffer *val)
vn_decode_uint64_t(dec, &id);
if (vn_cs_handle_indirect_id(VK_OBJECT_TYPE_COMMAND_BUFFER)) {
*val = vn_cs_decoder_alloc_temp(dec, sizeof(vn_object_id));
- if (!val)
+ if (!*val)
return;
}
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_COMMAND_BUFFER);
@@ -872,4 +872,35 @@ vn_replace_VkSamplerYcbcrConversion_handle(VkSamplerYcbcrConversion *val)
*val = (VkSamplerYcbcrConversion)vn_cs_get_object_handle((const void **)val, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);
}
+/* VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlot) */
+
+static inline void
+vn_encode_VkPrivateDataSlot(struct vn_cs_encoder *enc, const VkPrivateDataSlot *val)
+{
+ const uint64_t id = vn_cs_handle_load_id((const void **)val, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT);
+ vn_encode_uint64_t(enc, &id);
+}
+
+static inline void
+vn_decode_VkPrivateDataSlot(struct vn_cs_decoder *dec, VkPrivateDataSlot *val)
+{
+ uint64_t id;
+ vn_decode_uint64_t(dec, &id);
+ vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT);
+}
+
+static inline void
+vn_decode_VkPrivateDataSlot_lookup(struct vn_cs_decoder *dec, VkPrivateDataSlot *val)
+{
+ uint64_t id;
+ vn_decode_uint64_t(dec, &id);
+ *val = (VkPrivateDataSlot)(uintptr_t)vn_cs_decoder_lookup_object(dec, id, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT);
+}
+
+static inline void
+vn_replace_VkPrivateDataSlot_handle(VkPrivateDataSlot *val)
+{
+ *val = (VkPrivateDataSlot)vn_cs_get_object_handle((const void **)val, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT);
+}
+
#endif /* VN_PROTOCOL_RENDERER_HANDLES_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_image.h b/src/venus/venus-protocol/vn_protocol_renderer_image.h
index 9b57ecbd..fde1d624 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_image.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_image.h
@@ -433,43 +433,6 @@ vn_replace_VkImageCreateInfo_handle(VkImageCreateInfo *val)
/* struct VkBindImageMemoryDeviceGroupInfo chain */
-static inline void
-vn_encode_VkBindImageMemoryDeviceGroupInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkBindImageMemoryDeviceGroupInfo_self(struct vn_cs_encoder *enc, const VkBindImageMemoryDeviceGroupInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_uint32_t(enc, &val->deviceIndexCount);
- if (val->pDeviceIndices) {
- vn_encode_array_size(enc, val->deviceIndexCount);
- vn_encode_uint32_t_array(enc, val->pDeviceIndices, val->deviceIndexCount);
- } else {
- vn_encode_array_size(enc, 0);
- }
- vn_encode_uint32_t(enc, &val->splitInstanceBindRegionCount);
- if (val->pSplitInstanceBindRegions) {
- vn_encode_array_size(enc, val->splitInstanceBindRegionCount);
- for (uint32_t i = 0; i < val->splitInstanceBindRegionCount; i++)
- vn_encode_VkRect2D(enc, &val->pSplitInstanceBindRegions[i]);
- } else {
- vn_encode_array_size(enc, 0);
- }
-}
-
-static inline void
-vn_encode_VkBindImageMemoryDeviceGroupInfo(struct vn_cs_encoder *enc, const VkBindImageMemoryDeviceGroupInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO });
- vn_encode_VkBindImageMemoryDeviceGroupInfo_pnext(enc, val->pNext);
- vn_encode_VkBindImageMemoryDeviceGroupInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkBindImageMemoryDeviceGroupInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -553,29 +516,6 @@ vn_replace_VkBindImageMemoryDeviceGroupInfo_handle(VkBindImageMemoryDeviceGroupI
/* struct VkBindImagePlaneMemoryInfo chain */
-static inline void
-vn_encode_VkBindImagePlaneMemoryInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkBindImagePlaneMemoryInfo_self(struct vn_cs_encoder *enc, const VkBindImagePlaneMemoryInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_VkImageAspectFlagBits(enc, &val->planeAspect);
-}
-
-static inline void
-vn_encode_VkBindImagePlaneMemoryInfo(struct vn_cs_encoder *enc, const VkBindImagePlaneMemoryInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO });
- vn_encode_VkBindImagePlaneMemoryInfo_pnext(enc, val->pNext);
- vn_encode_VkBindImagePlaneMemoryInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkBindImagePlaneMemoryInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -633,53 +573,6 @@ vn_replace_VkBindImagePlaneMemoryInfo_handle(VkBindImagePlaneMemoryInfo *val)
/* struct VkBindImageMemoryInfo chain */
-static inline void
-vn_encode_VkBindImageMemoryInfo_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- const VkBaseInStructure *pnext = val;
-
- while (pnext) {
- switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
- vn_encode_simple_pointer(enc, pnext);
- vn_encode_VkStructureType(enc, &pnext->sType);
- vn_encode_VkBindImageMemoryInfo_pnext(enc, pnext->pNext);
- vn_encode_VkBindImageMemoryDeviceGroupInfo_self(enc, (const VkBindImageMemoryDeviceGroupInfo *)pnext);
- return;
- case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
- vn_encode_simple_pointer(enc, pnext);
- vn_encode_VkStructureType(enc, &pnext->sType);
- vn_encode_VkBindImageMemoryInfo_pnext(enc, pnext->pNext);
- vn_encode_VkBindImagePlaneMemoryInfo_self(enc, (const VkBindImagePlaneMemoryInfo *)pnext);
- return;
- default:
- /* ignore unknown/unsupported struct */
- break;
- }
- pnext = pnext->pNext;
- }
-
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkBindImageMemoryInfo_self(struct vn_cs_encoder *enc, const VkBindImageMemoryInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_VkImage(enc, &val->image);
- vn_encode_VkDeviceMemory(enc, &val->memory);
- vn_encode_VkDeviceSize(enc, &val->memoryOffset);
-}
-
-static inline void
-vn_encode_VkBindImageMemoryInfo(struct vn_cs_encoder *enc, const VkBindImageMemoryInfo *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO });
- vn_encode_VkBindImageMemoryInfo_pnext(enc, val->pNext);
- vn_encode_VkBindImageMemoryInfo_self(enc, val);
-}
-
static inline void *
vn_decode_VkBindImageMemoryInfo_pnext_temp(struct vn_cs_decoder *dec)
{
@@ -1021,6 +914,73 @@ vn_decode_VkSparseImageMemoryRequirements2_partial_temp(struct vn_cs_decoder *de
vn_decode_VkSparseImageMemoryRequirements2_self_partial_temp(dec, val);
}
+/* struct VkDeviceImageMemoryRequirements chain */
+
+static inline void *
+vn_decode_VkDeviceImageMemoryRequirements_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkDeviceImageMemoryRequirements_self_temp(struct vn_cs_decoder *dec, VkDeviceImageMemoryRequirements *val)
+{
+ /* skip val->{sType,pNext} */
+ if (vn_decode_simple_pointer(dec)) {
+ val->pCreateInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pCreateInfo));
+ if (!val->pCreateInfo) return;
+ vn_decode_VkImageCreateInfo_temp(dec, (VkImageCreateInfo *)val->pCreateInfo);
+ } else {
+ val->pCreateInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ vn_decode_VkImageAspectFlagBits(dec, &val->planeAspect);
+}
+
+static inline void
+vn_decode_VkDeviceImageMemoryRequirements_temp(struct vn_cs_decoder *dec, VkDeviceImageMemoryRequirements *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkDeviceImageMemoryRequirements_pnext_temp(dec);
+ vn_decode_VkDeviceImageMemoryRequirements_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkDeviceImageMemoryRequirements_handle_self(VkDeviceImageMemoryRequirements *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ if (val->pCreateInfo)
+ vn_replace_VkImageCreateInfo_handle((VkImageCreateInfo *)val->pCreateInfo);
+ /* skip val->planeAspect */
+}
+
+static inline void
+vn_replace_VkDeviceImageMemoryRequirements_handle(VkDeviceImageMemoryRequirements *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS:
+ vn_replace_VkDeviceImageMemoryRequirements_handle_self((VkDeviceImageMemoryRequirements *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkImageDrmFormatModifierPropertiesEXT chain */
static inline void
@@ -1430,6 +1390,102 @@ static inline void vn_encode_vkGetImageSparseMemoryRequirements2_reply(struct vn
}
}
+static inline void vn_decode_vkGetDeviceImageMemoryRequirements_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetDeviceImageMemoryRequirements *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pInfo));
+ if (!args->pInfo) return;
+ vn_decode_VkDeviceImageMemoryRequirements_temp(dec, (VkDeviceImageMemoryRequirements *)args->pInfo);
+ } else {
+ args->pInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ args->pMemoryRequirements = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pMemoryRequirements));
+ if (!args->pMemoryRequirements) return;
+ vn_decode_VkMemoryRequirements2_partial_temp(dec, args->pMemoryRequirements);
+ } else {
+ args->pMemoryRequirements = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkGetDeviceImageMemoryRequirements_args_handle(struct vn_command_vkGetDeviceImageMemoryRequirements *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ if (args->pInfo)
+ vn_replace_VkDeviceImageMemoryRequirements_handle((VkDeviceImageMemoryRequirements *)args->pInfo);
+ /* skip args->pMemoryRequirements */
+}
+
+static inline void vn_encode_vkGetDeviceImageMemoryRequirements_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetDeviceImageMemoryRequirements *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetDeviceImageMemoryRequirements_EXT});
+
+ /* skip args->device */
+ /* skip args->pInfo */
+ if (vn_encode_simple_pointer(enc, args->pMemoryRequirements))
+ vn_encode_VkMemoryRequirements2(enc, args->pMemoryRequirements);
+}
+
+static inline void vn_decode_vkGetDeviceImageSparseMemoryRequirements_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetDeviceImageSparseMemoryRequirements *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pInfo));
+ if (!args->pInfo) return;
+ vn_decode_VkDeviceImageMemoryRequirements_temp(dec, (VkDeviceImageMemoryRequirements *)args->pInfo);
+ } else {
+ args->pInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ args->pSparseMemoryRequirementCount = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pSparseMemoryRequirementCount));
+ if (!args->pSparseMemoryRequirementCount) return;
+ vn_decode_uint32_t(dec, args->pSparseMemoryRequirementCount);
+ } else {
+ args->pSparseMemoryRequirementCount = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, (args->pSparseMemoryRequirementCount ? *args->pSparseMemoryRequirementCount : 0));
+ args->pSparseMemoryRequirements = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pSparseMemoryRequirements) * iter_count);
+ if (!args->pSparseMemoryRequirements) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkSparseImageMemoryRequirements2_partial_temp(dec, &args->pSparseMemoryRequirements[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ args->pSparseMemoryRequirements = NULL;
+ }
+}
+
+static inline void vn_replace_vkGetDeviceImageSparseMemoryRequirements_args_handle(struct vn_command_vkGetDeviceImageSparseMemoryRequirements *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ if (args->pInfo)
+ vn_replace_VkDeviceImageMemoryRequirements_handle((VkDeviceImageMemoryRequirements *)args->pInfo);
+ /* skip args->pSparseMemoryRequirementCount */
+ /* skip args->pSparseMemoryRequirements */
+}
+
+static inline void vn_encode_vkGetDeviceImageSparseMemoryRequirements_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetDeviceImageSparseMemoryRequirements *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetDeviceImageSparseMemoryRequirements_EXT});
+
+ /* skip args->device */
+ /* skip args->pInfo */
+ if (vn_encode_simple_pointer(enc, args->pSparseMemoryRequirementCount))
+ vn_encode_uint32_t(enc, args->pSparseMemoryRequirementCount);
+ if (args->pSparseMemoryRequirements) {
+ vn_encode_array_size(enc, (args->pSparseMemoryRequirementCount ? *args->pSparseMemoryRequirementCount : 0));
+ for (uint32_t i = 0; i < (args->pSparseMemoryRequirementCount ? *args->pSparseMemoryRequirementCount : 0); i++)
+ vn_encode_VkSparseImageMemoryRequirements2(enc, &args->pSparseMemoryRequirements[i]);
+ } else {
+ vn_encode_array_size(enc, 0);
+ }
+}
+
static inline void vn_decode_vkGetImageDrmFormatModifierPropertiesEXT_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetImageDrmFormatModifierPropertiesEXT *args)
{
vn_decode_VkDevice_lookup(dec, &args->device);
@@ -1699,6 +1755,56 @@ static inline void vn_dispatch_vkGetImageSparseMemoryRequirements2(struct vn_dis
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkGetDeviceImageMemoryRequirements(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetDeviceImageMemoryRequirements args;
+
+ if (!ctx->dispatch_vkGetDeviceImageMemoryRequirements) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetDeviceImageMemoryRequirements_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetDeviceImageMemoryRequirements(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetDeviceImageMemoryRequirements_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkGetDeviceImageSparseMemoryRequirements(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetDeviceImageSparseMemoryRequirements args;
+
+ if (!ctx->dispatch_vkGetDeviceImageSparseMemoryRequirements) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetDeviceImageSparseMemoryRequirements_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetDeviceImageSparseMemoryRequirements(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetDeviceImageSparseMemoryRequirements_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
static inline void vn_dispatch_vkGetImageDrmFormatModifierPropertiesEXT(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkGetImageDrmFormatModifierPropertiesEXT args;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_image_view.h b/src/venus/venus-protocol/vn_protocol_renderer_image_view.h
index 6f6283e1..28565419 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_image_view.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_image_view.h
@@ -71,6 +71,63 @@ vn_replace_VkImageViewUsageCreateInfo_handle(VkImageViewUsageCreateInfo *val)
} while (pnext);
}
+/* struct VkImageViewMinLodCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkImageViewMinLodCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkImageViewMinLodCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkImageViewMinLodCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_float(dec, &val->minLod);
+}
+
+static inline void
+vn_decode_VkImageViewMinLodCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkImageViewMinLodCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkImageViewMinLodCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkImageViewMinLodCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkImageViewMinLodCreateInfoEXT_handle_self(VkImageViewMinLodCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->minLod */
+}
+
+static inline void
+vn_replace_VkImageViewMinLodCreateInfoEXT_handle(VkImageViewMinLodCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT:
+ vn_replace_VkImageViewMinLodCreateInfoEXT_handle_self((VkImageViewMinLodCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkImageViewCreateInfo chain */
static inline void *
@@ -100,6 +157,14 @@ vn_decode_VkImageViewCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkSamplerYcbcrConversionInfo_self_temp(dec, (VkSamplerYcbcrConversionInfo *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkImageViewMinLodCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkImageViewCreateInfo_pnext_temp(dec);
+ vn_decode_VkImageViewMinLodCreateInfoEXT_self_temp(dec, (VkImageViewMinLodCreateInfoEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -164,6 +229,9 @@ vn_replace_VkImageViewCreateInfo_handle(VkImageViewCreateInfo *val)
case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
vn_replace_VkSamplerYcbcrConversionInfo_handle_self((VkSamplerYcbcrConversionInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT:
+ vn_replace_VkImageViewMinLodCreateInfoEXT_handle_self((VkImageViewMinLodCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_info.h b/src/venus/venus-protocol/vn_protocol_renderer_info.h
index 05d2c830..9ccccef7 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_info.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_info.h
@@ -10,6 +10,225 @@
#include "vn_protocol_renderer_defines.h"
+struct vn_info_extension_table {
+ union {
+ bool enabled[99];
+ struct {
+ bool EXT_4444_formats;
+ bool EXT_calibrated_timestamps;
+ bool EXT_command_serialization;
+ bool EXT_conditional_rendering;
+ bool EXT_conservative_rasterization;
+ bool EXT_custom_border_color;
+ bool EXT_depth_clip_control;
+ bool EXT_depth_clip_enable;
+ bool EXT_descriptor_indexing;
+ bool EXT_extended_dynamic_state;
+ bool EXT_extended_dynamic_state2;
+ bool EXT_external_memory_dma_buf;
+ bool EXT_host_query_reset;
+ bool EXT_image_drm_format_modifier;
+ bool EXT_image_robustness;
+ bool EXT_image_view_min_lod;
+ bool EXT_index_type_uint8;
+ bool EXT_inline_uniform_block;
+ bool EXT_line_rasterization;
+ bool EXT_multi_draw;
+ bool EXT_mutable_descriptor_type;
+ bool EXT_pci_bus_info;
+ bool EXT_pipeline_creation_cache_control;
+ bool EXT_pipeline_creation_feedback;
+ bool EXT_primitive_topology_list_restart;
+ bool EXT_primitives_generated_query;
+ bool EXT_private_data;
+ bool EXT_provoking_vertex;
+ bool EXT_queue_family_foreign;
+ bool EXT_robustness2;
+ bool EXT_sampler_filter_minmax;
+ bool EXT_scalar_block_layout;
+ bool EXT_separate_stencil_usage;
+ bool EXT_shader_demote_to_helper_invocation;
+ bool EXT_shader_stencil_export;
+ bool EXT_shader_viewport_index_layer;
+ bool EXT_subgroup_size_control;
+ bool EXT_texel_buffer_alignment;
+ bool EXT_texture_compression_astc_hdr;
+ bool EXT_tooling_info;
+ bool EXT_transform_feedback;
+ bool EXT_vertex_attribute_divisor;
+ bool EXT_ycbcr_2plane_444_formats;
+ bool KHR_16bit_storage;
+ bool KHR_8bit_storage;
+ bool KHR_bind_memory2;
+ bool KHR_buffer_device_address;
+ bool KHR_copy_commands2;
+ bool KHR_create_renderpass2;
+ bool KHR_dedicated_allocation;
+ bool KHR_depth_stencil_resolve;
+ bool KHR_descriptor_update_template;
+ bool KHR_device_group;
+ bool KHR_device_group_creation;
+ bool KHR_draw_indirect_count;
+ bool KHR_driver_properties;
+ bool KHR_dynamic_rendering;
+ bool KHR_external_fence;
+ bool KHR_external_fence_capabilities;
+ bool KHR_external_fence_fd;
+ bool KHR_external_memory;
+ bool KHR_external_memory_capabilities;
+ bool KHR_external_memory_fd;
+ bool KHR_external_semaphore;
+ bool KHR_external_semaphore_capabilities;
+ bool KHR_external_semaphore_fd;
+ bool KHR_format_feature_flags2;
+ bool KHR_get_memory_requirements2;
+ bool KHR_get_physical_device_properties2;
+ bool KHR_image_format_list;
+ bool KHR_imageless_framebuffer;
+ bool KHR_maintenance1;
+ bool KHR_maintenance2;
+ bool KHR_maintenance3;
+ bool KHR_maintenance4;
+ bool KHR_multiview;
+ bool KHR_push_descriptor;
+ bool KHR_relaxed_block_layout;
+ bool KHR_sampler_mirror_clamp_to_edge;
+ bool KHR_sampler_ycbcr_conversion;
+ bool KHR_separate_depth_stencil_layouts;
+ bool KHR_shader_atomic_int64;
+ bool KHR_shader_draw_parameters;
+ bool KHR_shader_float16_int8;
+ bool KHR_shader_float_controls;
+ bool KHR_shader_integer_dot_product;
+ bool KHR_shader_non_semantic_info;
+ bool KHR_shader_subgroup_extended_types;
+ bool KHR_shader_terminate_invocation;
+ bool KHR_spirv_1_4;
+ bool KHR_storage_buffer_storage_class;
+ bool KHR_synchronization2;
+ bool KHR_timeline_semaphore;
+ bool KHR_uniform_buffer_standard_layout;
+ bool KHR_variable_pointers;
+ bool KHR_vulkan_memory_model;
+ bool KHR_zero_initialize_workgroup_memory;
+ bool MESA_venus_protocol;
+ bool VALVE_mutable_descriptor_type;
+ };
+ };
+};
+
+#define VN_INFO_EXTENSION_MAX_NUMBER (495)
+
+struct vn_info_extension {
+ const char *name;
+ uint32_t number;
+ uint32_t spec_version;
+};
+
+/* sorted by extension names for bsearch */
+static const uint32_t _vn_info_extension_count = 99;
+static const struct vn_info_extension _vn_info_extensions[99] = {
+ { "VK_EXT_4444_formats", 341, 1 },
+ { "VK_EXT_calibrated_timestamps", 185, 2 },
+ { "VK_EXT_command_serialization", 384, 0 },
+ { "VK_EXT_conditional_rendering", 82, 2 },
+ { "VK_EXT_conservative_rasterization", 102, 1 },
+ { "VK_EXT_custom_border_color", 288, 12 },
+ { "VK_EXT_depth_clip_control", 356, 1 },
+ { "VK_EXT_depth_clip_enable", 103, 1 },
+ { "VK_EXT_descriptor_indexing", 162, 2 },
+ { "VK_EXT_extended_dynamic_state", 268, 1 },
+ { "VK_EXT_extended_dynamic_state2", 378, 1 },
+ { "VK_EXT_external_memory_dma_buf", 126, 1 },
+ { "VK_EXT_host_query_reset", 262, 1 },
+ { "VK_EXT_image_drm_format_modifier", 159, 2 },
+ { "VK_EXT_image_robustness", 336, 1 },
+ { "VK_EXT_image_view_min_lod", 392, 1 },
+ { "VK_EXT_index_type_uint8", 266, 1 },
+ { "VK_EXT_inline_uniform_block", 139, 1 },
+ { "VK_EXT_line_rasterization", 260, 1 },
+ { "VK_EXT_multi_draw", 393, 1 },
+ { "VK_EXT_mutable_descriptor_type", 495, 1 },
+ { "VK_EXT_pci_bus_info", 213, 2 },
+ { "VK_EXT_pipeline_creation_cache_control", 298, 3 },
+ { "VK_EXT_pipeline_creation_feedback", 193, 1 },
+ { "VK_EXT_primitive_topology_list_restart", 357, 1 },
+ { "VK_EXT_primitives_generated_query", 383, 1 },
+ { "VK_EXT_private_data", 296, 1 },
+ { "VK_EXT_provoking_vertex", 255, 1 },
+ { "VK_EXT_queue_family_foreign", 127, 1 },
+ { "VK_EXT_robustness2", 287, 1 },
+ { "VK_EXT_sampler_filter_minmax", 131, 2 },
+ { "VK_EXT_scalar_block_layout", 222, 1 },
+ { "VK_EXT_separate_stencil_usage", 247, 1 },
+ { "VK_EXT_shader_demote_to_helper_invocation", 277, 1 },
+ { "VK_EXT_shader_stencil_export", 141, 1 },
+ { "VK_EXT_shader_viewport_index_layer", 163, 1 },
+ { "VK_EXT_subgroup_size_control", 226, 2 },
+ { "VK_EXT_texel_buffer_alignment", 282, 1 },
+ { "VK_EXT_texture_compression_astc_hdr", 67, 1 },
+ { "VK_EXT_tooling_info", 246, 1 },
+ { "VK_EXT_transform_feedback", 29, 1 },
+ { "VK_EXT_vertex_attribute_divisor", 191, 3 },
+ { "VK_EXT_ycbcr_2plane_444_formats", 331, 1 },
+ { "VK_KHR_16bit_storage", 84, 1 },
+ { "VK_KHR_8bit_storage", 178, 1 },
+ { "VK_KHR_bind_memory2", 158, 1 },
+ { "VK_KHR_buffer_device_address", 258, 1 },
+ { "VK_KHR_copy_commands2", 338, 1 },
+ { "VK_KHR_create_renderpass2", 110, 1 },
+ { "VK_KHR_dedicated_allocation", 128, 3 },
+ { "VK_KHR_depth_stencil_resolve", 200, 1 },
+ { "VK_KHR_descriptor_update_template", 86, 1 },
+ { "VK_KHR_device_group", 61, 4 },
+ { "VK_KHR_device_group_creation", 71, 1 },
+ { "VK_KHR_draw_indirect_count", 170, 1 },
+ { "VK_KHR_driver_properties", 197, 1 },
+ { "VK_KHR_dynamic_rendering", 45, 1 },
+ { "VK_KHR_external_fence", 114, 1 },
+ { "VK_KHR_external_fence_capabilities", 113, 1 },
+ { "VK_KHR_external_fence_fd", 116, 1 },
+ { "VK_KHR_external_memory", 73, 1 },
+ { "VK_KHR_external_memory_capabilities", 72, 1 },
+ { "VK_KHR_external_memory_fd", 75, 1 },
+ { "VK_KHR_external_semaphore", 78, 1 },
+ { "VK_KHR_external_semaphore_capabilities", 77, 1 },
+ { "VK_KHR_external_semaphore_fd", 80, 1 },
+ { "VK_KHR_format_feature_flags2", 361, 2 },
+ { "VK_KHR_get_memory_requirements2", 147, 1 },
+ { "VK_KHR_get_physical_device_properties2", 60, 2 },
+ { "VK_KHR_image_format_list", 148, 1 },
+ { "VK_KHR_imageless_framebuffer", 109, 1 },
+ { "VK_KHR_maintenance1", 70, 2 },
+ { "VK_KHR_maintenance2", 118, 1 },
+ { "VK_KHR_maintenance3", 169, 1 },
+ { "VK_KHR_maintenance4", 414, 2 },
+ { "VK_KHR_multiview", 54, 1 },
+ { "VK_KHR_push_descriptor", 81, 2 },
+ { "VK_KHR_relaxed_block_layout", 145, 1 },
+ { "VK_KHR_sampler_mirror_clamp_to_edge", 15, 3 },
+ { "VK_KHR_sampler_ycbcr_conversion", 157, 14 },
+ { "VK_KHR_separate_depth_stencil_layouts", 242, 1 },
+ { "VK_KHR_shader_atomic_int64", 181, 1 },
+ { "VK_KHR_shader_draw_parameters", 64, 1 },
+ { "VK_KHR_shader_float16_int8", 83, 1 },
+ { "VK_KHR_shader_float_controls", 198, 4 },
+ { "VK_KHR_shader_integer_dot_product", 281, 1 },
+ { "VK_KHR_shader_non_semantic_info", 294, 1 },
+ { "VK_KHR_shader_subgroup_extended_types", 176, 1 },
+ { "VK_KHR_shader_terminate_invocation", 216, 1 },
+ { "VK_KHR_spirv_1_4", 237, 1 },
+ { "VK_KHR_storage_buffer_storage_class", 132, 1 },
+ { "VK_KHR_synchronization2", 315, 1 },
+ { "VK_KHR_timeline_semaphore", 208, 2 },
+ { "VK_KHR_uniform_buffer_standard_layout", 254, 1 },
+ { "VK_KHR_variable_pointers", 121, 1 },
+ { "VK_KHR_vulkan_memory_model", 212, 3 },
+ { "VK_KHR_zero_initialize_workgroup_memory", 326, 1 },
+ { "VK_MESA_venus_protocol", 385, 100000 },
+ { "VK_VALVE_mutable_descriptor_type", 352, 1 },
+};
+
static inline uint32_t
vn_info_wire_format_version(void)
{
@@ -19,137 +238,37 @@ vn_info_wire_format_version(void)
static inline uint32_t
vn_info_vk_xml_version(void)
{
- return VK_MAKE_API_VERSION(0, 1, 2, 182);
+ return VK_MAKE_API_VERSION(0, 1, 3, 228);
}
static inline int
-vn_info_extension_compare(const void *a, const void *b)
+vn_info_extension_compare(const void *name, const void *ext)
{
- return strcmp(a, *(const char **)b);
+ return strcmp(name, ((const struct vn_info_extension *)ext)->name);
}
-static inline uint32_t
-vn_info_extension_spec_version(const char *name)
+static inline int32_t
+vn_info_extension_index(const char *name)
{
- static uint32_t ext_count = 54;
- static const char *ext_names[54] = {
- "VK_EXT_command_serialization",
- "VK_EXT_descriptor_indexing",
- "VK_EXT_external_memory_dma_buf",
- "VK_EXT_host_query_reset",
- "VK_EXT_image_drm_format_modifier",
- "VK_EXT_queue_family_foreign",
- "VK_EXT_sampler_filter_minmax",
- "VK_EXT_scalar_block_layout",
- "VK_EXT_separate_stencil_usage",
- "VK_EXT_shader_viewport_index_layer",
- "VK_EXT_transform_feedback",
- "VK_KHR_16bit_storage",
- "VK_KHR_8bit_storage",
- "VK_KHR_bind_memory2",
- "VK_KHR_buffer_device_address",
- "VK_KHR_create_renderpass2",
- "VK_KHR_dedicated_allocation",
- "VK_KHR_depth_stencil_resolve",
- "VK_KHR_descriptor_update_template",
- "VK_KHR_device_group",
- "VK_KHR_device_group_creation",
- "VK_KHR_draw_indirect_count",
- "VK_KHR_driver_properties",
- "VK_KHR_external_fence",
- "VK_KHR_external_fence_capabilities",
- "VK_KHR_external_memory",
- "VK_KHR_external_memory_capabilities",
- "VK_KHR_external_memory_fd",
- "VK_KHR_external_semaphore",
- "VK_KHR_external_semaphore_capabilities",
- "VK_KHR_get_memory_requirements2",
- "VK_KHR_get_physical_device_properties2",
- "VK_KHR_image_format_list",
- "VK_KHR_imageless_framebuffer",
- "VK_KHR_maintenance1",
- "VK_KHR_maintenance2",
- "VK_KHR_maintenance3",
- "VK_KHR_multiview",
- "VK_KHR_relaxed_block_layout",
- "VK_KHR_sampler_mirror_clamp_to_edge",
- "VK_KHR_sampler_ycbcr_conversion",
- "VK_KHR_separate_depth_stencil_layouts",
- "VK_KHR_shader_atomic_int64",
- "VK_KHR_shader_draw_parameters",
- "VK_KHR_shader_float16_int8",
- "VK_KHR_shader_float_controls",
- "VK_KHR_shader_subgroup_extended_types",
- "VK_KHR_spirv_1_4",
- "VK_KHR_storage_buffer_storage_class",
- "VK_KHR_timeline_semaphore",
- "VK_KHR_uniform_buffer_standard_layout",
- "VK_KHR_variable_pointers",
- "VK_KHR_vulkan_memory_model",
- "VK_MESA_venus_protocol",
- };
- static const uint32_t ext_versions[54] = {
- 0,
- 2,
- 1,
- 1,
- 1,
- 1,
- 2,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 3,
- 1,
- 1,
- 4,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 1,
- 2,
- 1,
- 1,
- 2,
- 1,
- 1,
- 1,
- 1,
- 3,
- 14,
- 1,
- 1,
- 1,
- 1,
- 4,
- 1,
- 1,
- 1,
- 2,
- 1,
- 1,
- 3,
- 100000,
- };
- const char **found;
+ const struct vn_info_extension *ext = bsearch(name, _vn_info_extensions,
+ _vn_info_extension_count, sizeof(*_vn_info_extensions),
+ vn_info_extension_compare);
+ return ext ? ext - _vn_info_extensions : -1;
+}
- found = bsearch(name, ext_names, ext_count, sizeof(ext_names[0]),
- vn_info_extension_compare);
+static inline const struct vn_info_extension *
+vn_info_extension_get(int32_t index)
+{
+ assert(index >= 0 && (uint32_t)index < _vn_info_extension_count);
+ return &_vn_info_extensions[index];
+}
- return found ? ext_versions[found - ext_names] : 0;
+static inline void
+vn_info_extension_mask_init(uint32_t *out_mask)
+{
+ for (uint32_t i = 0; i < _vn_info_extension_count; i++) {
+ out_mask[_vn_info_extensions[i].number / 32] |= 1 << (_vn_info_extensions[i].number % 32);
+ }
}
#endif /* VN_PROTOCOL_RENDERER_INFO_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_pipeline.h b/src/venus/venus-protocol/vn_protocol_renderer_pipeline.h
index 4946f0b6..554e5e53 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_pipeline.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_pipeline.h
@@ -72,10 +72,10 @@ vn_replace_VkSpecializationInfo_handle(VkSpecializationInfo *val)
/* skip val->pData */
}
-/* struct VkPipelineShaderStageCreateInfo chain */
+/* struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfo chain */
static inline void *
-vn_decode_VkPipelineShaderStageCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -84,6 +84,91 @@ vn_decode_VkPipelineShaderStageCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
}
static inline void
+vn_decode_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->requiredSubgroupSize);
+}
+
+static inline void
+vn_decode_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_temp(struct vn_cs_decoder *dec, VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_handle_self(VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->requiredSubgroupSize */
+}
+
+static inline void
+vn_replace_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_handle(VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO:
+ vn_replace_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_handle_self((VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineShaderStageCreateInfo chain */
+
+static inline void *
+vn_decode_VkPipelineShaderStageCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkShaderModuleCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineShaderStageCreateInfo_pnext_temp(dec);
+ vn_decode_VkShaderModuleCreateInfo_self_temp(dec, (VkShaderModuleCreateInfo *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineShaderStageRequiredSubgroupSizeCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineShaderStageCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_self_temp(dec, (VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkPipelineShaderStageCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPipelineShaderStageCreateInfo *val)
{
/* skip val->{sType,pNext} */
@@ -144,6 +229,12 @@ vn_replace_VkPipelineShaderStageCreateInfo_handle(VkPipelineShaderStageCreateInf
case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
vn_replace_VkPipelineShaderStageCreateInfo_handle_self((VkPipelineShaderStageCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO:
+ vn_replace_VkShaderModuleCreateInfo_handle_self((VkShaderModuleCreateInfo *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO:
+ vn_replace_VkPipelineShaderStageRequiredSubgroupSizeCreateInfo_handle_self((VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -190,10 +281,26 @@ vn_replace_VkVertexInputAttributeDescription_handle(VkVertexInputAttributeDescri
/* skip val->offset */
}
-/* struct VkPipelineVertexInputStateCreateInfo chain */
+/* struct VkVertexInputBindingDivisorDescriptionEXT */
+
+static inline void
+vn_decode_VkVertexInputBindingDivisorDescriptionEXT_temp(struct vn_cs_decoder *dec, VkVertexInputBindingDivisorDescriptionEXT *val)
+{
+ vn_decode_uint32_t(dec, &val->binding);
+ vn_decode_uint32_t(dec, &val->divisor);
+}
+
+static inline void
+vn_replace_VkVertexInputBindingDivisorDescriptionEXT_handle(VkVertexInputBindingDivisorDescriptionEXT *val)
+{
+ /* skip val->binding */
+ /* skip val->divisor */
+}
+
+/* struct VkPipelineVertexInputDivisorStateCreateInfoEXT chain */
static inline void *
-vn_decode_VkPipelineVertexInputStateCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkPipelineVertexInputDivisorStateCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -202,6 +309,97 @@ vn_decode_VkPipelineVertexInputStateCreateInfo_pnext_temp(struct vn_cs_decoder *
}
static inline void
+vn_decode_VkPipelineVertexInputDivisorStateCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkPipelineVertexInputDivisorStateCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->vertexBindingDivisorCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->vertexBindingDivisorCount);
+ val->pVertexBindingDivisors = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pVertexBindingDivisors) * iter_count);
+ if (!val->pVertexBindingDivisors) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkVertexInputBindingDivisorDescriptionEXT_temp(dec, &((VkVertexInputBindingDivisorDescriptionEXT *)val->pVertexBindingDivisors)[i]);
+ } else {
+ vn_decode_array_size(dec, val->vertexBindingDivisorCount);
+ val->pVertexBindingDivisors = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkPipelineVertexInputDivisorStateCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkPipelineVertexInputDivisorStateCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineVertexInputDivisorStateCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkPipelineVertexInputDivisorStateCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineVertexInputDivisorStateCreateInfoEXT_handle_self(VkPipelineVertexInputDivisorStateCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->vertexBindingDivisorCount */
+ if (val->pVertexBindingDivisors) {
+ for (uint32_t i = 0; i < val->vertexBindingDivisorCount; i++)
+ vn_replace_VkVertexInputBindingDivisorDescriptionEXT_handle(&((VkVertexInputBindingDivisorDescriptionEXT *)val->pVertexBindingDivisors)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkPipelineVertexInputDivisorStateCreateInfoEXT_handle(VkPipelineVertexInputDivisorStateCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineVertexInputDivisorStateCreateInfoEXT_handle_self((VkPipelineVertexInputDivisorStateCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineVertexInputStateCreateInfo chain */
+
+static inline void *
+vn_decode_VkPipelineVertexInputStateCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineVertexInputDivisorStateCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineVertexInputStateCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineVertexInputDivisorStateCreateInfoEXT_self_temp(dec, (VkPipelineVertexInputDivisorStateCreateInfoEXT *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkPipelineVertexInputStateCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPipelineVertexInputStateCreateInfo *val)
{
/* skip val->{sType,pNext} */
@@ -271,6 +469,9 @@ vn_replace_VkPipelineVertexInputStateCreateInfo_handle(VkPipelineVertexInputStat
case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO:
vn_replace_VkPipelineVertexInputStateCreateInfo_handle_self((VkPipelineVertexInputStateCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineVertexInputDivisorStateCreateInfoEXT_handle_self((VkPipelineVertexInputDivisorStateCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -479,10 +680,10 @@ vn_replace_VkPipelineTessellationStateCreateInfo_handle(VkPipelineTessellationSt
} while (pnext);
}
-/* struct VkPipelineViewportStateCreateInfo chain */
+/* struct VkPipelineViewportDepthClipControlCreateInfoEXT chain */
static inline void *
-vn_decode_VkPipelineViewportStateCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkPipelineViewportDepthClipControlCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -491,6 +692,83 @@ vn_decode_VkPipelineViewportStateCreateInfo_pnext_temp(struct vn_cs_decoder *dec
}
static inline void
+vn_decode_VkPipelineViewportDepthClipControlCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkPipelineViewportDepthClipControlCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkBool32(dec, &val->negativeOneToOne);
+}
+
+static inline void
+vn_decode_VkPipelineViewportDepthClipControlCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkPipelineViewportDepthClipControlCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineViewportDepthClipControlCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkPipelineViewportDepthClipControlCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineViewportDepthClipControlCreateInfoEXT_handle_self(VkPipelineViewportDepthClipControlCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->negativeOneToOne */
+}
+
+static inline void
+vn_replace_VkPipelineViewportDepthClipControlCreateInfoEXT_handle(VkPipelineViewportDepthClipControlCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT:
+ vn_replace_VkPipelineViewportDepthClipControlCreateInfoEXT_handle_self((VkPipelineViewportDepthClipControlCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineViewportStateCreateInfo chain */
+
+static inline void *
+vn_decode_VkPipelineViewportStateCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineViewportDepthClipControlCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineViewportStateCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineViewportDepthClipControlCreateInfoEXT_self_temp(dec, (VkPipelineViewportDepthClipControlCreateInfoEXT *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkPipelineViewportStateCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPipelineViewportStateCreateInfo *val)
{
/* skip val->{sType,pNext} */
@@ -560,6 +838,70 @@ vn_replace_VkPipelineViewportStateCreateInfo_handle(VkPipelineViewportStateCreat
case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO:
vn_replace_VkPipelineViewportStateCreateInfo_handle_self((VkPipelineViewportStateCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT:
+ vn_replace_VkPipelineViewportDepthClipControlCreateInfoEXT_handle_self((VkPipelineViewportDepthClipControlCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineRasterizationConservativeStateCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkPipelineRasterizationConservativeStateCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationConservativeStateCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationConservativeStateCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_VkConservativeRasterizationModeEXT(dec, &val->conservativeRasterizationMode);
+ vn_decode_float(dec, &val->extraPrimitiveOverestimationSize);
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationConservativeStateCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationConservativeStateCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineRasterizationConservativeStateCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationConservativeStateCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationConservativeStateCreateInfoEXT_handle_self(VkPipelineRasterizationConservativeStateCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+ /* skip val->conservativeRasterizationMode */
+ /* skip val->extraPrimitiveOverestimationSize */
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationConservativeStateCreateInfoEXT_handle(VkPipelineRasterizationConservativeStateCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationConservativeStateCreateInfoEXT_handle_self((VkPipelineRasterizationConservativeStateCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -627,6 +969,185 @@ vn_replace_VkPipelineRasterizationStateStreamCreateInfoEXT_handle(VkPipelineRast
} while (pnext);
}
+/* struct VkPipelineRasterizationDepthClipStateCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkPipelineRasterizationDepthClipStateCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationDepthClipStateCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationDepthClipStateCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_VkBool32(dec, &val->depthClipEnable);
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationDepthClipStateCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationDepthClipStateCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineRasterizationDepthClipStateCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationDepthClipStateCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationDepthClipStateCreateInfoEXT_handle_self(VkPipelineRasterizationDepthClipStateCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+ /* skip val->depthClipEnable */
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationDepthClipStateCreateInfoEXT_handle(VkPipelineRasterizationDepthClipStateCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationDepthClipStateCreateInfoEXT_handle_self((VkPipelineRasterizationDepthClipStateCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineRasterizationLineStateCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkPipelineRasterizationLineStateCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationLineStateCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationLineStateCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkLineRasterizationModeEXT(dec, &val->lineRasterizationMode);
+ vn_decode_VkBool32(dec, &val->stippledLineEnable);
+ vn_decode_uint32_t(dec, &val->lineStippleFactor);
+ vn_decode_uint16_t(dec, &val->lineStipplePattern);
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationLineStateCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationLineStateCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineRasterizationLineStateCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationLineStateCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationLineStateCreateInfoEXT_handle_self(VkPipelineRasterizationLineStateCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->lineRasterizationMode */
+ /* skip val->stippledLineEnable */
+ /* skip val->lineStippleFactor */
+ /* skip val->lineStipplePattern */
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationLineStateCreateInfoEXT_handle(VkPipelineRasterizationLineStateCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationLineStateCreateInfoEXT_handle_self((VkPipelineRasterizationLineStateCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineRasterizationProvokingVertexStateCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkProvokingVertexModeEXT(dec, &val->provokingVertexMode);
+}
+
+static inline void
+vn_decode_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_handle_self(VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->provokingVertexMode */
+}
+
+static inline void
+vn_replace_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_handle(VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_handle_self((VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkPipelineRasterizationStateCreateInfo chain */
static inline void *
@@ -640,6 +1161,14 @@ vn_decode_VkPipelineRasterizationStateCreateInfo_pnext_temp(struct vn_cs_decoder
vn_decode_VkStructureType(dec, &stype);
switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineRasterizationConservativeStateCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineRasterizationStateCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationConservativeStateCreateInfoEXT_self_temp(dec, (VkPipelineRasterizationConservativeStateCreateInfoEXT *)pnext);
+ }
+ break;
case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineRasterizationStateStreamCreateInfoEXT));
if (pnext) {
@@ -648,6 +1177,30 @@ vn_decode_VkPipelineRasterizationStateCreateInfo_pnext_temp(struct vn_cs_decoder
vn_decode_VkPipelineRasterizationStateStreamCreateInfoEXT_self_temp(dec, (VkPipelineRasterizationStateStreamCreateInfoEXT *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineRasterizationDepthClipStateCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineRasterizationStateCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationDepthClipStateCreateInfoEXT_self_temp(dec, (VkPipelineRasterizationDepthClipStateCreateInfoEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineRasterizationLineStateCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineRasterizationStateCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationLineStateCreateInfoEXT_self_temp(dec, (VkPipelineRasterizationLineStateCreateInfoEXT *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineRasterizationProvokingVertexStateCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkPipelineRasterizationStateCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_self_temp(dec, (VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -716,9 +1269,21 @@ vn_replace_VkPipelineRasterizationStateCreateInfo_handle(VkPipelineRasterization
case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO:
vn_replace_VkPipelineRasterizationStateCreateInfo_handle_self((VkPipelineRasterizationStateCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationConservativeStateCreateInfoEXT_handle_self((VkPipelineRasterizationConservativeStateCreateInfoEXT *)pnext);
+ break;
case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
vn_replace_VkPipelineRasterizationStateStreamCreateInfoEXT_handle_self((VkPipelineRasterizationStateStreamCreateInfoEXT *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationDepthClipStateCreateInfoEXT_handle_self((VkPipelineRasterizationDepthClipStateCreateInfoEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationLineStateCreateInfoEXT_handle_self((VkPipelineRasterizationLineStateCreateInfoEXT *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
+ vn_replace_VkPipelineRasterizationProvokingVertexStateCreateInfoEXT_handle_self((VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -1084,10 +1649,26 @@ vn_replace_VkPipelineDynamicStateCreateInfo_handle(VkPipelineDynamicStateCreateI
} while (pnext);
}
-/* struct VkGraphicsPipelineCreateInfo chain */
+/* struct VkPipelineCreationFeedback */
+
+static inline void
+vn_decode_VkPipelineCreationFeedback_temp(struct vn_cs_decoder *dec, VkPipelineCreationFeedback *val)
+{
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_uint64_t(dec, &val->duration);
+}
+
+static inline void
+vn_replace_VkPipelineCreationFeedback_handle(VkPipelineCreationFeedback *val)
+{
+ /* skip val->flags */
+ /* skip val->duration */
+}
+
+/* struct VkPipelineCreationFeedbackCreateInfo chain */
static inline void *
-vn_decode_VkGraphicsPipelineCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkPipelineCreationFeedbackCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -1096,6 +1677,188 @@ vn_decode_VkGraphicsPipelineCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
}
static inline void
+vn_decode_VkPipelineCreationFeedbackCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPipelineCreationFeedbackCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ if (vn_decode_simple_pointer(dec)) {
+ val->pPipelineCreationFeedback = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pPipelineCreationFeedback));
+ if (!val->pPipelineCreationFeedback) return;
+ vn_decode_VkPipelineCreationFeedback_temp(dec, val->pPipelineCreationFeedback);
+ } else {
+ val->pPipelineCreationFeedback = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ vn_decode_uint32_t(dec, &val->pipelineStageCreationFeedbackCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->pipelineStageCreationFeedbackCount);
+ val->pPipelineStageCreationFeedbacks = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pPipelineStageCreationFeedbacks) * iter_count);
+ if (!val->pPipelineStageCreationFeedbacks) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkPipelineCreationFeedback_temp(dec, &val->pPipelineStageCreationFeedbacks[i]);
+ } else {
+ vn_decode_array_size(dec, val->pipelineStageCreationFeedbackCount);
+ val->pPipelineStageCreationFeedbacks = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkPipelineCreationFeedbackCreateInfo_temp(struct vn_cs_decoder *dec, VkPipelineCreationFeedbackCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineCreationFeedbackCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineCreationFeedbackCreateInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineCreationFeedbackCreateInfo_handle_self(VkPipelineCreationFeedbackCreateInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ if (val->pPipelineCreationFeedback)
+ vn_replace_VkPipelineCreationFeedback_handle(val->pPipelineCreationFeedback);
+ /* skip val->pipelineStageCreationFeedbackCount */
+ if (val->pPipelineStageCreationFeedbacks) {
+ for (uint32_t i = 0; i < val->pipelineStageCreationFeedbackCount; i++)
+ vn_replace_VkPipelineCreationFeedback_handle(&val->pPipelineStageCreationFeedbacks[i]);
+ }
+}
+
+static inline void
+vn_replace_VkPipelineCreationFeedbackCreateInfo_handle(VkPipelineCreationFeedbackCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
+ vn_replace_VkPipelineCreationFeedbackCreateInfo_handle_self((VkPipelineCreationFeedbackCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkPipelineRenderingCreateInfo chain */
+
+static inline void *
+vn_decode_VkPipelineRenderingCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPipelineRenderingCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPipelineRenderingCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->viewMask);
+ vn_decode_uint32_t(dec, &val->colorAttachmentCount);
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, val->colorAttachmentCount);
+ val->pColorAttachmentFormats = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pColorAttachmentFormats) * array_size);
+ if (!val->pColorAttachmentFormats) return;
+ vn_decode_VkFormat_array(dec, (VkFormat *)val->pColorAttachmentFormats, array_size);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ val->pColorAttachmentFormats = NULL;
+ }
+ vn_decode_VkFormat(dec, &val->depthAttachmentFormat);
+ vn_decode_VkFormat(dec, &val->stencilAttachmentFormat);
+}
+
+static inline void
+vn_decode_VkPipelineRenderingCreateInfo_temp(struct vn_cs_decoder *dec, VkPipelineRenderingCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPipelineRenderingCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineRenderingCreateInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPipelineRenderingCreateInfo_handle_self(VkPipelineRenderingCreateInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->viewMask */
+ /* skip val->colorAttachmentCount */
+ /* skip val->pColorAttachmentFormats */
+ /* skip val->depthAttachmentFormat */
+ /* skip val->stencilAttachmentFormat */
+}
+
+static inline void
+vn_replace_VkPipelineRenderingCreateInfo_handle(VkPipelineRenderingCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO:
+ vn_replace_VkPipelineRenderingCreateInfo_handle_self((VkPipelineRenderingCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkGraphicsPipelineCreateInfo chain */
+
+static inline void *
+vn_decode_VkGraphicsPipelineCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineCreationFeedbackCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkGraphicsPipelineCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineCreationFeedbackCreateInfo_self_temp(dec, (VkPipelineCreationFeedbackCreateInfo *)pnext);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineRenderingCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkGraphicsPipelineCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineRenderingCreateInfo_self_temp(dec, (VkPipelineRenderingCreateInfo *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
vn_decode_VkGraphicsPipelineCreateInfo_self_temp(struct vn_cs_decoder *dec, VkGraphicsPipelineCreateInfo *val)
{
/* skip val->{sType,pNext} */
@@ -1108,7 +1871,7 @@ vn_decode_VkGraphicsPipelineCreateInfo_self_temp(struct vn_cs_decoder *dec, VkGr
for (uint32_t i = 0; i < iter_count; i++)
vn_decode_VkPipelineShaderStageCreateInfo_temp(dec, &((VkPipelineShaderStageCreateInfo *)val->pStages)[i]);
} else {
- vn_decode_array_size(dec, val->stageCount);
+ vn_decode_array_size_unchecked(dec);
val->pStages = NULL;
}
if (vn_decode_simple_pointer(dec)) {
@@ -1145,7 +1908,6 @@ vn_decode_VkGraphicsPipelineCreateInfo_self_temp(struct vn_cs_decoder *dec, VkGr
vn_decode_VkPipelineRasterizationStateCreateInfo_temp(dec, (VkPipelineRasterizationStateCreateInfo *)val->pRasterizationState);
} else {
val->pRasterizationState = NULL;
- vn_cs_decoder_set_fatal(dec);
}
if (vn_decode_simple_pointer(dec)) {
val->pMultisampleState = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pMultisampleState));
@@ -1241,6 +2003,12 @@ vn_replace_VkGraphicsPipelineCreateInfo_handle(VkGraphicsPipelineCreateInfo *val
case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO:
vn_replace_VkGraphicsPipelineCreateInfo_handle_self((VkGraphicsPipelineCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
+ vn_replace_VkPipelineCreationFeedbackCreateInfo_handle_self((VkPipelineCreationFeedbackCreateInfo *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO:
+ vn_replace_VkPipelineRenderingCreateInfo_handle_self((VkPipelineRenderingCreateInfo *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
@@ -1254,10 +2022,30 @@ vn_replace_VkGraphicsPipelineCreateInfo_handle(VkGraphicsPipelineCreateInfo *val
static inline void *
vn_decode_VkComputePipelineCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
{
- /* no known/supported struct */
- if (vn_decode_simple_pointer(dec))
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkPipelineCreationFeedbackCreateInfo));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkComputePipelineCreateInfo_pnext_temp(dec);
+ vn_decode_VkPipelineCreationFeedbackCreateInfo_self_temp(dec, (VkPipelineCreationFeedbackCreateInfo *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
vn_cs_decoder_set_fatal(dec);
- return NULL;
+ break;
+ }
+
+ return pnext;
}
static inline void
@@ -1306,6 +2094,9 @@ vn_replace_VkComputePipelineCreateInfo_handle(VkComputePipelineCreateInfo *val)
case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO:
vn_replace_VkComputePipelineCreateInfo_handle_self((VkComputePipelineCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
+ vn_replace_VkPipelineCreationFeedbackCreateInfo_handle_self((VkPipelineCreationFeedbackCreateInfo *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_private_data_slot.h b/src/venus/venus-protocol/vn_protocol_renderer_private_data_slot.h
new file mode 100644
index 00000000..bab95e8e
--- /dev/null
+++ b/src/venus/venus-protocol/vn_protocol_renderer_private_data_slot.h
@@ -0,0 +1,324 @@
+/* This file is generated by venus-protocol. See vn_protocol_renderer.h. */
+
+/*
+ * Copyright 2020 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef VN_PROTOCOL_RENDERER_PRIVATE_DATA_SLOT_H
+#define VN_PROTOCOL_RENDERER_PRIVATE_DATA_SLOT_H
+
+#include "vn_protocol_renderer_structs.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpointer-arith"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+/* struct VkPrivateDataSlotCreateInfo chain */
+
+static inline void *
+vn_decode_VkPrivateDataSlotCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkPrivateDataSlotCreateInfo_self_temp(struct vn_cs_decoder *dec, VkPrivateDataSlotCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+}
+
+static inline void
+vn_decode_VkPrivateDataSlotCreateInfo_temp(struct vn_cs_decoder *dec, VkPrivateDataSlotCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkPrivateDataSlotCreateInfo_pnext_temp(dec);
+ vn_decode_VkPrivateDataSlotCreateInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkPrivateDataSlotCreateInfo_handle_self(VkPrivateDataSlotCreateInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+}
+
+static inline void
+vn_replace_VkPrivateDataSlotCreateInfo_handle(VkPrivateDataSlotCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO:
+ vn_replace_VkPrivateDataSlotCreateInfo_handle_self((VkPrivateDataSlotCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+static inline void vn_decode_vkCreatePrivateDataSlot_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCreatePrivateDataSlot *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pCreateInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pCreateInfo));
+ if (!args->pCreateInfo) return;
+ vn_decode_VkPrivateDataSlotCreateInfo_temp(dec, (VkPrivateDataSlotCreateInfo *)args->pCreateInfo);
+ } else {
+ args->pCreateInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ vn_cs_decoder_set_fatal(dec);
+ } else {
+ args->pAllocator = NULL;
+ }
+ if (vn_decode_simple_pointer(dec)) {
+ args->pPrivateDataSlot = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pPrivateDataSlot));
+ if (!args->pPrivateDataSlot) return;
+ vn_decode_VkPrivateDataSlot(dec, args->pPrivateDataSlot);
+ } else {
+ args->pPrivateDataSlot = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkCreatePrivateDataSlot_args_handle(struct vn_command_vkCreatePrivateDataSlot *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ if (args->pCreateInfo)
+ vn_replace_VkPrivateDataSlotCreateInfo_handle((VkPrivateDataSlotCreateInfo *)args->pCreateInfo);
+ /* skip args->pAllocator */
+ /* skip args->pPrivateDataSlot */
+}
+
+static inline void vn_encode_vkCreatePrivateDataSlot_reply(struct vn_cs_encoder *enc, const struct vn_command_vkCreatePrivateDataSlot *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkCreatePrivateDataSlot_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->device */
+ /* skip args->pCreateInfo */
+ /* skip args->pAllocator */
+ if (vn_encode_simple_pointer(enc, args->pPrivateDataSlot))
+ vn_encode_VkPrivateDataSlot(enc, args->pPrivateDataSlot);
+}
+
+static inline void vn_decode_vkDestroyPrivateDataSlot_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkDestroyPrivateDataSlot *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_VkPrivateDataSlot_lookup(dec, &args->privateDataSlot);
+ if (vn_decode_simple_pointer(dec)) {
+ vn_cs_decoder_set_fatal(dec);
+ } else {
+ args->pAllocator = NULL;
+ }
+}
+
+static inline void vn_replace_vkDestroyPrivateDataSlot_args_handle(struct vn_command_vkDestroyPrivateDataSlot *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ vn_replace_VkPrivateDataSlot_handle(&args->privateDataSlot);
+ /* skip args->pAllocator */
+}
+
+static inline void vn_encode_vkDestroyPrivateDataSlot_reply(struct vn_cs_encoder *enc, const struct vn_command_vkDestroyPrivateDataSlot *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkDestroyPrivateDataSlot_EXT});
+
+ /* skip args->device */
+ /* skip args->privateDataSlot */
+ /* skip args->pAllocator */
+}
+
+static inline void vn_decode_vkSetPrivateData_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkSetPrivateData *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_VkObjectType(dec, &args->objectType);
+ vn_decode_uint64_t(dec, &args->objectHandle);
+ vn_decode_VkPrivateDataSlot_lookup(dec, &args->privateDataSlot);
+ vn_decode_uint64_t(dec, &args->data);
+}
+
+static inline void vn_replace_vkSetPrivateData_args_handle(struct vn_command_vkSetPrivateData *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ /* skip args->objectType */
+ /* skip args->objectHandle */
+ vn_replace_VkPrivateDataSlot_handle(&args->privateDataSlot);
+ /* skip args->data */
+}
+
+static inline void vn_encode_vkSetPrivateData_reply(struct vn_cs_encoder *enc, const struct vn_command_vkSetPrivateData *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkSetPrivateData_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->device */
+ /* skip args->objectType */
+ /* skip args->objectHandle */
+ /* skip args->privateDataSlot */
+ /* skip args->data */
+}
+
+static inline void vn_decode_vkGetPrivateData_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetPrivateData *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_VkObjectType(dec, &args->objectType);
+ vn_decode_uint64_t(dec, &args->objectHandle);
+ vn_decode_VkPrivateDataSlot_lookup(dec, &args->privateDataSlot);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pData = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pData));
+ if (!args->pData) return;
+ } else {
+ args->pData = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkGetPrivateData_args_handle(struct vn_command_vkGetPrivateData *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ /* skip args->objectType */
+ /* skip args->objectHandle */
+ vn_replace_VkPrivateDataSlot_handle(&args->privateDataSlot);
+ /* skip args->pData */
+}
+
+static inline void vn_encode_vkGetPrivateData_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetPrivateData *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetPrivateData_EXT});
+
+ /* skip args->device */
+ /* skip args->objectType */
+ /* skip args->objectHandle */
+ /* skip args->privateDataSlot */
+ if (vn_encode_simple_pointer(enc, args->pData))
+ vn_encode_uint64_t(enc, args->pData);
+}
+
+static inline void vn_dispatch_vkCreatePrivateDataSlot(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkCreatePrivateDataSlot args;
+
+ if (!ctx->dispatch_vkCreatePrivateDataSlot) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkCreatePrivateDataSlot_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkCreatePrivateDataSlot(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkCreatePrivateDataSlot returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkCreatePrivateDataSlot_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkDestroyPrivateDataSlot(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkDestroyPrivateDataSlot args;
+
+ if (!ctx->dispatch_vkDestroyPrivateDataSlot) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkDestroyPrivateDataSlot_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkDestroyPrivateDataSlot(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkDestroyPrivateDataSlot_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkSetPrivateData(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkSetPrivateData args;
+
+ if (!ctx->dispatch_vkSetPrivateData) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkSetPrivateData_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkSetPrivateData(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkSetPrivateData returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkSetPrivateData_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkGetPrivateData(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkGetPrivateData args;
+
+ if (!ctx->dispatch_vkGetPrivateData) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkGetPrivateData_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkGetPrivateData(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkGetPrivateData_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+#pragma GCC diagnostic pop
+
+#endif /* VN_PROTOCOL_RENDERER_PRIVATE_DATA_SLOT_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_queue.h b/src/venus/venus-protocol/vn_protocol_renderer_queue.h
index 8b22d7c4..041677fa 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_queue.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_queue.h
@@ -764,6 +764,233 @@ vn_replace_VkBindSparseInfo_handle(VkBindSparseInfo *val)
} while (pnext);
}
+/* struct VkSemaphoreSubmitInfo chain */
+
+static inline void *
+vn_decode_VkSemaphoreSubmitInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkSemaphoreSubmitInfo_self_temp(struct vn_cs_decoder *dec, VkSemaphoreSubmitInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkSemaphore_lookup(dec, &val->semaphore);
+ vn_decode_uint64_t(dec, &val->value);
+ vn_decode_VkFlags64(dec, &val->stageMask);
+ vn_decode_uint32_t(dec, &val->deviceIndex);
+}
+
+static inline void
+vn_decode_VkSemaphoreSubmitInfo_temp(struct vn_cs_decoder *dec, VkSemaphoreSubmitInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkSemaphoreSubmitInfo_pnext_temp(dec);
+ vn_decode_VkSemaphoreSubmitInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkSemaphoreSubmitInfo_handle_self(VkSemaphoreSubmitInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkSemaphore_handle(&val->semaphore);
+ /* skip val->value */
+ /* skip val->stageMask */
+ /* skip val->deviceIndex */
+}
+
+static inline void
+vn_replace_VkSemaphoreSubmitInfo_handle(VkSemaphoreSubmitInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO:
+ vn_replace_VkSemaphoreSubmitInfo_handle_self((VkSemaphoreSubmitInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkCommandBufferSubmitInfo chain */
+
+static inline void *
+vn_decode_VkCommandBufferSubmitInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkCommandBufferSubmitInfo_self_temp(struct vn_cs_decoder *dec, VkCommandBufferSubmitInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkCommandBuffer_lookup(dec, &val->commandBuffer);
+ vn_decode_uint32_t(dec, &val->deviceMask);
+}
+
+static inline void
+vn_decode_VkCommandBufferSubmitInfo_temp(struct vn_cs_decoder *dec, VkCommandBufferSubmitInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkCommandBufferSubmitInfo_pnext_temp(dec);
+ vn_decode_VkCommandBufferSubmitInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkCommandBufferSubmitInfo_handle_self(VkCommandBufferSubmitInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkCommandBuffer_handle(&val->commandBuffer);
+ /* skip val->deviceMask */
+}
+
+static inline void
+vn_replace_VkCommandBufferSubmitInfo_handle(VkCommandBufferSubmitInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO:
+ vn_replace_VkCommandBufferSubmitInfo_handle_self((VkCommandBufferSubmitInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkSubmitInfo2 chain */
+
+static inline void *
+vn_decode_VkSubmitInfo2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkSubmitInfo2_self_temp(struct vn_cs_decoder *dec, VkSubmitInfo2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_uint32_t(dec, &val->waitSemaphoreInfoCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->waitSemaphoreInfoCount);
+ val->pWaitSemaphoreInfos = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pWaitSemaphoreInfos) * iter_count);
+ if (!val->pWaitSemaphoreInfos) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkSemaphoreSubmitInfo_temp(dec, &((VkSemaphoreSubmitInfo *)val->pWaitSemaphoreInfos)[i]);
+ } else {
+ vn_decode_array_size(dec, val->waitSemaphoreInfoCount);
+ val->pWaitSemaphoreInfos = NULL;
+ }
+ vn_decode_uint32_t(dec, &val->commandBufferInfoCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->commandBufferInfoCount);
+ val->pCommandBufferInfos = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pCommandBufferInfos) * iter_count);
+ if (!val->pCommandBufferInfos) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkCommandBufferSubmitInfo_temp(dec, &((VkCommandBufferSubmitInfo *)val->pCommandBufferInfos)[i]);
+ } else {
+ vn_decode_array_size(dec, val->commandBufferInfoCount);
+ val->pCommandBufferInfos = NULL;
+ }
+ vn_decode_uint32_t(dec, &val->signalSemaphoreInfoCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->signalSemaphoreInfoCount);
+ val->pSignalSemaphoreInfos = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pSignalSemaphoreInfos) * iter_count);
+ if (!val->pSignalSemaphoreInfos) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkSemaphoreSubmitInfo_temp(dec, &((VkSemaphoreSubmitInfo *)val->pSignalSemaphoreInfos)[i]);
+ } else {
+ vn_decode_array_size(dec, val->signalSemaphoreInfoCount);
+ val->pSignalSemaphoreInfos = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkSubmitInfo2_temp(struct vn_cs_decoder *dec, VkSubmitInfo2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_SUBMIT_INFO_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkSubmitInfo2_pnext_temp(dec);
+ vn_decode_VkSubmitInfo2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkSubmitInfo2_handle_self(VkSubmitInfo2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+ /* skip val->waitSemaphoreInfoCount */
+ if (val->pWaitSemaphoreInfos) {
+ for (uint32_t i = 0; i < val->waitSemaphoreInfoCount; i++)
+ vn_replace_VkSemaphoreSubmitInfo_handle(&((VkSemaphoreSubmitInfo *)val->pWaitSemaphoreInfos)[i]);
+ }
+ /* skip val->commandBufferInfoCount */
+ if (val->pCommandBufferInfos) {
+ for (uint32_t i = 0; i < val->commandBufferInfoCount; i++)
+ vn_replace_VkCommandBufferSubmitInfo_handle(&((VkCommandBufferSubmitInfo *)val->pCommandBufferInfos)[i]);
+ }
+ /* skip val->signalSemaphoreInfoCount */
+ if (val->pSignalSemaphoreInfos) {
+ for (uint32_t i = 0; i < val->signalSemaphoreInfoCount; i++)
+ vn_replace_VkSemaphoreSubmitInfo_handle(&((VkSemaphoreSubmitInfo *)val->pSignalSemaphoreInfos)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkSubmitInfo2_handle(VkSubmitInfo2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_SUBMIT_INFO_2:
+ vn_replace_VkSubmitInfo2_handle_self((VkSubmitInfo2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
static inline void vn_decode_vkQueueSubmit_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkQueueSubmit *args)
{
vn_decode_VkQueue_lookup(dec, &args->queue);
@@ -860,6 +1087,45 @@ static inline void vn_encode_vkQueueBindSparse_reply(struct vn_cs_encoder *enc,
/* skip args->fence */
}
+static inline void vn_decode_vkQueueSubmit2_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkQueueSubmit2 *args)
+{
+ vn_decode_VkQueue_lookup(dec, &args->queue);
+ vn_decode_uint32_t(dec, &args->submitCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, args->submitCount);
+ args->pSubmits = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pSubmits) * iter_count);
+ if (!args->pSubmits) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkSubmitInfo2_temp(dec, &((VkSubmitInfo2 *)args->pSubmits)[i]);
+ } else {
+ vn_decode_array_size(dec, args->submitCount);
+ args->pSubmits = NULL;
+ }
+ vn_decode_VkFence_lookup(dec, &args->fence);
+}
+
+static inline void vn_replace_vkQueueSubmit2_args_handle(struct vn_command_vkQueueSubmit2 *args)
+{
+ vn_replace_VkQueue_handle(&args->queue);
+ /* skip args->submitCount */
+ if (args->pSubmits) {
+ for (uint32_t i = 0; i < args->submitCount; i++)
+ vn_replace_VkSubmitInfo2_handle(&((VkSubmitInfo2 *)args->pSubmits)[i]);
+ }
+ vn_replace_VkFence_handle(&args->fence);
+}
+
+static inline void vn_encode_vkQueueSubmit2_reply(struct vn_cs_encoder *enc, const struct vn_command_vkQueueSubmit2 *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkQueueSubmit2_EXT});
+
+ vn_encode_VkResult(enc, &args->ret);
+ /* skip args->queue */
+ /* skip args->submitCount */
+ /* skip args->pSubmits */
+ /* skip args->fence */
+}
+
static inline void vn_dispatch_vkQueueSubmit(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkQueueSubmit args;
@@ -947,6 +1213,35 @@ static inline void vn_dispatch_vkQueueBindSparse(struct vn_dispatch_context *ctx
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkQueueSubmit2(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkQueueSubmit2 args;
+
+ if (!ctx->dispatch_vkQueueSubmit2) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkQueueSubmit2_args_temp(ctx->decoder, &args);
+ if (!args.queue) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkQueueSubmit2(ctx, &args);
+
+#ifdef DEBUG
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
+ vn_dispatch_debug_log(ctx, "vkQueueSubmit2 returned %d", args.ret);
+#endif
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkQueueSubmit2_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_QUEUE_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_render_pass.h b/src/venus/venus-protocol/vn_protocol_renderer_render_pass.h
index cb240f2f..73dd3e53 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_render_pass.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_render_pass.h
@@ -1008,10 +1008,30 @@ vn_replace_VkSubpassDescription2_handle(VkSubpassDescription2 *val)
static inline void *
vn_decode_VkSubpassDependency2_pnext_temp(struct vn_cs_decoder *dec)
{
- /* no known/supported struct */
- if (vn_decode_simple_pointer(dec))
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_MEMORY_BARRIER_2:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkMemoryBarrier2));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkSubpassDependency2_pnext_temp(dec);
+ vn_decode_VkMemoryBarrier2_self_temp(dec, (VkMemoryBarrier2 *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
vn_cs_decoder_set_fatal(dec);
- return NULL;
+ break;
+ }
+
+ return pnext;
}
static inline void
@@ -1066,6 +1086,9 @@ vn_replace_VkSubpassDependency2_handle(VkSubpassDependency2 *val)
case VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2:
vn_replace_VkSubpassDependency2_handle_self((VkSubpassDependency2 *)pnext);
break;
+ case VK_STRUCTURE_TYPE_MEMORY_BARRIER_2:
+ vn_replace_VkMemoryBarrier2_handle_self((VkMemoryBarrier2 *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_sampler.h b/src/venus/venus-protocol/vn_protocol_renderer_sampler.h
index 1849c74d..c619f77d 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_sampler.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_sampler.h
@@ -71,6 +71,65 @@ vn_replace_VkSamplerReductionModeCreateInfo_handle(VkSamplerReductionModeCreateI
} while (pnext);
}
+/* struct VkSamplerCustomBorderColorCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkSamplerCustomBorderColorCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkSamplerCustomBorderColorCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkSamplerCustomBorderColorCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkClearColorValue_temp(dec, &val->customBorderColor);
+ vn_decode_VkFormat(dec, &val->format);
+}
+
+static inline void
+vn_decode_VkSamplerCustomBorderColorCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkSamplerCustomBorderColorCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkSamplerCustomBorderColorCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkSamplerCustomBorderColorCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkSamplerCustomBorderColorCreateInfoEXT_handle_self(VkSamplerCustomBorderColorCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->customBorderColor */
+ /* skip val->format */
+}
+
+static inline void
+vn_replace_VkSamplerCustomBorderColorCreateInfoEXT_handle(VkSamplerCustomBorderColorCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+ vn_replace_VkSamplerCustomBorderColorCreateInfoEXT_handle_self((VkSamplerCustomBorderColorCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkSamplerCreateInfo chain */
static inline void *
@@ -100,6 +159,14 @@ vn_decode_VkSamplerCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
vn_decode_VkSamplerReductionModeCreateInfo_self_temp(dec, (VkSamplerReductionModeCreateInfo *)pnext);
}
break;
+ case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkSamplerCustomBorderColorCreateInfoEXT));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkSamplerCreateInfo_pnext_temp(dec);
+ vn_decode_VkSamplerCustomBorderColorCreateInfoEXT_self_temp(dec, (VkSamplerCustomBorderColorCreateInfoEXT *)pnext);
+ }
+ break;
default:
/* unexpected struct */
pnext = NULL;
@@ -184,6 +251,9 @@ vn_replace_VkSamplerCreateInfo_handle(VkSamplerCreateInfo *val)
case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
vn_replace_VkSamplerReductionModeCreateInfo_handle_self((VkSamplerReductionModeCreateInfo *)pnext);
break;
+ case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+ vn_replace_VkSamplerCustomBorderColorCreateInfoEXT_handle_self((VkSamplerCustomBorderColorCreateInfoEXT *)pnext);
+ break;
default:
/* ignore unknown/unsupported struct */
break;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_semaphore.h b/src/venus/venus-protocol/vn_protocol_renderer_semaphore.h
index c9532a56..bded7e62 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_semaphore.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_semaphore.h
@@ -14,6 +14,13 @@
#pragma GCC diagnostic ignored "-Wpointer-arith"
#pragma GCC diagnostic ignored "-Wunused-parameter"
+/*
+ * These structs/unions/commands are not included
+ *
+ * vkGetSemaphoreFdKHR
+ * vkImportSemaphoreFdKHR
+ */
+
/* struct VkExportSemaphoreCreateInfo chain */
static inline void *
@@ -247,32 +254,67 @@ vn_replace_VkSemaphoreWaitInfo_handle(VkSemaphoreWaitInfo *val)
/* struct VkSemaphoreSignalInfo chain */
-static inline void
-vn_encode_VkSemaphoreSignalInfo_pnext(struct vn_cs_encoder *enc, const void *val)
+static inline void *
+vn_decode_VkSemaphoreSignalInfo_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
}
static inline void
-vn_encode_VkSemaphoreSignalInfo_self(struct vn_cs_encoder *enc, const VkSemaphoreSignalInfo *val)
+vn_decode_VkSemaphoreSignalInfo_self_temp(struct vn_cs_decoder *dec, VkSemaphoreSignalInfo *val)
{
/* skip val->{sType,pNext} */
- vn_encode_VkSemaphore(enc, &val->semaphore);
- vn_encode_uint64_t(enc, &val->value);
+ vn_decode_VkSemaphore_lookup(dec, &val->semaphore);
+ vn_decode_uint64_t(dec, &val->value);
+}
+
+static inline void
+vn_decode_VkSemaphoreSignalInfo_temp(struct vn_cs_decoder *dec, VkSemaphoreSignalInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkSemaphoreSignalInfo_pnext_temp(dec);
+ vn_decode_VkSemaphoreSignalInfo_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkSemaphoreSignalInfo_handle_self(VkSemaphoreSignalInfo *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkSemaphore_handle(&val->semaphore);
+ /* skip val->value */
}
static inline void
-vn_encode_VkSemaphoreSignalInfo(struct vn_cs_encoder *enc, const VkSemaphoreSignalInfo *val)
+vn_replace_VkSemaphoreSignalInfo_handle(VkSemaphoreSignalInfo *val)
{
- assert(val->sType == VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO });
- vn_encode_VkSemaphoreSignalInfo_pnext(enc, val->pNext);
- vn_encode_VkSemaphoreSignalInfo_self(enc, val);
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO:
+ vn_replace_VkSemaphoreSignalInfo_handle_self((VkSemaphoreSignalInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
}
+/* struct VkImportSemaphoreResourceInfo100000MESA chain */
+
static inline void *
-vn_decode_VkSemaphoreSignalInfo_pnext_temp(struct vn_cs_decoder *dec)
+vn_decode_VkImportSemaphoreResourceInfo100000MESA_pnext_temp(struct vn_cs_decoder *dec)
{
/* no known/supported struct */
if (vn_decode_simple_pointer(dec))
@@ -281,44 +323,44 @@ vn_decode_VkSemaphoreSignalInfo_pnext_temp(struct vn_cs_decoder *dec)
}
static inline void
-vn_decode_VkSemaphoreSignalInfo_self_temp(struct vn_cs_decoder *dec, VkSemaphoreSignalInfo *val)
+vn_decode_VkImportSemaphoreResourceInfo100000MESA_self_temp(struct vn_cs_decoder *dec, VkImportSemaphoreResourceInfo100000MESA *val)
{
/* skip val->{sType,pNext} */
vn_decode_VkSemaphore_lookup(dec, &val->semaphore);
- vn_decode_uint64_t(dec, &val->value);
+ vn_decode_uint32_t(dec, &val->resourceId);
}
static inline void
-vn_decode_VkSemaphoreSignalInfo_temp(struct vn_cs_decoder *dec, VkSemaphoreSignalInfo *val)
+vn_decode_VkImportSemaphoreResourceInfo100000MESA_temp(struct vn_cs_decoder *dec, VkImportSemaphoreResourceInfo100000MESA *val)
{
VkStructureType stype;
vn_decode_VkStructureType(dec, &stype);
- if (stype != VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO)
+ if (stype != VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_RESOURCE_INFO_100000_MESA)
vn_cs_decoder_set_fatal(dec);
val->sType = stype;
- val->pNext = vn_decode_VkSemaphoreSignalInfo_pnext_temp(dec);
- vn_decode_VkSemaphoreSignalInfo_self_temp(dec, val);
+ val->pNext = vn_decode_VkImportSemaphoreResourceInfo100000MESA_pnext_temp(dec);
+ vn_decode_VkImportSemaphoreResourceInfo100000MESA_self_temp(dec, val);
}
static inline void
-vn_replace_VkSemaphoreSignalInfo_handle_self(VkSemaphoreSignalInfo *val)
+vn_replace_VkImportSemaphoreResourceInfo100000MESA_handle_self(VkImportSemaphoreResourceInfo100000MESA *val)
{
/* skip val->sType */
/* skip val->pNext */
vn_replace_VkSemaphore_handle(&val->semaphore);
- /* skip val->value */
+ /* skip val->resourceId */
}
static inline void
-vn_replace_VkSemaphoreSignalInfo_handle(VkSemaphoreSignalInfo *val)
+vn_replace_VkImportSemaphoreResourceInfo100000MESA_handle(VkImportSemaphoreResourceInfo100000MESA *val)
{
struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
do {
switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO:
- vn_replace_VkSemaphoreSignalInfo_handle_self((VkSemaphoreSignalInfo *)pnext);
+ case VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_RESOURCE_INFO_100000_MESA:
+ vn_replace_VkImportSemaphoreResourceInfo100000MESA_handle_self((VkImportSemaphoreResourceInfo100000MESA *)pnext);
break;
default:
/* ignore unknown/unsupported struct */
@@ -494,6 +536,54 @@ static inline void vn_encode_vkSignalSemaphore_reply(struct vn_cs_encoder *enc,
/* skip args->pSignalInfo */
}
+static inline void vn_decode_vkWaitSemaphoreResource100000MESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkWaitSemaphoreResource100000MESA *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ vn_decode_VkSemaphore_lookup(dec, &args->semaphore);
+}
+
+static inline void vn_replace_vkWaitSemaphoreResource100000MESA_args_handle(struct vn_command_vkWaitSemaphoreResource100000MESA *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ vn_replace_VkSemaphore_handle(&args->semaphore);
+}
+
+static inline void vn_encode_vkWaitSemaphoreResource100000MESA_reply(struct vn_cs_encoder *enc, const struct vn_command_vkWaitSemaphoreResource100000MESA *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkWaitSemaphoreResource100000MESA_EXT});
+
+ /* skip args->device */
+ /* skip args->semaphore */
+}
+
+static inline void vn_decode_vkImportSemaphoreResource100000MESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkImportSemaphoreResource100000MESA *args)
+{
+ vn_decode_VkDevice_lookup(dec, &args->device);
+ if (vn_decode_simple_pointer(dec)) {
+ args->pImportSemaphoreResourceInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pImportSemaphoreResourceInfo));
+ if (!args->pImportSemaphoreResourceInfo) return;
+ vn_decode_VkImportSemaphoreResourceInfo100000MESA_temp(dec, (VkImportSemaphoreResourceInfo100000MESA *)args->pImportSemaphoreResourceInfo);
+ } else {
+ args->pImportSemaphoreResourceInfo = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ }
+}
+
+static inline void vn_replace_vkImportSemaphoreResource100000MESA_args_handle(struct vn_command_vkImportSemaphoreResource100000MESA *args)
+{
+ vn_replace_VkDevice_handle(&args->device);
+ if (args->pImportSemaphoreResourceInfo)
+ vn_replace_VkImportSemaphoreResourceInfo100000MESA_handle((VkImportSemaphoreResourceInfo100000MESA *)args->pImportSemaphoreResourceInfo);
+}
+
+static inline void vn_encode_vkImportSemaphoreResource100000MESA_reply(struct vn_cs_encoder *enc, const struct vn_command_vkImportSemaphoreResource100000MESA *args)
+{
+ vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkImportSemaphoreResource100000MESA_EXT});
+
+ /* skip args->device */
+ /* skip args->pImportSemaphoreResourceInfo */
+}
+
static inline void vn_dispatch_vkCreateSemaphore(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkCreateSemaphore args;
@@ -635,6 +725,56 @@ static inline void vn_dispatch_vkSignalSemaphore(struct vn_dispatch_context *ctx
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
+static inline void vn_dispatch_vkWaitSemaphoreResource100000MESA(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkWaitSemaphoreResource100000MESA args;
+
+ if (!ctx->dispatch_vkWaitSemaphoreResource100000MESA) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkWaitSemaphoreResource100000MESA_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkWaitSemaphoreResource100000MESA(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkWaitSemaphoreResource100000MESA_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
+static inline void vn_dispatch_vkImportSemaphoreResource100000MESA(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
+{
+ struct vn_command_vkImportSemaphoreResource100000MESA args;
+
+ if (!ctx->dispatch_vkImportSemaphoreResource100000MESA) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ vn_decode_vkImportSemaphoreResource100000MESA_args_temp(ctx->decoder, &args);
+ if (!args.device) {
+ vn_cs_decoder_set_fatal(ctx->decoder);
+ return;
+ }
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder))
+ ctx->dispatch_vkImportSemaphoreResource100000MESA(ctx, &args);
+
+
+ if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
+ vn_encode_vkImportSemaphoreResource100000MESA_reply(ctx->encoder, &args);
+
+ vn_cs_decoder_reset_temp_pool(ctx->decoder);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_SEMAPHORE_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_shader_module.h b/src/venus/venus-protocol/vn_protocol_renderer_shader_module.h
index e83a2754..81006b64 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_shader_module.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_shader_module.h
@@ -14,75 +14,6 @@
#pragma GCC diagnostic ignored "-Wpointer-arith"
#pragma GCC diagnostic ignored "-Wunused-parameter"
-/* struct VkShaderModuleCreateInfo chain */
-
-static inline void *
-vn_decode_VkShaderModuleCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
-{
- /* no known/supported struct */
- if (vn_decode_simple_pointer(dec))
- vn_cs_decoder_set_fatal(dec);
- return NULL;
-}
-
-static inline void
-vn_decode_VkShaderModuleCreateInfo_self_temp(struct vn_cs_decoder *dec, VkShaderModuleCreateInfo *val)
-{
- /* skip val->{sType,pNext} */
- vn_decode_VkFlags(dec, &val->flags);
- vn_decode_size_t(dec, &val->codeSize);
- if (vn_peek_array_size(dec)) {
- const size_t array_size = vn_decode_array_size(dec, val->codeSize / 4);
- val->pCode = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pCode) * array_size);
- if (!val->pCode) return;
- vn_decode_uint32_t_array(dec, (uint32_t *)val->pCode, array_size);
- } else {
- vn_decode_array_size(dec, val->codeSize / 4);
- val->pCode = NULL;
- }
-}
-
-static inline void
-vn_decode_VkShaderModuleCreateInfo_temp(struct vn_cs_decoder *dec, VkShaderModuleCreateInfo *val)
-{
- VkStructureType stype;
- vn_decode_VkStructureType(dec, &stype);
- if (stype != VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO)
- vn_cs_decoder_set_fatal(dec);
-
- val->sType = stype;
- val->pNext = vn_decode_VkShaderModuleCreateInfo_pnext_temp(dec);
- vn_decode_VkShaderModuleCreateInfo_self_temp(dec, val);
-}
-
-static inline void
-vn_replace_VkShaderModuleCreateInfo_handle_self(VkShaderModuleCreateInfo *val)
-{
- /* skip val->sType */
- /* skip val->pNext */
- /* skip val->flags */
- /* skip val->codeSize */
- /* skip val->pCode */
-}
-
-static inline void
-vn_replace_VkShaderModuleCreateInfo_handle(VkShaderModuleCreateInfo *val)
-{
- struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
-
- do {
- switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO:
- vn_replace_VkShaderModuleCreateInfo_handle_self((VkShaderModuleCreateInfo *)pnext);
- break;
- default:
- /* ignore unknown/unsupported struct */
- break;
- }
- pnext = pnext->pNext;
- } while (pnext);
-}
-
static inline void vn_decode_vkCreateShaderModule_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkCreateShaderModule *args)
{
vn_decode_VkDevice_lookup(dec, &args->device);
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_structs.h b/src/venus/venus-protocol/vn_protocol_renderer_structs.h
index 6c353253..21d22c2f 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_structs.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_structs.h
@@ -482,19 +482,77 @@ vn_replace_VkSamplerYcbcrConversionInfo_handle(VkSamplerYcbcrConversionInfo *val
} while (pnext);
}
-/* struct VkViewport */
+/* struct VkShaderModuleCreateInfo chain */
+
+static inline void *
+vn_decode_VkShaderModuleCreateInfo_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkShaderModuleCreateInfo_self_temp(struct vn_cs_decoder *dec, VkShaderModuleCreateInfo *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags(dec, &val->flags);
+ vn_decode_size_t(dec, &val->codeSize);
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, val->codeSize / 4);
+ val->pCode = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pCode) * array_size);
+ if (!val->pCode) return;
+ vn_decode_uint32_t_array(dec, (uint32_t *)val->pCode, array_size);
+ } else {
+ vn_decode_array_size(dec, val->codeSize / 4);
+ val->pCode = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkShaderModuleCreateInfo_temp(struct vn_cs_decoder *dec, VkShaderModuleCreateInfo *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkShaderModuleCreateInfo_pnext_temp(dec);
+ vn_decode_VkShaderModuleCreateInfo_self_temp(dec, val);
+}
static inline void
-vn_encode_VkViewport(struct vn_cs_encoder *enc, const VkViewport *val)
+vn_replace_VkShaderModuleCreateInfo_handle_self(VkShaderModuleCreateInfo *val)
{
- vn_encode_float(enc, &val->x);
- vn_encode_float(enc, &val->y);
- vn_encode_float(enc, &val->width);
- vn_encode_float(enc, &val->height);
- vn_encode_float(enc, &val->minDepth);
- vn_encode_float(enc, &val->maxDepth);
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->flags */
+ /* skip val->codeSize */
+ /* skip val->pCode */
+}
+
+static inline void
+vn_replace_VkShaderModuleCreateInfo_handle(VkShaderModuleCreateInfo *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO:
+ vn_replace_VkShaderModuleCreateInfo_handle_self((VkShaderModuleCreateInfo *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
}
+/* struct VkViewport */
+
static inline void
vn_decode_VkViewport_temp(struct vn_cs_decoder *dec, VkViewport *val)
{
@@ -607,6 +665,366 @@ vn_replace_VkRect2D_handle(VkRect2D *val)
vn_replace_VkExtent2D_handle(&val->extent);
}
+/* union VkClearColorValue */
+
+static inline void
+vn_decode_VkClearColorValue_temp(struct vn_cs_decoder *dec, VkClearColorValue *val)
+{
+ uint32_t tag;
+ vn_decode_uint32_t(dec, &tag);
+ switch (tag) {
+ case 0:
+ {
+ const size_t array_size = vn_decode_array_size(dec, 4);
+ vn_decode_float_array(dec, val->float32, array_size);
+ }
+ break;
+ case 1:
+ {
+ const size_t array_size = vn_decode_array_size(dec, 4);
+ vn_decode_int32_t_array(dec, val->int32, array_size);
+ }
+ break;
+ case 2:
+ {
+ const size_t array_size = vn_decode_array_size(dec, 4);
+ vn_decode_uint32_t_array(dec, val->uint32, array_size);
+ }
+ break;
+ default:
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+}
+
+/* struct VkMutableDescriptorTypeListEXT */
+
+static inline void
+vn_decode_VkMutableDescriptorTypeListEXT_temp(struct vn_cs_decoder *dec, VkMutableDescriptorTypeListEXT *val)
+{
+ vn_decode_uint32_t(dec, &val->descriptorTypeCount);
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, val->descriptorTypeCount);
+ val->pDescriptorTypes = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pDescriptorTypes) * array_size);
+ if (!val->pDescriptorTypes) return;
+ vn_decode_VkDescriptorType_array(dec, (VkDescriptorType *)val->pDescriptorTypes, array_size);
+ } else {
+ vn_decode_array_size(dec, val->descriptorTypeCount);
+ val->pDescriptorTypes = NULL;
+ }
+}
+
+static inline void
+vn_replace_VkMutableDescriptorTypeListEXT_handle(VkMutableDescriptorTypeListEXT *val)
+{
+ /* skip val->descriptorTypeCount */
+ /* skip val->pDescriptorTypes */
+}
+
+/* struct VkMutableDescriptorTypeCreateInfoEXT chain */
+
+static inline void *
+vn_decode_VkMutableDescriptorTypeCreateInfoEXT_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkMutableDescriptorTypeCreateInfoEXT_self_temp(struct vn_cs_decoder *dec, VkMutableDescriptorTypeCreateInfoEXT *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->mutableDescriptorTypeListCount);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->mutableDescriptorTypeListCount);
+ val->pMutableDescriptorTypeLists = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pMutableDescriptorTypeLists) * iter_count);
+ if (!val->pMutableDescriptorTypeLists) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkMutableDescriptorTypeListEXT_temp(dec, &((VkMutableDescriptorTypeListEXT *)val->pMutableDescriptorTypeLists)[i]);
+ } else {
+ vn_decode_array_size(dec, val->mutableDescriptorTypeListCount);
+ val->pMutableDescriptorTypeLists = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkMutableDescriptorTypeCreateInfoEXT_temp(struct vn_cs_decoder *dec, VkMutableDescriptorTypeCreateInfoEXT *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkMutableDescriptorTypeCreateInfoEXT_pnext_temp(dec);
+ vn_decode_VkMutableDescriptorTypeCreateInfoEXT_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkMutableDescriptorTypeCreateInfoEXT_handle_self(VkMutableDescriptorTypeCreateInfoEXT *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->mutableDescriptorTypeListCount */
+ if (val->pMutableDescriptorTypeLists) {
+ for (uint32_t i = 0; i < val->mutableDescriptorTypeListCount; i++)
+ vn_replace_VkMutableDescriptorTypeListEXT_handle(&((VkMutableDescriptorTypeListEXT *)val->pMutableDescriptorTypeLists)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkMutableDescriptorTypeCreateInfoEXT_handle(VkMutableDescriptorTypeCreateInfoEXT *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT:
+ vn_replace_VkMutableDescriptorTypeCreateInfoEXT_handle_self((VkMutableDescriptorTypeCreateInfoEXT *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkDescriptorImageInfo */
+
+static inline void
+vn_decode_VkDescriptorImageInfo_temp(struct vn_cs_decoder *dec, VkDescriptorImageInfo *val)
+{
+ vn_decode_VkSampler_lookup(dec, &val->sampler);
+ vn_decode_VkImageView_lookup(dec, &val->imageView);
+ vn_decode_VkImageLayout(dec, &val->imageLayout);
+}
+
+static inline void
+vn_replace_VkDescriptorImageInfo_handle(VkDescriptorImageInfo *val)
+{
+ vn_replace_VkSampler_handle(&val->sampler);
+ vn_replace_VkImageView_handle(&val->imageView);
+ /* skip val->imageLayout */
+}
+
+/* struct VkDescriptorBufferInfo */
+
+static inline void
+vn_decode_VkDescriptorBufferInfo_temp(struct vn_cs_decoder *dec, VkDescriptorBufferInfo *val)
+{
+ vn_decode_VkBuffer_lookup(dec, &val->buffer);
+ vn_decode_VkDeviceSize(dec, &val->offset);
+ vn_decode_VkDeviceSize(dec, &val->range);
+}
+
+static inline void
+vn_replace_VkDescriptorBufferInfo_handle(VkDescriptorBufferInfo *val)
+{
+ vn_replace_VkBuffer_handle(&val->buffer);
+ /* skip val->offset */
+ /* skip val->range */
+}
+
+/* struct VkWriteDescriptorSetInlineUniformBlock chain */
+
+static inline void *
+vn_decode_VkWriteDescriptorSetInlineUniformBlock_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkWriteDescriptorSetInlineUniformBlock_self_temp(struct vn_cs_decoder *dec, VkWriteDescriptorSetInlineUniformBlock *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_uint32_t(dec, &val->dataSize);
+ if (vn_peek_array_size(dec)) {
+ const size_t array_size = vn_decode_array_size(dec, val->dataSize);
+ val->pData = vn_cs_decoder_alloc_temp(dec, array_size);
+ if (!val->pData) return;
+ vn_decode_blob_array(dec, (void *)val->pData, array_size);
+ } else {
+ vn_decode_array_size(dec, val->dataSize);
+ val->pData = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkWriteDescriptorSetInlineUniformBlock_temp(struct vn_cs_decoder *dec, VkWriteDescriptorSetInlineUniformBlock *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkWriteDescriptorSetInlineUniformBlock_pnext_temp(dec);
+ vn_decode_VkWriteDescriptorSetInlineUniformBlock_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkWriteDescriptorSetInlineUniformBlock_handle_self(VkWriteDescriptorSetInlineUniformBlock *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->dataSize */
+ /* skip val->pData */
+}
+
+static inline void
+vn_replace_VkWriteDescriptorSetInlineUniformBlock_handle(VkWriteDescriptorSetInlineUniformBlock *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK:
+ vn_replace_VkWriteDescriptorSetInlineUniformBlock_handle_self((VkWriteDescriptorSetInlineUniformBlock *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
+/* struct VkWriteDescriptorSet chain */
+
+static inline void *
+vn_decode_VkWriteDescriptorSet_pnext_temp(struct vn_cs_decoder *dec)
+{
+ VkBaseOutStructure *pnext;
+ VkStructureType stype;
+
+ if (!vn_decode_simple_pointer(dec))
+ return NULL;
+
+ vn_decode_VkStructureType(dec, &stype);
+ switch ((int32_t)stype) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK:
+ pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkWriteDescriptorSetInlineUniformBlock));
+ if (pnext) {
+ pnext->sType = stype;
+ pnext->pNext = vn_decode_VkWriteDescriptorSet_pnext_temp(dec);
+ vn_decode_VkWriteDescriptorSetInlineUniformBlock_self_temp(dec, (VkWriteDescriptorSetInlineUniformBlock *)pnext);
+ }
+ break;
+ default:
+ /* unexpected struct */
+ pnext = NULL;
+ vn_cs_decoder_set_fatal(dec);
+ break;
+ }
+
+ return pnext;
+}
+
+static inline void
+vn_decode_VkWriteDescriptorSet_self_temp(struct vn_cs_decoder *dec, VkWriteDescriptorSet *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkDescriptorSet_lookup(dec, &val->dstSet);
+ vn_decode_uint32_t(dec, &val->dstBinding);
+ vn_decode_uint32_t(dec, &val->dstArrayElement);
+ vn_decode_uint32_t(dec, &val->descriptorCount);
+ vn_decode_VkDescriptorType(dec, &val->descriptorType);
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->descriptorCount);
+ val->pImageInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pImageInfo) * iter_count);
+ if (!val->pImageInfo) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkDescriptorImageInfo_temp(dec, &((VkDescriptorImageInfo *)val->pImageInfo)[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ val->pImageInfo = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->descriptorCount);
+ val->pBufferInfo = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pBufferInfo) * iter_count);
+ if (!val->pBufferInfo) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkDescriptorBufferInfo_temp(dec, &((VkDescriptorBufferInfo *)val->pBufferInfo)[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ val->pBufferInfo = NULL;
+ }
+ if (vn_peek_array_size(dec)) {
+ const uint32_t iter_count = vn_decode_array_size(dec, val->descriptorCount);
+ val->pTexelBufferView = vn_cs_decoder_alloc_temp(dec, sizeof(*val->pTexelBufferView) * iter_count);
+ if (!val->pTexelBufferView) return;
+ for (uint32_t i = 0; i < iter_count; i++)
+ vn_decode_VkBufferView_lookup(dec, &((VkBufferView *)val->pTexelBufferView)[i]);
+ } else {
+ vn_decode_array_size_unchecked(dec);
+ val->pTexelBufferView = NULL;
+ }
+}
+
+static inline void
+vn_decode_VkWriteDescriptorSet_temp(struct vn_cs_decoder *dec, VkWriteDescriptorSet *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkWriteDescriptorSet_pnext_temp(dec);
+ vn_decode_VkWriteDescriptorSet_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkWriteDescriptorSet_handle_self(VkWriteDescriptorSet *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ vn_replace_VkDescriptorSet_handle(&val->dstSet);
+ /* skip val->dstBinding */
+ /* skip val->dstArrayElement */
+ /* skip val->descriptorCount */
+ /* skip val->descriptorType */
+ if (val->pImageInfo) {
+ for (uint32_t i = 0; i < val->descriptorCount; i++)
+ vn_replace_VkDescriptorImageInfo_handle(&((VkDescriptorImageInfo *)val->pImageInfo)[i]);
+ }
+ if (val->pBufferInfo) {
+ for (uint32_t i = 0; i < val->descriptorCount; i++)
+ vn_replace_VkDescriptorBufferInfo_handle(&((VkDescriptorBufferInfo *)val->pBufferInfo)[i]);
+ }
+ if (val->pTexelBufferView) {
+ for (uint32_t i = 0; i < val->descriptorCount; i++)
+ vn_replace_VkBufferView_handle(&((VkBufferView *)val->pTexelBufferView)[i]);
+ }
+}
+
+static inline void
+vn_replace_VkWriteDescriptorSet_handle(VkWriteDescriptorSet *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ vn_replace_VkWriteDescriptorSet_handle_self((VkWriteDescriptorSet *)pnext);
+ break;
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK:
+ vn_replace_VkWriteDescriptorSetInlineUniformBlock_handle_self((VkWriteDescriptorSetInlineUniformBlock *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
/* struct VkMemoryDedicatedRequirements chain */
static inline void
@@ -753,6 +1171,69 @@ vn_decode_VkMemoryRequirements2_partial_temp(struct vn_cs_decoder *dec, VkMemory
vn_decode_VkMemoryRequirements2_self_partial_temp(dec, val);
}
+/* struct VkMemoryBarrier2 chain */
+
+static inline void *
+vn_decode_VkMemoryBarrier2_pnext_temp(struct vn_cs_decoder *dec)
+{
+ /* no known/supported struct */
+ if (vn_decode_simple_pointer(dec))
+ vn_cs_decoder_set_fatal(dec);
+ return NULL;
+}
+
+static inline void
+vn_decode_VkMemoryBarrier2_self_temp(struct vn_cs_decoder *dec, VkMemoryBarrier2 *val)
+{
+ /* skip val->{sType,pNext} */
+ vn_decode_VkFlags64(dec, &val->srcStageMask);
+ vn_decode_VkFlags64(dec, &val->srcAccessMask);
+ vn_decode_VkFlags64(dec, &val->dstStageMask);
+ vn_decode_VkFlags64(dec, &val->dstAccessMask);
+}
+
+static inline void
+vn_decode_VkMemoryBarrier2_temp(struct vn_cs_decoder *dec, VkMemoryBarrier2 *val)
+{
+ VkStructureType stype;
+ vn_decode_VkStructureType(dec, &stype);
+ if (stype != VK_STRUCTURE_TYPE_MEMORY_BARRIER_2)
+ vn_cs_decoder_set_fatal(dec);
+
+ val->sType = stype;
+ val->pNext = vn_decode_VkMemoryBarrier2_pnext_temp(dec);
+ vn_decode_VkMemoryBarrier2_self_temp(dec, val);
+}
+
+static inline void
+vn_replace_VkMemoryBarrier2_handle_self(VkMemoryBarrier2 *val)
+{
+ /* skip val->sType */
+ /* skip val->pNext */
+ /* skip val->srcStageMask */
+ /* skip val->srcAccessMask */
+ /* skip val->dstStageMask */
+ /* skip val->dstAccessMask */
+}
+
+static inline void
+vn_replace_VkMemoryBarrier2_handle(VkMemoryBarrier2 *val)
+{
+ struct VkBaseOutStructure *pnext = (struct VkBaseOutStructure *)val;
+
+ do {
+ switch ((int32_t)pnext->sType) {
+ case VK_STRUCTURE_TYPE_MEMORY_BARRIER_2:
+ vn_replace_VkMemoryBarrier2_handle_self((VkMemoryBarrier2 *)pnext);
+ break;
+ default:
+ /* ignore unknown/unsupported struct */
+ break;
+ }
+ pnext = pnext->pNext;
+ } while (pnext);
+}
+
#pragma GCC diagnostic pop
#endif /* VN_PROTOCOL_RENDERER_STRUCTS_H */
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_transport.h b/src/venus/venus-protocol/vn_protocol_renderer_transport.h
index 8cc01abd..0d9e7d73 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_transport.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_transport.h
@@ -14,13 +14,6 @@
#pragma GCC diagnostic ignored "-Wpointer-arith"
#pragma GCC diagnostic ignored "-Wunused-parameter"
-/*
- * These structs/unions/commands are not included
- *
- * vkGetMemoryFdKHR
- * vkGetMemoryFdPropertiesKHR
- */
-
/* struct VkCommandStreamDescriptionMESA */
static inline void
@@ -134,150 +127,6 @@ vn_replace_VkRingCreateInfoMESA_handle(VkRingCreateInfoMESA *val)
} while (pnext);
}
-/* struct VkMemoryResourceAllocationSizeProperties100000MESA chain */
-
-static inline void
-vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- /* no known/supported struct */
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_self(struct vn_cs_encoder *enc, const VkMemoryResourceAllocationSizeProperties100000MESA *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_uint64_t(enc, &val->allocationSize);
-}
-
-static inline void
-vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA(struct vn_cs_encoder *enc, const VkMemoryResourceAllocationSizeProperties100000MESA *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA });
- vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext(enc, val->pNext);
- vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_self(enc, val);
-}
-
-static inline void *
-vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext_partial_temp(struct vn_cs_decoder *dec)
-{
- /* no known/supported struct */
- if (vn_decode_simple_pointer(dec))
- vn_cs_decoder_set_fatal(dec);
- return NULL;
-}
-
-static inline void
-vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_self_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourceAllocationSizeProperties100000MESA *val)
-{
- /* skip val->{sType,pNext} */
- /* skip val->allocationSize */
-}
-
-static inline void
-vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourceAllocationSizeProperties100000MESA *val)
-{
- VkStructureType stype;
- vn_decode_VkStructureType(dec, &stype);
- if (stype != VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA)
- vn_cs_decoder_set_fatal(dec);
-
- val->sType = stype;
- val->pNext = vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_pnext_partial_temp(dec);
- vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_self_partial_temp(dec, val);
-}
-
-/* struct VkMemoryResourcePropertiesMESA chain */
-
-static inline void
-vn_encode_VkMemoryResourcePropertiesMESA_pnext(struct vn_cs_encoder *enc, const void *val)
-{
- const VkBaseInStructure *pnext = val;
-
- while (pnext) {
- switch ((int32_t)pnext->sType) {
- case VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA:
- vn_encode_simple_pointer(enc, pnext);
- vn_encode_VkStructureType(enc, &pnext->sType);
- vn_encode_VkMemoryResourcePropertiesMESA_pnext(enc, pnext->pNext);
- vn_encode_VkMemoryResourceAllocationSizeProperties100000MESA_self(enc, (const VkMemoryResourceAllocationSizeProperties100000MESA *)pnext);
- return;
- default:
- /* ignore unknown/unsupported struct */
- break;
- }
- pnext = pnext->pNext;
- }
-
- vn_encode_simple_pointer(enc, NULL);
-}
-
-static inline void
-vn_encode_VkMemoryResourcePropertiesMESA_self(struct vn_cs_encoder *enc, const VkMemoryResourcePropertiesMESA *val)
-{
- /* skip val->{sType,pNext} */
- vn_encode_uint32_t(enc, &val->memoryTypeBits);
-}
-
-static inline void
-vn_encode_VkMemoryResourcePropertiesMESA(struct vn_cs_encoder *enc, const VkMemoryResourcePropertiesMESA *val)
-{
- assert(val->sType == VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA);
- vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA });
- vn_encode_VkMemoryResourcePropertiesMESA_pnext(enc, val->pNext);
- vn_encode_VkMemoryResourcePropertiesMESA_self(enc, val);
-}
-
-static inline void *
-vn_decode_VkMemoryResourcePropertiesMESA_pnext_partial_temp(struct vn_cs_decoder *dec)
-{
- VkBaseOutStructure *pnext;
- VkStructureType stype;
-
- if (!vn_decode_simple_pointer(dec))
- return NULL;
-
- vn_decode_VkStructureType(dec, &stype);
- switch ((int32_t)stype) {
- case VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA:
- pnext = vn_cs_decoder_alloc_temp(dec, sizeof(VkMemoryResourceAllocationSizeProperties100000MESA));
- if (pnext) {
- pnext->sType = stype;
- pnext->pNext = vn_decode_VkMemoryResourcePropertiesMESA_pnext_partial_temp(dec);
- vn_decode_VkMemoryResourceAllocationSizeProperties100000MESA_self_partial_temp(dec, (VkMemoryResourceAllocationSizeProperties100000MESA *)pnext);
- }
- break;
- default:
- /* unexpected struct */
- pnext = NULL;
- vn_cs_decoder_set_fatal(dec);
- break;
- }
-
- return pnext;
-}
-
-static inline void
-vn_decode_VkMemoryResourcePropertiesMESA_self_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourcePropertiesMESA *val)
-{
- /* skip val->{sType,pNext} */
- /* skip val->memoryTypeBits */
-}
-
-static inline void
-vn_decode_VkMemoryResourcePropertiesMESA_partial_temp(struct vn_cs_decoder *dec, VkMemoryResourcePropertiesMESA *val)
-{
- VkStructureType stype;
- vn_decode_VkStructureType(dec, &stype);
- if (stype != VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA)
- vn_cs_decoder_set_fatal(dec);
-
- val->sType = stype;
- val->pNext = vn_decode_VkMemoryResourcePropertiesMESA_pnext_partial_temp(dec);
- vn_decode_VkMemoryResourcePropertiesMESA_self_partial_temp(dec, val);
-}
-
static inline void vn_decode_vkSetReplyCommandStreamMESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkSetReplyCommandStreamMESA *args)
{
if (vn_decode_simple_pointer(dec)) {
@@ -475,38 +324,6 @@ static inline void vn_encode_vkWriteRingExtraMESA_reply(struct vn_cs_encoder *en
/* skip args->value */
}
-static inline void vn_decode_vkGetMemoryResourcePropertiesMESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
-{
- vn_decode_VkDevice_lookup(dec, &args->device);
- vn_decode_uint32_t(dec, &args->resourceId);
- if (vn_decode_simple_pointer(dec)) {
- args->pMemoryResourceProperties = vn_cs_decoder_alloc_temp(dec, sizeof(*args->pMemoryResourceProperties));
- if (!args->pMemoryResourceProperties) return;
- vn_decode_VkMemoryResourcePropertiesMESA_partial_temp(dec, args->pMemoryResourceProperties);
- } else {
- args->pMemoryResourceProperties = NULL;
- vn_cs_decoder_set_fatal(dec);
- }
-}
-
-static inline void vn_replace_vkGetMemoryResourcePropertiesMESA_args_handle(struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
-{
- vn_replace_VkDevice_handle(&args->device);
- /* skip args->resourceId */
- /* skip args->pMemoryResourceProperties */
-}
-
-static inline void vn_encode_vkGetMemoryResourcePropertiesMESA_reply(struct vn_cs_encoder *enc, const struct vn_command_vkGetMemoryResourcePropertiesMESA *args)
-{
- vn_encode_VkCommandTypeEXT(enc, &(VkCommandTypeEXT){VK_COMMAND_TYPE_vkGetMemoryResourcePropertiesMESA_EXT});
-
- vn_encode_VkResult(enc, &args->ret);
- /* skip args->device */
- /* skip args->resourceId */
- if (vn_encode_simple_pointer(enc, args->pMemoryResourceProperties))
- vn_encode_VkMemoryResourcePropertiesMESA(enc, args->pMemoryResourceProperties);
-}
-
static inline void vn_decode_vkGetVenusExperimentalFeatureData100000MESA_args_temp(struct vn_cs_decoder *dec, struct vn_command_vkGetVenusExperimentalFeatureData100000MESA *args)
{
if (vn_decode_simple_pointer(dec)) {
@@ -694,35 +511,6 @@ static inline void vn_dispatch_vkWriteRingExtraMESA(struct vn_dispatch_context *
vn_cs_decoder_reset_temp_pool(ctx->decoder);
}
-static inline void vn_dispatch_vkGetMemoryResourcePropertiesMESA(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
-{
- struct vn_command_vkGetMemoryResourcePropertiesMESA args;
-
- if (!ctx->dispatch_vkGetMemoryResourcePropertiesMESA) {
- vn_cs_decoder_set_fatal(ctx->decoder);
- return;
- }
-
- vn_decode_vkGetMemoryResourcePropertiesMESA_args_temp(ctx->decoder, &args);
- if (!args.device) {
- vn_cs_decoder_set_fatal(ctx->decoder);
- return;
- }
-
- if (!vn_cs_decoder_get_fatal(ctx->decoder))
- ctx->dispatch_vkGetMemoryResourcePropertiesMESA(ctx, &args);
-
-#ifdef DEBUG
- if (!vn_cs_decoder_get_fatal(ctx->decoder) && vn_dispatch_should_log_result(args.ret))
- vn_dispatch_debug_log(ctx, "vkGetMemoryResourcePropertiesMESA returned %d", args.ret);
-#endif
-
- if (!vn_cs_decoder_get_fatal(ctx->decoder) && (flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT))
- vn_encode_vkGetMemoryResourcePropertiesMESA_reply(ctx->encoder, &args);
-
- vn_cs_decoder_reset_temp_pool(ctx->decoder);
-}
-
static inline void vn_dispatch_vkGetVenusExperimentalFeatureData100000MESA(struct vn_dispatch_context *ctx, VkCommandFlagsEXT flags)
{
struct vn_command_vkGetVenusExperimentalFeatureData100000MESA args;
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_types.h b/src/venus/venus-protocol/vn_protocol_renderer_types.h
index f370014a..0e6e18be 100644
--- a/src/venus/venus-protocol/vn_protocol_renderer_types.h
+++ b/src/venus/venus-protocol/vn_protocol_renderer_types.h
@@ -297,6 +297,20 @@ vn_decode_uint8_t_array(struct vn_cs_decoder *dec, uint8_t *val, uint32_t count)
vn_decode(dec, (size + 3) & ~3, val, size);
}
+/* uint16_t */
+
+static inline void
+vn_encode_uint16_t(struct vn_cs_encoder *enc, const uint16_t *val)
+{
+ vn_encode(enc, 4, val, sizeof(*val));
+}
+
+static inline void
+vn_decode_uint16_t(struct vn_cs_decoder *dec, uint16_t *val)
+{
+ vn_decode(dec, 4, val, sizeof(*val));
+}
+
/* typedef uint32_t VkSampleMask */
static inline void
@@ -375,6 +389,20 @@ vn_decode_VkFlags_array(struct vn_cs_decoder *dec, VkFlags *val, uint32_t count)
vn_decode_uint32_t_array(dec, val, count);
}
+/* typedef uint64_t VkFlags64 */
+
+static inline void
+vn_encode_VkFlags64(struct vn_cs_encoder *enc, const VkFlags64 *val)
+{
+ vn_encode_uint64_t(enc, val);
+}
+
+static inline void
+vn_decode_VkFlags64(struct vn_cs_decoder *dec, VkFlags64 *val)
+{
+ vn_decode_uint64_t(dec, val);
+}
+
/* typedef uint64_t VkDeviceSize */
static inline void
@@ -469,6 +497,20 @@ vn_decode_VkSamplerCreateFlagBits(struct vn_cs_decoder *dec, VkSamplerCreateFlag
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkPipelineLayoutCreateFlagBits */
+
+static inline void
+vn_encode_VkPipelineLayoutCreateFlagBits(struct vn_cs_encoder *enc, const VkPipelineLayoutCreateFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkPipelineLayoutCreateFlagBits(struct vn_cs_decoder *dec, VkPipelineLayoutCreateFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkPipelineCacheCreateFlagBits */
static inline void
@@ -483,6 +525,34 @@ vn_decode_VkPipelineCacheCreateFlagBits(struct vn_cs_decoder *dec, VkPipelineCac
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkPipelineDepthStencilStateCreateFlagBits */
+
+static inline void
+vn_encode_VkPipelineDepthStencilStateCreateFlagBits(struct vn_cs_encoder *enc, const VkPipelineDepthStencilStateCreateFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkPipelineDepthStencilStateCreateFlagBits(struct vn_cs_decoder *dec, VkPipelineDepthStencilStateCreateFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
+/* enum VkPipelineColorBlendStateCreateFlagBits */
+
+static inline void
+vn_encode_VkPipelineColorBlendStateCreateFlagBits(struct vn_cs_encoder *enc, const VkPipelineColorBlendStateCreateFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkPipelineColorBlendStateCreateFlagBits(struct vn_cs_decoder *dec, VkPipelineColorBlendStateCreateFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkPipelineShaderStageCreateFlagBits */
static inline void
@@ -511,6 +581,20 @@ vn_decode_VkDescriptorSetLayoutCreateFlagBits(struct vn_cs_decoder *dec, VkDescr
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkInstanceCreateFlagBits */
+
+static inline void
+vn_encode_VkInstanceCreateFlagBits(struct vn_cs_encoder *enc, const VkInstanceCreateFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkInstanceCreateFlagBits(struct vn_cs_decoder *dec, VkInstanceCreateFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkDeviceQueueCreateFlagBits */
static inline void
@@ -917,6 +1001,18 @@ vn_decode_VkSampleCountFlagBits(struct vn_cs_decoder *dec, VkSampleCountFlagBits
vn_decode_int32_t(dec, (int32_t *)val);
}
+static inline void
+vn_encode_VkSampleCountFlagBits_array(struct vn_cs_encoder *enc, const VkSampleCountFlagBits *val, uint32_t count)
+{
+ vn_encode_int32_t_array(enc, (const int32_t *)val, count);
+}
+
+static inline void
+vn_decode_VkSampleCountFlagBits_array(struct vn_cs_decoder *dec, VkSampleCountFlagBits *val, uint32_t count)
+{
+ vn_decode_int32_t_array(dec, (int32_t *)val, count);
+}
+
/* enum VkAttachmentDescriptionFlagBits */
static inline void
@@ -1001,6 +1097,20 @@ vn_decode_VkSubgroupFeatureFlagBits(struct vn_cs_decoder *dec, VkSubgroupFeature
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkPipelineCreationFeedbackFlagBits */
+
+static inline void
+vn_encode_VkPipelineCreationFeedbackFlagBits(struct vn_cs_encoder *enc, const VkPipelineCreationFeedbackFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkPipelineCreationFeedbackFlagBits(struct vn_cs_decoder *dec, VkPipelineCreationFeedbackFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkSemaphoreWaitFlagBits */
static inline void
@@ -1015,6 +1125,62 @@ vn_decode_VkSemaphoreWaitFlagBits(struct vn_cs_decoder *dec, VkSemaphoreWaitFlag
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* typedef VkFlags64 VkAccessFlagBits2 */
+
+static inline void
+vn_encode_VkAccessFlagBits2(struct vn_cs_encoder *enc, const VkAccessFlagBits2 *val)
+{
+ vn_encode_uint64_t(enc, (const uint64_t *)val);
+}
+
+static inline void
+vn_decode_VkAccessFlagBits2(struct vn_cs_decoder *dec, VkAccessFlagBits2 *val)
+{
+ vn_decode_uint64_t(dec, (uint64_t *)val);
+}
+
+/* typedef VkFlags64 VkPipelineStageFlagBits2 */
+
+static inline void
+vn_encode_VkPipelineStageFlagBits2(struct vn_cs_encoder *enc, const VkPipelineStageFlagBits2 *val)
+{
+ vn_encode_uint64_t(enc, (const uint64_t *)val);
+}
+
+static inline void
+vn_decode_VkPipelineStageFlagBits2(struct vn_cs_decoder *dec, VkPipelineStageFlagBits2 *val)
+{
+ vn_decode_uint64_t(dec, (uint64_t *)val);
+}
+
+/* typedef VkFlags64 VkFormatFeatureFlagBits2 */
+
+static inline void
+vn_encode_VkFormatFeatureFlagBits2(struct vn_cs_encoder *enc, const VkFormatFeatureFlagBits2 *val)
+{
+ vn_encode_uint64_t(enc, (const uint64_t *)val);
+}
+
+static inline void
+vn_decode_VkFormatFeatureFlagBits2(struct vn_cs_decoder *dec, VkFormatFeatureFlagBits2 *val)
+{
+ vn_decode_uint64_t(dec, (uint64_t *)val);
+}
+
+/* enum VkRenderingFlagBits */
+
+static inline void
+vn_encode_VkRenderingFlagBits(struct vn_cs_encoder *enc, const VkRenderingFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkRenderingFlagBits(struct vn_cs_decoder *dec, VkRenderingFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkPeerMemoryFeatureFlagBits */
static inline void
@@ -1169,6 +1335,20 @@ vn_decode_VkDescriptorBindingFlagBits(struct vn_cs_decoder *dec, VkDescriptorBin
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkConditionalRenderingFlagBitsEXT */
+
+static inline void
+vn_encode_VkConditionalRenderingFlagBitsEXT(struct vn_cs_encoder *enc, const VkConditionalRenderingFlagBitsEXT *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkConditionalRenderingFlagBitsEXT(struct vn_cs_decoder *dec, VkConditionalRenderingFlagBitsEXT *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkResolveModeFlagBits */
static inline void
@@ -1183,6 +1363,34 @@ vn_decode_VkResolveModeFlagBits(struct vn_cs_decoder *dec, VkResolveModeFlagBits
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkToolPurposeFlagBits */
+
+static inline void
+vn_encode_VkToolPurposeFlagBits(struct vn_cs_encoder *enc, const VkToolPurposeFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkToolPurposeFlagBits(struct vn_cs_decoder *dec, VkToolPurposeFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
+/* enum VkSubmitFlagBits */
+
+static inline void
+vn_encode_VkSubmitFlagBits(struct vn_cs_encoder *enc, const VkSubmitFlagBits *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkSubmitFlagBits(struct vn_cs_decoder *dec, VkSubmitFlagBits *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkAttachmentLoadOp */
static inline void
@@ -1761,6 +1969,46 @@ vn_decode_VkPointClippingBehavior(struct vn_cs_decoder *dec, VkPointClippingBeha
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkTimeDomainEXT */
+
+static inline void
+vn_encode_VkTimeDomainEXT(struct vn_cs_encoder *enc, const VkTimeDomainEXT *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkTimeDomainEXT(struct vn_cs_decoder *dec, VkTimeDomainEXT *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
+static inline void
+vn_encode_VkTimeDomainEXT_array(struct vn_cs_encoder *enc, const VkTimeDomainEXT *val, uint32_t count)
+{
+ vn_encode_int32_t_array(enc, (const int32_t *)val, count);
+}
+
+static inline void
+vn_decode_VkTimeDomainEXT_array(struct vn_cs_decoder *dec, VkTimeDomainEXT *val, uint32_t count)
+{
+ vn_decode_int32_t_array(dec, (int32_t *)val, count);
+}
+
+/* enum VkConservativeRasterizationModeEXT */
+
+static inline void
+vn_encode_VkConservativeRasterizationModeEXT(struct vn_cs_encoder *enc, const VkConservativeRasterizationModeEXT *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkConservativeRasterizationModeEXT(struct vn_cs_decoder *dec, VkConservativeRasterizationModeEXT *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkSemaphoreType */
static inline void
@@ -1775,6 +2023,34 @@ vn_decode_VkSemaphoreType(struct vn_cs_decoder *dec, VkSemaphoreType *val)
vn_decode_int32_t(dec, (int32_t *)val);
}
+/* enum VkLineRasterizationModeEXT */
+
+static inline void
+vn_encode_VkLineRasterizationModeEXT(struct vn_cs_encoder *enc, const VkLineRasterizationModeEXT *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkLineRasterizationModeEXT(struct vn_cs_decoder *dec, VkLineRasterizationModeEXT *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
+/* enum VkProvokingVertexModeEXT */
+
+static inline void
+vn_encode_VkProvokingVertexModeEXT(struct vn_cs_encoder *enc, const VkProvokingVertexModeEXT *val)
+{
+ vn_encode_int32_t(enc, (const int32_t *)val);
+}
+
+static inline void
+vn_decode_VkProvokingVertexModeEXT(struct vn_cs_decoder *dec, VkProvokingVertexModeEXT *val)
+{
+ vn_decode_int32_t(dec, (int32_t *)val);
+}
+
/* enum VkTessellationDomainOrigin */
static inline void
diff --git a/src/venus/venus-protocol/vn_protocol_renderer_util.h b/src/venus/venus-protocol/vn_protocol_renderer_util.h
new file mode 100644
index 00000000..0f243be3
--- /dev/null
+++ b/src/venus/venus-protocol/vn_protocol_renderer_util.h
@@ -0,0 +1,703 @@
+/* This file is generated by venus-protocol. See vn_protocol_renderer.h. */
+
+/*
+ * Copyright 2022 Google LLC
+ * Copyright 2022 Collabora Ltd.
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef VN_PROTOCOL_RENDERER_UTIL_H
+#define VN_PROTOCOL_RENDERER_UTIL_H
+
+#include "vn_protocol_renderer_info.h"
+
+struct vn_physical_device_proc_table {
+ PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT GetPhysicalDeviceCalibrateableTimeDomainsEXT;
+ PFN_vkGetPhysicalDeviceToolProperties GetPhysicalDeviceToolProperties;
+};
+
+struct vn_device_proc_table {
+ PFN_vkAllocateCommandBuffers AllocateCommandBuffers;
+ PFN_vkAllocateDescriptorSets AllocateDescriptorSets;
+ PFN_vkAllocateMemory AllocateMemory;
+ PFN_vkBeginCommandBuffer BeginCommandBuffer;
+ PFN_vkBindBufferMemory BindBufferMemory;
+ PFN_vkBindBufferMemory2 BindBufferMemory2;
+ PFN_vkBindImageMemory BindImageMemory;
+ PFN_vkBindImageMemory2 BindImageMemory2;
+ PFN_vkCmdBeginConditionalRenderingEXT CmdBeginConditionalRenderingEXT;
+ PFN_vkCmdBeginQuery CmdBeginQuery;
+ PFN_vkCmdBeginQueryIndexedEXT CmdBeginQueryIndexedEXT;
+ PFN_vkCmdBeginRenderPass CmdBeginRenderPass;
+ PFN_vkCmdBeginRenderPass2 CmdBeginRenderPass2;
+ PFN_vkCmdBeginRendering CmdBeginRendering;
+ PFN_vkCmdBeginTransformFeedbackEXT CmdBeginTransformFeedbackEXT;
+ PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets;
+ PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer;
+ PFN_vkCmdBindPipeline CmdBindPipeline;
+ PFN_vkCmdBindTransformFeedbackBuffersEXT CmdBindTransformFeedbackBuffersEXT;
+ PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers;
+ PFN_vkCmdBindVertexBuffers2 CmdBindVertexBuffers2;
+ PFN_vkCmdBlitImage CmdBlitImage;
+ PFN_vkCmdBlitImage2 CmdBlitImage2;
+ PFN_vkCmdClearAttachments CmdClearAttachments;
+ PFN_vkCmdClearColorImage CmdClearColorImage;
+ PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage;
+ PFN_vkCmdCopyBuffer CmdCopyBuffer;
+ PFN_vkCmdCopyBuffer2 CmdCopyBuffer2;
+ PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage;
+ PFN_vkCmdCopyBufferToImage2 CmdCopyBufferToImage2;
+ PFN_vkCmdCopyImage CmdCopyImage;
+ PFN_vkCmdCopyImage2 CmdCopyImage2;
+ PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer;
+ PFN_vkCmdCopyImageToBuffer2 CmdCopyImageToBuffer2;
+ PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults;
+ PFN_vkCmdDispatch CmdDispatch;
+ PFN_vkCmdDispatchBase CmdDispatchBase;
+ PFN_vkCmdDispatchIndirect CmdDispatchIndirect;
+ PFN_vkCmdDraw CmdDraw;
+ PFN_vkCmdDrawIndexed CmdDrawIndexed;
+ PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect;
+ PFN_vkCmdDrawIndexedIndirectCount CmdDrawIndexedIndirectCount;
+ PFN_vkCmdDrawIndirect CmdDrawIndirect;
+ PFN_vkCmdDrawIndirectByteCountEXT CmdDrawIndirectByteCountEXT;
+ PFN_vkCmdDrawIndirectCount CmdDrawIndirectCount;
+ PFN_vkCmdDrawMultiEXT CmdDrawMultiEXT;
+ PFN_vkCmdDrawMultiIndexedEXT CmdDrawMultiIndexedEXT;
+ PFN_vkCmdEndConditionalRenderingEXT CmdEndConditionalRenderingEXT;
+ PFN_vkCmdEndQuery CmdEndQuery;
+ PFN_vkCmdEndQueryIndexedEXT CmdEndQueryIndexedEXT;
+ PFN_vkCmdEndRenderPass CmdEndRenderPass;
+ PFN_vkCmdEndRenderPass2 CmdEndRenderPass2;
+ PFN_vkCmdEndRendering CmdEndRendering;
+ PFN_vkCmdEndTransformFeedbackEXT CmdEndTransformFeedbackEXT;
+ PFN_vkCmdExecuteCommands CmdExecuteCommands;
+ PFN_vkCmdFillBuffer CmdFillBuffer;
+ PFN_vkCmdNextSubpass CmdNextSubpass;
+ PFN_vkCmdNextSubpass2 CmdNextSubpass2;
+ PFN_vkCmdPipelineBarrier CmdPipelineBarrier;
+ PFN_vkCmdPipelineBarrier2 CmdPipelineBarrier2;
+ PFN_vkCmdPushConstants CmdPushConstants;
+ PFN_vkCmdPushDescriptorSetKHR CmdPushDescriptorSetKHR;
+ PFN_vkCmdPushDescriptorSetWithTemplateKHR CmdPushDescriptorSetWithTemplateKHR;
+ PFN_vkCmdResetEvent CmdResetEvent;
+ PFN_vkCmdResetEvent2 CmdResetEvent2;
+ PFN_vkCmdResetQueryPool CmdResetQueryPool;
+ PFN_vkCmdResolveImage CmdResolveImage;
+ PFN_vkCmdResolveImage2 CmdResolveImage2;
+ PFN_vkCmdSetBlendConstants CmdSetBlendConstants;
+ PFN_vkCmdSetCullMode CmdSetCullMode;
+ PFN_vkCmdSetDepthBias CmdSetDepthBias;
+ PFN_vkCmdSetDepthBiasEnable CmdSetDepthBiasEnable;
+ PFN_vkCmdSetDepthBounds CmdSetDepthBounds;
+ PFN_vkCmdSetDepthBoundsTestEnable CmdSetDepthBoundsTestEnable;
+ PFN_vkCmdSetDepthCompareOp CmdSetDepthCompareOp;
+ PFN_vkCmdSetDepthTestEnable CmdSetDepthTestEnable;
+ PFN_vkCmdSetDepthWriteEnable CmdSetDepthWriteEnable;
+ PFN_vkCmdSetDeviceMask CmdSetDeviceMask;
+ PFN_vkCmdSetEvent CmdSetEvent;
+ PFN_vkCmdSetEvent2 CmdSetEvent2;
+ PFN_vkCmdSetFrontFace CmdSetFrontFace;
+ PFN_vkCmdSetLineStippleEXT CmdSetLineStippleEXT;
+ PFN_vkCmdSetLineWidth CmdSetLineWidth;
+ PFN_vkCmdSetLogicOpEXT CmdSetLogicOpEXT;
+ PFN_vkCmdSetPatchControlPointsEXT CmdSetPatchControlPointsEXT;
+ PFN_vkCmdSetPrimitiveRestartEnable CmdSetPrimitiveRestartEnable;
+ PFN_vkCmdSetPrimitiveTopology CmdSetPrimitiveTopology;
+ PFN_vkCmdSetRasterizerDiscardEnable CmdSetRasterizerDiscardEnable;
+ PFN_vkCmdSetScissor CmdSetScissor;
+ PFN_vkCmdSetScissorWithCount CmdSetScissorWithCount;
+ PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask;
+ PFN_vkCmdSetStencilOp CmdSetStencilOp;
+ PFN_vkCmdSetStencilReference CmdSetStencilReference;
+ PFN_vkCmdSetStencilTestEnable CmdSetStencilTestEnable;
+ PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask;
+ PFN_vkCmdSetViewport CmdSetViewport;
+ PFN_vkCmdSetViewportWithCount CmdSetViewportWithCount;
+ PFN_vkCmdUpdateBuffer CmdUpdateBuffer;
+ PFN_vkCmdWaitEvents CmdWaitEvents;
+ PFN_vkCmdWaitEvents2 CmdWaitEvents2;
+ PFN_vkCmdWriteTimestamp CmdWriteTimestamp;
+ PFN_vkCmdWriteTimestamp2 CmdWriteTimestamp2;
+ PFN_vkCreateBuffer CreateBuffer;
+ PFN_vkCreateBufferView CreateBufferView;
+ PFN_vkCreateCommandPool CreateCommandPool;
+ PFN_vkCreateComputePipelines CreateComputePipelines;
+ PFN_vkCreateDescriptorPool CreateDescriptorPool;
+ PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout;
+ PFN_vkCreateDescriptorUpdateTemplate CreateDescriptorUpdateTemplate;
+ PFN_vkCreateEvent CreateEvent;
+ PFN_vkCreateFence CreateFence;
+ PFN_vkCreateFramebuffer CreateFramebuffer;
+ PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines;
+ PFN_vkCreateImage CreateImage;
+ PFN_vkCreateImageView CreateImageView;
+ PFN_vkCreatePipelineCache CreatePipelineCache;
+ PFN_vkCreatePipelineLayout CreatePipelineLayout;
+ PFN_vkCreatePrivateDataSlot CreatePrivateDataSlot;
+ PFN_vkCreateQueryPool CreateQueryPool;
+ PFN_vkCreateRenderPass CreateRenderPass;
+ PFN_vkCreateRenderPass2 CreateRenderPass2;
+ PFN_vkCreateSampler CreateSampler;
+ PFN_vkCreateSamplerYcbcrConversion CreateSamplerYcbcrConversion;
+ PFN_vkCreateSemaphore CreateSemaphore;
+ PFN_vkCreateShaderModule CreateShaderModule;
+ PFN_vkDestroyBuffer DestroyBuffer;
+ PFN_vkDestroyBufferView DestroyBufferView;
+ PFN_vkDestroyCommandPool DestroyCommandPool;
+ PFN_vkDestroyDescriptorPool DestroyDescriptorPool;
+ PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout;
+ PFN_vkDestroyDescriptorUpdateTemplate DestroyDescriptorUpdateTemplate;
+ PFN_vkDestroyDevice DestroyDevice;
+ PFN_vkDestroyEvent DestroyEvent;
+ PFN_vkDestroyFence DestroyFence;
+ PFN_vkDestroyFramebuffer DestroyFramebuffer;
+ PFN_vkDestroyImage DestroyImage;
+ PFN_vkDestroyImageView DestroyImageView;
+ PFN_vkDestroyPipeline DestroyPipeline;
+ PFN_vkDestroyPipelineCache DestroyPipelineCache;
+ PFN_vkDestroyPipelineLayout DestroyPipelineLayout;
+ PFN_vkDestroyPrivateDataSlot DestroyPrivateDataSlot;
+ PFN_vkDestroyQueryPool DestroyQueryPool;
+ PFN_vkDestroyRenderPass DestroyRenderPass;
+ PFN_vkDestroySampler DestroySampler;
+ PFN_vkDestroySamplerYcbcrConversion DestroySamplerYcbcrConversion;
+ PFN_vkDestroySemaphore DestroySemaphore;
+ PFN_vkDestroyShaderModule DestroyShaderModule;
+ PFN_vkDeviceWaitIdle DeviceWaitIdle;
+ PFN_vkEndCommandBuffer EndCommandBuffer;
+ PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges;
+ PFN_vkFreeCommandBuffers FreeCommandBuffers;
+ PFN_vkFreeDescriptorSets FreeDescriptorSets;
+ PFN_vkFreeMemory FreeMemory;
+ PFN_vkGetBufferDeviceAddress GetBufferDeviceAddress;
+ PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements;
+ PFN_vkGetBufferMemoryRequirements2 GetBufferMemoryRequirements2;
+ PFN_vkGetBufferOpaqueCaptureAddress GetBufferOpaqueCaptureAddress;
+ PFN_vkGetCalibratedTimestampsEXT GetCalibratedTimestampsEXT;
+ PFN_vkGetDescriptorSetLayoutSupport GetDescriptorSetLayoutSupport;
+ PFN_vkGetDeviceBufferMemoryRequirements GetDeviceBufferMemoryRequirements;
+ PFN_vkGetDeviceGroupPeerMemoryFeatures GetDeviceGroupPeerMemoryFeatures;
+ PFN_vkGetDeviceImageMemoryRequirements GetDeviceImageMemoryRequirements;
+ PFN_vkGetDeviceImageSparseMemoryRequirements GetDeviceImageSparseMemoryRequirements;
+ PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment;
+ PFN_vkGetDeviceMemoryOpaqueCaptureAddress GetDeviceMemoryOpaqueCaptureAddress;
+ PFN_vkGetDeviceProcAddr GetDeviceProcAddr;
+ PFN_vkGetDeviceQueue GetDeviceQueue;
+ PFN_vkGetDeviceQueue2 GetDeviceQueue2;
+ PFN_vkGetEventStatus GetEventStatus;
+ PFN_vkGetFenceFdKHR GetFenceFdKHR;
+ PFN_vkGetFenceStatus GetFenceStatus;
+ PFN_vkGetImageDrmFormatModifierPropertiesEXT GetImageDrmFormatModifierPropertiesEXT;
+ PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements;
+ PFN_vkGetImageMemoryRequirements2 GetImageMemoryRequirements2;
+ PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements;
+ PFN_vkGetImageSparseMemoryRequirements2 GetImageSparseMemoryRequirements2;
+ PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout;
+ PFN_vkGetMemoryFdKHR GetMemoryFdKHR;
+ PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR;
+ PFN_vkGetPipelineCacheData GetPipelineCacheData;
+ PFN_vkGetPrivateData GetPrivateData;
+ PFN_vkGetQueryPoolResults GetQueryPoolResults;
+ PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity;
+ PFN_vkGetSemaphoreCounterValue GetSemaphoreCounterValue;
+ PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR;
+ PFN_vkImportFenceFdKHR ImportFenceFdKHR;
+ PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR;
+ PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges;
+ PFN_vkMapMemory MapMemory;
+ PFN_vkMergePipelineCaches MergePipelineCaches;
+ PFN_vkQueueBindSparse QueueBindSparse;
+ PFN_vkQueueSubmit QueueSubmit;
+ PFN_vkQueueSubmit2 QueueSubmit2;
+ PFN_vkQueueWaitIdle QueueWaitIdle;
+ PFN_vkResetCommandBuffer ResetCommandBuffer;
+ PFN_vkResetCommandPool ResetCommandPool;
+ PFN_vkResetDescriptorPool ResetDescriptorPool;
+ PFN_vkResetEvent ResetEvent;
+ PFN_vkResetFences ResetFences;
+ PFN_vkResetQueryPool ResetQueryPool;
+ PFN_vkSetEvent SetEvent;
+ PFN_vkSetPrivateData SetPrivateData;
+ PFN_vkSignalSemaphore SignalSemaphore;
+ PFN_vkTrimCommandPool TrimCommandPool;
+ PFN_vkUnmapMemory UnmapMemory;
+ PFN_vkUpdateDescriptorSetWithTemplate UpdateDescriptorSetWithTemplate;
+ PFN_vkUpdateDescriptorSets UpdateDescriptorSets;
+ PFN_vkWaitForFences WaitForFences;
+ PFN_vkWaitSemaphores WaitSemaphores;
+};
+
+/* Resolve instance-level physical-device entry points into *proc_table via
+ * vkGetInstanceProcAddr.  GetPhysicalDeviceToolProperties is first queried
+ * by its core (promoted) name; if that yields NULL, the EXT-suffixed alias
+ * is tried as a fallback.  NOTE(review): generated code — edit the
+ * generator, not this file. */
+static inline void
+vn_util_init_physical_device_proc_table(VkInstance instance,
+ struct vn_physical_device_proc_table *proc_table)
+{
+#define VN_GIPA(instance, cmd) (PFN_ ## cmd)vkGetInstanceProcAddr(instance, #cmd)
+ proc_table->GetPhysicalDeviceCalibrateableTimeDomainsEXT = VN_GIPA(instance, vkGetPhysicalDeviceCalibrateableTimeDomainsEXT);
+ proc_table->GetPhysicalDeviceToolProperties = VN_GIPA(instance, vkGetPhysicalDeviceToolProperties);
+ /* Core symbol unavailable (pre-1.3 instance): fall back to the EXT alias. */
+ if (!proc_table->GetPhysicalDeviceToolProperties)
+ proc_table->GetPhysicalDeviceToolProperties = VN_GIPA(instance, vkGetPhysicalDeviceToolPropertiesEXT);
+#undef VN_GIPA
+}
+
+
+/* Resolve device-level entry points into *proc_table via vkGetDeviceProcAddr.
+ * For commands promoted to a core Vulkan version, the core name is queried
+ * when api_version is at least that version; otherwise the extension-suffixed
+ * alias is queried when the matching extension bit is set in ext_table, else
+ * the slot is left NULL.  Unconditional core-1.0 commands are queried
+ * directly.  NOTE(review): generated code — edit the generator, not this
+ * file. */
+static inline void
+vn_util_init_device_proc_table(VkDevice dev,
+ uint32_t api_version,
+ const struct vn_info_extension_table *ext_table,
+ struct vn_device_proc_table *proc_table)
+{
+#define VN_GDPA(dev, cmd) (PFN_ ## cmd)vkGetDeviceProcAddr(dev, #cmd)
+ proc_table->AllocateCommandBuffers = VN_GDPA(dev, vkAllocateCommandBuffers);
+ proc_table->AllocateDescriptorSets = VN_GDPA(dev, vkAllocateDescriptorSets);
+ proc_table->AllocateMemory = VN_GDPA(dev, vkAllocateMemory);
+ proc_table->BeginCommandBuffer = VN_GDPA(dev, vkBeginCommandBuffer);
+ proc_table->BindBufferMemory = VN_GDPA(dev, vkBindBufferMemory);
+ proc_table->BindBufferMemory2 =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkBindBufferMemory2) :
+ ext_table->KHR_bind_memory2 ? VN_GDPA(dev, vkBindBufferMemory2KHR) :
+ NULL;
+ proc_table->BindImageMemory = VN_GDPA(dev, vkBindImageMemory);
+ proc_table->BindImageMemory2 =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkBindImageMemory2) :
+ ext_table->KHR_bind_memory2 ? VN_GDPA(dev, vkBindImageMemory2KHR) :
+ NULL;
+ proc_table->CmdBeginConditionalRenderingEXT =
+ ext_table->EXT_conditional_rendering ? VN_GDPA(dev, vkCmdBeginConditionalRenderingEXT) :
+ NULL;
+ proc_table->CmdBeginQuery = VN_GDPA(dev, vkCmdBeginQuery);
+ proc_table->CmdBeginQueryIndexedEXT =
+ ext_table->EXT_transform_feedback ? VN_GDPA(dev, vkCmdBeginQueryIndexedEXT) :
+ NULL;
+ proc_table->CmdBeginRenderPass = VN_GDPA(dev, vkCmdBeginRenderPass);
+ proc_table->CmdBeginRenderPass2 =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkCmdBeginRenderPass2) :
+ ext_table->KHR_create_renderpass2 ? VN_GDPA(dev, vkCmdBeginRenderPass2KHR) :
+ NULL;
+ proc_table->CmdBeginRendering =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdBeginRendering) :
+ ext_table->KHR_dynamic_rendering ? VN_GDPA(dev, vkCmdBeginRenderingKHR) :
+ NULL;
+ proc_table->CmdBeginTransformFeedbackEXT =
+ ext_table->EXT_transform_feedback ? VN_GDPA(dev, vkCmdBeginTransformFeedbackEXT) :
+ NULL;
+ proc_table->CmdBindDescriptorSets = VN_GDPA(dev, vkCmdBindDescriptorSets);
+ proc_table->CmdBindIndexBuffer = VN_GDPA(dev, vkCmdBindIndexBuffer);
+ proc_table->CmdBindPipeline = VN_GDPA(dev, vkCmdBindPipeline);
+ proc_table->CmdBindTransformFeedbackBuffersEXT =
+ ext_table->EXT_transform_feedback ? VN_GDPA(dev, vkCmdBindTransformFeedbackBuffersEXT) :
+ NULL;
+ proc_table->CmdBindVertexBuffers = VN_GDPA(dev, vkCmdBindVertexBuffers);
+ proc_table->CmdBindVertexBuffers2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdBindVertexBuffers2) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdBindVertexBuffers2EXT) :
+ NULL;
+ proc_table->CmdBlitImage = VN_GDPA(dev, vkCmdBlitImage);
+ proc_table->CmdBlitImage2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdBlitImage2) :
+ ext_table->KHR_copy_commands2 ? VN_GDPA(dev, vkCmdBlitImage2KHR) :
+ NULL;
+ proc_table->CmdClearAttachments = VN_GDPA(dev, vkCmdClearAttachments);
+ proc_table->CmdClearColorImage = VN_GDPA(dev, vkCmdClearColorImage);
+ proc_table->CmdClearDepthStencilImage = VN_GDPA(dev, vkCmdClearDepthStencilImage);
+ proc_table->CmdCopyBuffer = VN_GDPA(dev, vkCmdCopyBuffer);
+ proc_table->CmdCopyBuffer2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdCopyBuffer2) :
+ ext_table->KHR_copy_commands2 ? VN_GDPA(dev, vkCmdCopyBuffer2KHR) :
+ NULL;
+ proc_table->CmdCopyBufferToImage = VN_GDPA(dev, vkCmdCopyBufferToImage);
+ proc_table->CmdCopyBufferToImage2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdCopyBufferToImage2) :
+ ext_table->KHR_copy_commands2 ? VN_GDPA(dev, vkCmdCopyBufferToImage2KHR) :
+ NULL;
+ proc_table->CmdCopyImage = VN_GDPA(dev, vkCmdCopyImage);
+ proc_table->CmdCopyImage2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdCopyImage2) :
+ ext_table->KHR_copy_commands2 ? VN_GDPA(dev, vkCmdCopyImage2KHR) :
+ NULL;
+ proc_table->CmdCopyImageToBuffer = VN_GDPA(dev, vkCmdCopyImageToBuffer);
+ proc_table->CmdCopyImageToBuffer2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdCopyImageToBuffer2) :
+ ext_table->KHR_copy_commands2 ? VN_GDPA(dev, vkCmdCopyImageToBuffer2KHR) :
+ NULL;
+ proc_table->CmdCopyQueryPoolResults = VN_GDPA(dev, vkCmdCopyQueryPoolResults);
+ proc_table->CmdDispatch = VN_GDPA(dev, vkCmdDispatch);
+ proc_table->CmdDispatchBase =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkCmdDispatchBase) :
+ ext_table->KHR_device_group ? VN_GDPA(dev, vkCmdDispatchBaseKHR) :
+ NULL;
+ proc_table->CmdDispatchIndirect = VN_GDPA(dev, vkCmdDispatchIndirect);
+ proc_table->CmdDraw = VN_GDPA(dev, vkCmdDraw);
+ proc_table->CmdDrawIndexed = VN_GDPA(dev, vkCmdDrawIndexed);
+ proc_table->CmdDrawIndexedIndirect = VN_GDPA(dev, vkCmdDrawIndexedIndirect);
+ proc_table->CmdDrawIndexedIndirectCount =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkCmdDrawIndexedIndirectCount) :
+ ext_table->KHR_draw_indirect_count ? VN_GDPA(dev, vkCmdDrawIndexedIndirectCountKHR) :
+ NULL;
+ proc_table->CmdDrawIndirect = VN_GDPA(dev, vkCmdDrawIndirect);
+ proc_table->CmdDrawIndirectByteCountEXT =
+ ext_table->EXT_transform_feedback ? VN_GDPA(dev, vkCmdDrawIndirectByteCountEXT) :
+ NULL;
+ proc_table->CmdDrawIndirectCount =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkCmdDrawIndirectCount) :
+ ext_table->KHR_draw_indirect_count ? VN_GDPA(dev, vkCmdDrawIndirectCountKHR) :
+ NULL;
+ proc_table->CmdDrawMultiEXT =
+ ext_table->EXT_multi_draw ? VN_GDPA(dev, vkCmdDrawMultiEXT) :
+ NULL;
+ proc_table->CmdDrawMultiIndexedEXT =
+ ext_table->EXT_multi_draw ? VN_GDPA(dev, vkCmdDrawMultiIndexedEXT) :
+ NULL;
+ proc_table->CmdEndConditionalRenderingEXT =
+ ext_table->EXT_conditional_rendering ? VN_GDPA(dev, vkCmdEndConditionalRenderingEXT) :
+ NULL;
+ proc_table->CmdEndQuery = VN_GDPA(dev, vkCmdEndQuery);
+ proc_table->CmdEndQueryIndexedEXT =
+ ext_table->EXT_transform_feedback ? VN_GDPA(dev, vkCmdEndQueryIndexedEXT) :
+ NULL;
+ proc_table->CmdEndRenderPass = VN_GDPA(dev, vkCmdEndRenderPass);
+ proc_table->CmdEndRenderPass2 =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkCmdEndRenderPass2) :
+ ext_table->KHR_create_renderpass2 ? VN_GDPA(dev, vkCmdEndRenderPass2KHR) :
+ NULL;
+ proc_table->CmdEndRendering =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdEndRendering) :
+ ext_table->KHR_dynamic_rendering ? VN_GDPA(dev, vkCmdEndRenderingKHR) :
+ NULL;
+ proc_table->CmdEndTransformFeedbackEXT =
+ ext_table->EXT_transform_feedback ? VN_GDPA(dev, vkCmdEndTransformFeedbackEXT) :
+ NULL;
+ proc_table->CmdExecuteCommands = VN_GDPA(dev, vkCmdExecuteCommands);
+ proc_table->CmdFillBuffer = VN_GDPA(dev, vkCmdFillBuffer);
+ proc_table->CmdNextSubpass = VN_GDPA(dev, vkCmdNextSubpass);
+ proc_table->CmdNextSubpass2 =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkCmdNextSubpass2) :
+ ext_table->KHR_create_renderpass2 ? VN_GDPA(dev, vkCmdNextSubpass2KHR) :
+ NULL;
+ proc_table->CmdPipelineBarrier = VN_GDPA(dev, vkCmdPipelineBarrier);
+ proc_table->CmdPipelineBarrier2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdPipelineBarrier2) :
+ ext_table->KHR_synchronization2 ? VN_GDPA(dev, vkCmdPipelineBarrier2KHR) :
+ NULL;
+ proc_table->CmdPushConstants = VN_GDPA(dev, vkCmdPushConstants);
+ proc_table->CmdPushDescriptorSetKHR =
+ ext_table->KHR_push_descriptor ? VN_GDPA(dev, vkCmdPushDescriptorSetKHR) :
+ NULL;
+ proc_table->CmdPushDescriptorSetWithTemplateKHR =
+ ext_table->KHR_push_descriptor ? VN_GDPA(dev, vkCmdPushDescriptorSetWithTemplateKHR) :
+ NULL;
+ proc_table->CmdResetEvent = VN_GDPA(dev, vkCmdResetEvent);
+ proc_table->CmdResetEvent2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdResetEvent2) :
+ ext_table->KHR_synchronization2 ? VN_GDPA(dev, vkCmdResetEvent2KHR) :
+ NULL;
+ proc_table->CmdResetQueryPool = VN_GDPA(dev, vkCmdResetQueryPool);
+ proc_table->CmdResolveImage = VN_GDPA(dev, vkCmdResolveImage);
+ proc_table->CmdResolveImage2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdResolveImage2) :
+ ext_table->KHR_copy_commands2 ? VN_GDPA(dev, vkCmdResolveImage2KHR) :
+ NULL;
+ proc_table->CmdSetBlendConstants = VN_GDPA(dev, vkCmdSetBlendConstants);
+ proc_table->CmdSetCullMode =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetCullMode) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetCullModeEXT) :
+ NULL;
+ proc_table->CmdSetDepthBias = VN_GDPA(dev, vkCmdSetDepthBias);
+ proc_table->CmdSetDepthBiasEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetDepthBiasEnable) :
+ ext_table->EXT_extended_dynamic_state2 ? VN_GDPA(dev, vkCmdSetDepthBiasEnableEXT) :
+ NULL;
+ proc_table->CmdSetDepthBounds = VN_GDPA(dev, vkCmdSetDepthBounds);
+ proc_table->CmdSetDepthBoundsTestEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetDepthBoundsTestEnable) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetDepthBoundsTestEnableEXT) :
+ NULL;
+ proc_table->CmdSetDepthCompareOp =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetDepthCompareOp) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetDepthCompareOpEXT) :
+ NULL;
+ proc_table->CmdSetDepthTestEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetDepthTestEnable) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetDepthTestEnableEXT) :
+ NULL;
+ proc_table->CmdSetDepthWriteEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetDepthWriteEnable) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetDepthWriteEnableEXT) :
+ NULL;
+ proc_table->CmdSetDeviceMask =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkCmdSetDeviceMask) :
+ ext_table->KHR_device_group ? VN_GDPA(dev, vkCmdSetDeviceMaskKHR) :
+ NULL;
+ proc_table->CmdSetEvent = VN_GDPA(dev, vkCmdSetEvent);
+ proc_table->CmdSetEvent2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetEvent2) :
+ ext_table->KHR_synchronization2 ? VN_GDPA(dev, vkCmdSetEvent2KHR) :
+ NULL;
+ proc_table->CmdSetFrontFace =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetFrontFace) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetFrontFaceEXT) :
+ NULL;
+ proc_table->CmdSetLineStippleEXT =
+ ext_table->EXT_line_rasterization ? VN_GDPA(dev, vkCmdSetLineStippleEXT) :
+ NULL;
+ proc_table->CmdSetLineWidth = VN_GDPA(dev, vkCmdSetLineWidth);
+ proc_table->CmdSetLogicOpEXT =
+ ext_table->EXT_extended_dynamic_state2 ? VN_GDPA(dev, vkCmdSetLogicOpEXT) :
+ NULL;
+ proc_table->CmdSetPatchControlPointsEXT =
+ ext_table->EXT_extended_dynamic_state2 ? VN_GDPA(dev, vkCmdSetPatchControlPointsEXT) :
+ NULL;
+ proc_table->CmdSetPrimitiveRestartEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetPrimitiveRestartEnable) :
+ ext_table->EXT_extended_dynamic_state2 ? VN_GDPA(dev, vkCmdSetPrimitiveRestartEnableEXT) :
+ NULL;
+ proc_table->CmdSetPrimitiveTopology =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetPrimitiveTopology) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetPrimitiveTopologyEXT) :
+ NULL;
+ proc_table->CmdSetRasterizerDiscardEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetRasterizerDiscardEnable) :
+ ext_table->EXT_extended_dynamic_state2 ? VN_GDPA(dev, vkCmdSetRasterizerDiscardEnableEXT) :
+ NULL;
+ proc_table->CmdSetScissor = VN_GDPA(dev, vkCmdSetScissor);
+ proc_table->CmdSetScissorWithCount =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetScissorWithCount) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetScissorWithCountEXT) :
+ NULL;
+ proc_table->CmdSetStencilCompareMask = VN_GDPA(dev, vkCmdSetStencilCompareMask);
+ proc_table->CmdSetStencilOp =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetStencilOp) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetStencilOpEXT) :
+ NULL;
+ proc_table->CmdSetStencilReference = VN_GDPA(dev, vkCmdSetStencilReference);
+ proc_table->CmdSetStencilTestEnable =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetStencilTestEnable) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetStencilTestEnableEXT) :
+ NULL;
+ proc_table->CmdSetStencilWriteMask = VN_GDPA(dev, vkCmdSetStencilWriteMask);
+ proc_table->CmdSetViewport = VN_GDPA(dev, vkCmdSetViewport);
+ proc_table->CmdSetViewportWithCount =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdSetViewportWithCount) :
+ ext_table->EXT_extended_dynamic_state ? VN_GDPA(dev, vkCmdSetViewportWithCountEXT) :
+ NULL;
+ proc_table->CmdUpdateBuffer = VN_GDPA(dev, vkCmdUpdateBuffer);
+ proc_table->CmdWaitEvents = VN_GDPA(dev, vkCmdWaitEvents);
+ proc_table->CmdWaitEvents2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdWaitEvents2) :
+ ext_table->KHR_synchronization2 ? VN_GDPA(dev, vkCmdWaitEvents2KHR) :
+ NULL;
+ proc_table->CmdWriteTimestamp = VN_GDPA(dev, vkCmdWriteTimestamp);
+ proc_table->CmdWriteTimestamp2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCmdWriteTimestamp2) :
+ ext_table->KHR_synchronization2 ? VN_GDPA(dev, vkCmdWriteTimestamp2KHR) :
+ NULL;
+ proc_table->CreateBuffer = VN_GDPA(dev, vkCreateBuffer);
+ proc_table->CreateBufferView = VN_GDPA(dev, vkCreateBufferView);
+ proc_table->CreateCommandPool = VN_GDPA(dev, vkCreateCommandPool);
+ proc_table->CreateComputePipelines = VN_GDPA(dev, vkCreateComputePipelines);
+ proc_table->CreateDescriptorPool = VN_GDPA(dev, vkCreateDescriptorPool);
+ proc_table->CreateDescriptorSetLayout = VN_GDPA(dev, vkCreateDescriptorSetLayout);
+ proc_table->CreateDescriptorUpdateTemplate =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkCreateDescriptorUpdateTemplate) :
+ ext_table->KHR_descriptor_update_template ? VN_GDPA(dev, vkCreateDescriptorUpdateTemplateKHR) :
+ NULL;
+ proc_table->CreateEvent = VN_GDPA(dev, vkCreateEvent);
+ proc_table->CreateFence = VN_GDPA(dev, vkCreateFence);
+ proc_table->CreateFramebuffer = VN_GDPA(dev, vkCreateFramebuffer);
+ proc_table->CreateGraphicsPipelines = VN_GDPA(dev, vkCreateGraphicsPipelines);
+ proc_table->CreateImage = VN_GDPA(dev, vkCreateImage);
+ proc_table->CreateImageView = VN_GDPA(dev, vkCreateImageView);
+ proc_table->CreatePipelineCache = VN_GDPA(dev, vkCreatePipelineCache);
+ proc_table->CreatePipelineLayout = VN_GDPA(dev, vkCreatePipelineLayout);
+ proc_table->CreatePrivateDataSlot =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkCreatePrivateDataSlot) :
+ ext_table->EXT_private_data ? VN_GDPA(dev, vkCreatePrivateDataSlotEXT) :
+ NULL;
+ proc_table->CreateQueryPool = VN_GDPA(dev, vkCreateQueryPool);
+ proc_table->CreateRenderPass = VN_GDPA(dev, vkCreateRenderPass);
+ proc_table->CreateRenderPass2 =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkCreateRenderPass2) :
+ ext_table->KHR_create_renderpass2 ? VN_GDPA(dev, vkCreateRenderPass2KHR) :
+ NULL;
+ proc_table->CreateSampler = VN_GDPA(dev, vkCreateSampler);
+ proc_table->CreateSamplerYcbcrConversion =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkCreateSamplerYcbcrConversion) :
+ ext_table->KHR_sampler_ycbcr_conversion ? VN_GDPA(dev, vkCreateSamplerYcbcrConversionKHR) :
+ NULL;
+ proc_table->CreateSemaphore = VN_GDPA(dev, vkCreateSemaphore);
+ proc_table->CreateShaderModule = VN_GDPA(dev, vkCreateShaderModule);
+ proc_table->DestroyBuffer = VN_GDPA(dev, vkDestroyBuffer);
+ proc_table->DestroyBufferView = VN_GDPA(dev, vkDestroyBufferView);
+ proc_table->DestroyCommandPool = VN_GDPA(dev, vkDestroyCommandPool);
+ proc_table->DestroyDescriptorPool = VN_GDPA(dev, vkDestroyDescriptorPool);
+ proc_table->DestroyDescriptorSetLayout = VN_GDPA(dev, vkDestroyDescriptorSetLayout);
+ proc_table->DestroyDescriptorUpdateTemplate =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkDestroyDescriptorUpdateTemplate) :
+ ext_table->KHR_descriptor_update_template ? VN_GDPA(dev, vkDestroyDescriptorUpdateTemplateKHR) :
+ NULL;
+ proc_table->DestroyDevice = VN_GDPA(dev, vkDestroyDevice);
+ proc_table->DestroyEvent = VN_GDPA(dev, vkDestroyEvent);
+ proc_table->DestroyFence = VN_GDPA(dev, vkDestroyFence);
+ proc_table->DestroyFramebuffer = VN_GDPA(dev, vkDestroyFramebuffer);
+ proc_table->DestroyImage = VN_GDPA(dev, vkDestroyImage);
+ proc_table->DestroyImageView = VN_GDPA(dev, vkDestroyImageView);
+ proc_table->DestroyPipeline = VN_GDPA(dev, vkDestroyPipeline);
+ proc_table->DestroyPipelineCache = VN_GDPA(dev, vkDestroyPipelineCache);
+ proc_table->DestroyPipelineLayout = VN_GDPA(dev, vkDestroyPipelineLayout);
+ proc_table->DestroyPrivateDataSlot =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkDestroyPrivateDataSlot) :
+ ext_table->EXT_private_data ? VN_GDPA(dev, vkDestroyPrivateDataSlotEXT) :
+ NULL;
+ proc_table->DestroyQueryPool = VN_GDPA(dev, vkDestroyQueryPool);
+ proc_table->DestroyRenderPass = VN_GDPA(dev, vkDestroyRenderPass);
+ proc_table->DestroySampler = VN_GDPA(dev, vkDestroySampler);
+ proc_table->DestroySamplerYcbcrConversion =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkDestroySamplerYcbcrConversion) :
+ ext_table->KHR_sampler_ycbcr_conversion ? VN_GDPA(dev, vkDestroySamplerYcbcrConversionKHR) :
+ NULL;
+ proc_table->DestroySemaphore = VN_GDPA(dev, vkDestroySemaphore);
+ proc_table->DestroyShaderModule = VN_GDPA(dev, vkDestroyShaderModule);
+ proc_table->DeviceWaitIdle = VN_GDPA(dev, vkDeviceWaitIdle);
+ proc_table->EndCommandBuffer = VN_GDPA(dev, vkEndCommandBuffer);
+ proc_table->FlushMappedMemoryRanges = VN_GDPA(dev, vkFlushMappedMemoryRanges);
+ proc_table->FreeCommandBuffers = VN_GDPA(dev, vkFreeCommandBuffers);
+ proc_table->FreeDescriptorSets = VN_GDPA(dev, vkFreeDescriptorSets);
+ proc_table->FreeMemory = VN_GDPA(dev, vkFreeMemory);
+ proc_table->GetBufferDeviceAddress =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkGetBufferDeviceAddress) :
+ ext_table->KHR_buffer_device_address ? VN_GDPA(dev, vkGetBufferDeviceAddressKHR) :
+ NULL;
+ proc_table->GetBufferMemoryRequirements = VN_GDPA(dev, vkGetBufferMemoryRequirements);
+ proc_table->GetBufferMemoryRequirements2 =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkGetBufferMemoryRequirements2) :
+ ext_table->KHR_get_memory_requirements2 ? VN_GDPA(dev, vkGetBufferMemoryRequirements2KHR) :
+ NULL;
+ proc_table->GetBufferOpaqueCaptureAddress =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkGetBufferOpaqueCaptureAddress) :
+ ext_table->KHR_buffer_device_address ? VN_GDPA(dev, vkGetBufferOpaqueCaptureAddressKHR) :
+ NULL;
+ proc_table->GetCalibratedTimestampsEXT =
+ ext_table->EXT_calibrated_timestamps ? VN_GDPA(dev, vkGetCalibratedTimestampsEXT) :
+ NULL;
+ proc_table->GetDescriptorSetLayoutSupport =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkGetDescriptorSetLayoutSupport) :
+ ext_table->KHR_maintenance3 ? VN_GDPA(dev, vkGetDescriptorSetLayoutSupportKHR) :
+ NULL;
+ proc_table->GetDeviceBufferMemoryRequirements =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkGetDeviceBufferMemoryRequirements) :
+ ext_table->KHR_maintenance4 ? VN_GDPA(dev, vkGetDeviceBufferMemoryRequirementsKHR) :
+ NULL;
+ proc_table->GetDeviceGroupPeerMemoryFeatures =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkGetDeviceGroupPeerMemoryFeatures) :
+ ext_table->KHR_device_group ? VN_GDPA(dev, vkGetDeviceGroupPeerMemoryFeaturesKHR) :
+ NULL;
+ proc_table->GetDeviceImageMemoryRequirements =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkGetDeviceImageMemoryRequirements) :
+ ext_table->KHR_maintenance4 ? VN_GDPA(dev, vkGetDeviceImageMemoryRequirementsKHR) :
+ NULL;
+ proc_table->GetDeviceImageSparseMemoryRequirements =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkGetDeviceImageSparseMemoryRequirements) :
+ ext_table->KHR_maintenance4 ? VN_GDPA(dev, vkGetDeviceImageSparseMemoryRequirementsKHR) :
+ NULL;
+ proc_table->GetDeviceMemoryCommitment = VN_GDPA(dev, vkGetDeviceMemoryCommitment);
+ proc_table->GetDeviceMemoryOpaqueCaptureAddress =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkGetDeviceMemoryOpaqueCaptureAddress) :
+ ext_table->KHR_buffer_device_address ? VN_GDPA(dev, vkGetDeviceMemoryOpaqueCaptureAddressKHR) :
+ NULL;
+ proc_table->GetDeviceProcAddr = VN_GDPA(dev, vkGetDeviceProcAddr);
+ proc_table->GetDeviceQueue = VN_GDPA(dev, vkGetDeviceQueue);
+ proc_table->GetDeviceQueue2 =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkGetDeviceQueue2) :
+ NULL;
+ proc_table->GetEventStatus = VN_GDPA(dev, vkGetEventStatus);
+ proc_table->GetFenceFdKHR =
+ ext_table->KHR_external_fence_fd ? VN_GDPA(dev, vkGetFenceFdKHR) :
+ NULL;
+ proc_table->GetFenceStatus = VN_GDPA(dev, vkGetFenceStatus);
+ proc_table->GetImageDrmFormatModifierPropertiesEXT =
+ ext_table->EXT_image_drm_format_modifier ? VN_GDPA(dev, vkGetImageDrmFormatModifierPropertiesEXT) :
+ NULL;
+ proc_table->GetImageMemoryRequirements = VN_GDPA(dev, vkGetImageMemoryRequirements);
+ proc_table->GetImageMemoryRequirements2 =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkGetImageMemoryRequirements2) :
+ ext_table->KHR_get_memory_requirements2 ? VN_GDPA(dev, vkGetImageMemoryRequirements2KHR) :
+ NULL;
+ proc_table->GetImageSparseMemoryRequirements = VN_GDPA(dev, vkGetImageSparseMemoryRequirements);
+ proc_table->GetImageSparseMemoryRequirements2 =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkGetImageSparseMemoryRequirements2) :
+ ext_table->KHR_get_memory_requirements2 ? VN_GDPA(dev, vkGetImageSparseMemoryRequirements2KHR) :
+ NULL;
+ proc_table->GetImageSubresourceLayout = VN_GDPA(dev, vkGetImageSubresourceLayout);
+ proc_table->GetMemoryFdKHR =
+ ext_table->KHR_external_memory_fd ? VN_GDPA(dev, vkGetMemoryFdKHR) :
+ NULL;
+ proc_table->GetMemoryFdPropertiesKHR =
+ ext_table->KHR_external_memory_fd ? VN_GDPA(dev, vkGetMemoryFdPropertiesKHR) :
+ NULL;
+ proc_table->GetPipelineCacheData = VN_GDPA(dev, vkGetPipelineCacheData);
+ proc_table->GetPrivateData =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkGetPrivateData) :
+ ext_table->EXT_private_data ? VN_GDPA(dev, vkGetPrivateDataEXT) :
+ NULL;
+ proc_table->GetQueryPoolResults = VN_GDPA(dev, vkGetQueryPoolResults);
+ proc_table->GetRenderAreaGranularity = VN_GDPA(dev, vkGetRenderAreaGranularity);
+ proc_table->GetSemaphoreCounterValue =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkGetSemaphoreCounterValue) :
+ ext_table->KHR_timeline_semaphore ? VN_GDPA(dev, vkGetSemaphoreCounterValueKHR) :
+ NULL;
+ proc_table->GetSemaphoreFdKHR =
+ ext_table->KHR_external_semaphore_fd ? VN_GDPA(dev, vkGetSemaphoreFdKHR) :
+ NULL;
+ proc_table->ImportFenceFdKHR =
+ ext_table->KHR_external_fence_fd ? VN_GDPA(dev, vkImportFenceFdKHR) :
+ NULL;
+ proc_table->ImportSemaphoreFdKHR =
+ ext_table->KHR_external_semaphore_fd ? VN_GDPA(dev, vkImportSemaphoreFdKHR) :
+ NULL;
+ proc_table->InvalidateMappedMemoryRanges = VN_GDPA(dev, vkInvalidateMappedMemoryRanges);
+ proc_table->MapMemory = VN_GDPA(dev, vkMapMemory);
+ proc_table->MergePipelineCaches = VN_GDPA(dev, vkMergePipelineCaches);
+ proc_table->QueueBindSparse = VN_GDPA(dev, vkQueueBindSparse);
+ proc_table->QueueSubmit = VN_GDPA(dev, vkQueueSubmit);
+ proc_table->QueueSubmit2 =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkQueueSubmit2) :
+ ext_table->KHR_synchronization2 ? VN_GDPA(dev, vkQueueSubmit2KHR) :
+ NULL;
+ proc_table->QueueWaitIdle = VN_GDPA(dev, vkQueueWaitIdle);
+ proc_table->ResetCommandBuffer = VN_GDPA(dev, vkResetCommandBuffer);
+ proc_table->ResetCommandPool = VN_GDPA(dev, vkResetCommandPool);
+ proc_table->ResetDescriptorPool = VN_GDPA(dev, vkResetDescriptorPool);
+ proc_table->ResetEvent = VN_GDPA(dev, vkResetEvent);
+ proc_table->ResetFences = VN_GDPA(dev, vkResetFences);
+ proc_table->ResetQueryPool =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkResetQueryPool) :
+ ext_table->EXT_host_query_reset ? VN_GDPA(dev, vkResetQueryPoolEXT) :
+ NULL;
+ proc_table->SetEvent = VN_GDPA(dev, vkSetEvent);
+ proc_table->SetPrivateData =
+ api_version >= VK_API_VERSION_1_3 ? VN_GDPA(dev, vkSetPrivateData) :
+ ext_table->EXT_private_data ? VN_GDPA(dev, vkSetPrivateDataEXT) :
+ NULL;
+ proc_table->SignalSemaphore =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkSignalSemaphore) :
+ ext_table->KHR_timeline_semaphore ? VN_GDPA(dev, vkSignalSemaphoreKHR) :
+ NULL;
+ proc_table->TrimCommandPool =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkTrimCommandPool) :
+ ext_table->KHR_maintenance1 ? VN_GDPA(dev, vkTrimCommandPoolKHR) :
+ NULL;
+ proc_table->UnmapMemory = VN_GDPA(dev, vkUnmapMemory);
+ proc_table->UpdateDescriptorSetWithTemplate =
+ api_version >= VK_API_VERSION_1_1 ? VN_GDPA(dev, vkUpdateDescriptorSetWithTemplate) :
+ ext_table->KHR_descriptor_update_template ? VN_GDPA(dev, vkUpdateDescriptorSetWithTemplateKHR) :
+ NULL;
+ proc_table->UpdateDescriptorSets = VN_GDPA(dev, vkUpdateDescriptorSets);
+ proc_table->WaitForFences = VN_GDPA(dev, vkWaitForFences);
+ proc_table->WaitSemaphores =
+ api_version >= VK_API_VERSION_1_2 ? VN_GDPA(dev, vkWaitSemaphores) :
+ ext_table->KHR_timeline_semaphore ? VN_GDPA(dev, vkWaitSemaphoresKHR) :
+ NULL;
+#undef VN_GDPA
+}
+
+#endif /* VN_PROTOCOL_RENDERER_UTIL_H */
diff --git a/src/venus/venus-protocol/vulkan.h b/src/venus/venus-protocol/vulkan.h
index 3f7cdba5..3510ac91 100644
--- a/src/venus/venus-protocol/vulkan.h
+++ b/src/venus/venus-protocol/vulkan.h
@@ -2,7 +2,7 @@
#define VULKAN_H_ 1
/*
-** Copyright 2015-2021 The Khronos Group Inc.
+** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
@@ -38,7 +38,6 @@
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
-#include <wayland-client.h>
#include "vulkan_wayland.h"
#endif
diff --git a/src/venus/venus-protocol/vulkan_core.h b/src/venus/venus-protocol/vulkan_core.h
index b2448fb2..6cc788e7 100644
--- a/src/venus/venus-protocol/vulkan_core.h
+++ b/src/venus/venus-protocol/vulkan_core.h
@@ -2,7 +2,7 @@
#define VULKAN_CORE_H_ 1
/*
-** Copyright 2015-2021 The Khronos Group Inc.
+** Copyright 2015-2022 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
@@ -72,10 +72,10 @@ extern "C" {
#define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0
// Version of this file
-#define VK_HEADER_VERSION 182
+#define VK_HEADER_VERSION 228
// Complete version of this file
-#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 2, VK_HEADER_VERSION)
+#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 3, VK_HEADER_VERSION)
// DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead.
#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22)
@@ -130,11 +130,11 @@ VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool)
#define VK_TRUE 1U
#define VK_WHOLE_SIZE (~0ULL)
#define VK_MAX_MEMORY_TYPES 32U
-#define VK_MAX_MEMORY_HEAPS 16U
#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256U
#define VK_UUID_SIZE 16U
#define VK_MAX_EXTENSION_NAME_SIZE 256U
#define VK_MAX_DESCRIPTION_SIZE 256U
+#define VK_MAX_MEMORY_HEAPS 16U
typedef enum VkResult {
VK_SUCCESS = 0,
@@ -160,6 +160,7 @@ typedef enum VkResult {
VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003,
VK_ERROR_FRAGMENTATION = -1000161000,
VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000,
+ VK_PIPELINE_COMPILE_REQUIRED = 1000297000,
VK_ERROR_SURFACE_LOST_KHR = -1000000000,
VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001,
VK_SUBOPTIMAL_KHR = 1000001003,
@@ -167,20 +168,40 @@ typedef enum VkResult {
VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001,
VK_ERROR_VALIDATION_FAILED_EXT = -1000011001,
VK_ERROR_INVALID_SHADER_NV = -1000012000,
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR = -1000023000,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR = -1000023001,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR = -1000023002,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR = -1000023003,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR = -1000023004,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR = -1000023005,
+#endif
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000,
- VK_ERROR_NOT_PERMITTED_EXT = -1000174001,
+ VK_ERROR_NOT_PERMITTED_KHR = -1000174001,
VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000,
VK_THREAD_IDLE_KHR = 1000268000,
VK_THREAD_DONE_KHR = 1000268001,
VK_OPERATION_DEFERRED_KHR = 1000268002,
VK_OPERATION_NOT_DEFERRED_KHR = 1000268003,
- VK_PIPELINE_COMPILE_REQUIRED_EXT = 1000297000,
+ VK_ERROR_COMPRESSION_EXHAUSTED_EXT = -1000338000,
VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY,
VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE,
VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION,
+ VK_ERROR_NOT_PERMITTED_EXT = VK_ERROR_NOT_PERMITTED_KHR,
VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS,
VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS,
- VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED_EXT,
+ VK_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED,
+ VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED,
VK_RESULT_MAX_ENUM = 0x7FFFFFFF
} VkResult;
@@ -349,6 +370,58 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002,
VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003,
VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES = 53,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES = 54,
+ VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO = 1000192000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES = 1000215000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES = 1000245000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES = 1000276000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES = 1000295000,
+ VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO = 1000295001,
+ VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO = 1000295002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES = 1000297000,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER_2 = 1000314000,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2 = 1000314001,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2 = 1000314002,
+ VK_STRUCTURE_TYPE_DEPENDENCY_INFO = 1000314003,
+ VK_STRUCTURE_TYPE_SUBMIT_INFO_2 = 1000314004,
+ VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO = 1000314005,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO = 1000314006,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES = 1000314007,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES = 1000325000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES = 1000335000,
+ VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2 = 1000337000,
+ VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2 = 1000337001,
+ VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2 = 1000337002,
+ VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2 = 1000337003,
+ VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2 = 1000337004,
+ VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2 = 1000337005,
+ VK_STRUCTURE_TYPE_BUFFER_COPY_2 = 1000337006,
+ VK_STRUCTURE_TYPE_IMAGE_COPY_2 = 1000337007,
+ VK_STRUCTURE_TYPE_IMAGE_BLIT_2 = 1000337008,
+ VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2 = 1000337009,
+ VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2 = 1000337010,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES = 1000225000,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO = 1000225001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES = 1000225002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES = 1000138000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES = 1000138001,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK = 1000138002,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO = 1000138003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES = 1000066000,
+ VK_STRUCTURE_TYPE_RENDERING_INFO = 1000044000,
+ VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO = 1000044001,
+ VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO = 1000044002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES = 1000044003,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO = 1000044004,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES = 1000280000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES = 1000280001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES = 1000281001,
+ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3 = 1000360000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES = 1000413000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 1000413001,
+ VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS = 1000413002,
+ VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS = 1000413003,
VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007,
@@ -371,19 +444,19 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001,
VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002,
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_PROFILE_KHR = 1000023000,
+ VK_STRUCTURE_TYPE_VIDEO_PROFILE_INFO_KHR = 1000023000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR = 1000023001,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_KHR = 1000023002,
+ VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR = 1000023002,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_GET_MEMORY_PROPERTIES_KHR = 1000023003,
+ VK_STRUCTURE_TYPE_VIDEO_SESSION_MEMORY_REQUIREMENTS_KHR = 1000023003,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_BIND_MEMORY_KHR = 1000023004,
+ VK_STRUCTURE_TYPE_BIND_VIDEO_SESSION_MEMORY_INFO_KHR = 1000023004,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR = 1000023005,
@@ -404,13 +477,13 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR = 1000023010,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_KHR = 1000023011,
+ VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_INFO_KHR = 1000023011,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_QUEUE_FAMILY_PROPERTIES_2_KHR = 1000023012,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_VIDEO_PROPERTIES_KHR = 1000023012,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_PROFILES_KHR = 1000023013,
+ VK_STRUCTURE_TYPE_VIDEO_PROFILE_LIST_INFO_KHR = 1000023013,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR = 1000023014,
@@ -419,8 +492,17 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR = 1000023015,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_QUERY_RESULT_STATUS_PROPERTIES_KHR = 1000023016,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR = 1000024000,
#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR = 1000024001,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_USAGE_INFO_KHR = 1000024002,
+#endif
VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000,
VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001,
VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002,
@@ -436,54 +518,94 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_EXT = 1000038000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_EXT = 1000038001,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000038001,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000038002,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000038002,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000038003,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT = 1000038003,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_VCL_FRAME_INFO_EXT = 1000038004,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT = 1000038004,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_EXT = 1000038005,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_INFO_EXT = 1000038005,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_EXT = 1000038006,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_INFO_EXT = 1000038006,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_EMIT_PICTURE_PARAMETERS_EXT = 1000038007,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_INFO_EXT = 1000038007,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_EXT = 1000038008,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_EXT = 1000038008,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT = 1000040000,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_EXT = 1000038009,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_REFERENCE_LISTS_INFO_EXT = 1000038010,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_CAPABILITIES_EXT = 1000039000,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000039001,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_CREATE_INFO_EXT = 1000040001,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000039002,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT = 1000040002,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_VCL_FRAME_INFO_EXT = 1000039003,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_EXT = 1000040003,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_DPB_SLOT_INFO_EXT = 1000039004,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT = 1000040004,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_EXT = 1000039005,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000040005,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_EMIT_PICTURE_PARAMETERS_INFO_EXT = 1000039006,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000040006,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PROFILE_INFO_EXT = 1000039007,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT = 1000040007,
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_REFERENCE_LISTS_INFO_EXT = 1000039008,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_INFO_EXT = 1000039009,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_EXT = 1000039010,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_EXT = 1000040000,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_EXT = 1000040001,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_MVC_INFO_EXT = 1000040002,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_INFO_EXT = 1000040003,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000040004,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_EXT = 1000040005,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_EXT = 1000040006,
#endif
VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000,
+ VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000044006,
+ VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT = 1000044007,
+ VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD = 1000044008,
+ VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_ATTRIBUTES_INFO_NVX = 1000044009,
VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000,
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000,
@@ -493,9 +615,11 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000,
VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000,
VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001,
+ VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT = 1000068000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT = 1000068001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT = 1000068002,
VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000,
VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001,
VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002,
@@ -565,10 +689,7 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003,
VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004,
VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001,
- VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002,
- VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003,
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_2_ANDROID = 1000129006,
VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000,
VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001,
VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002,
@@ -607,6 +728,7 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003,
VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004,
VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005,
+ VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT = 1000158006,
VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000,
VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001,
#ifdef VK_ENABLE_BETA_EXTENSIONS
@@ -634,7 +756,6 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000,
VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001,
- VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000,
VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000,
VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002,
@@ -646,33 +767,31 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_EXT = 1000187000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_CREATE_INFO_EXT = 1000187001,
-#endif
-#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000187002,
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_EXT = 1000187001,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000187003,
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_EXT = 1000187002,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_EXT = 1000187004,
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_EXT = 1000187003,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT = 1000187005,
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_EXT = 1000187004,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
- VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT = 1000187006,
+ VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_EXT = 1000187005,
#endif
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR = 1000174000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR = 1000388000,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR = 1000388001,
VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002,
VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000,
- VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000192000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = 1000203000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002,
@@ -689,14 +808,10 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000,
VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001,
VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = 1000215000,
VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001,
VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000,
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002,
VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000,
VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002,
@@ -712,8 +827,8 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000,
VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = 1000245000,
VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR = 1000248000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000,
VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002,
@@ -741,7 +856,7 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003,
VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004,
VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT = 1000273000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000,
VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001,
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002,
@@ -753,7 +868,6 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV = 1000278000,
VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV = 1000278001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001,
VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000,
VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000,
@@ -765,80 +879,153 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002,
VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = 1000295000,
- VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = 1000295001,
- VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = 1000295002,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = 1000297000,
+ VK_STRUCTURE_TYPE_PRESENT_ID_KHR = 1000294000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR = 1000294001,
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR = 1000299000,
#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR = 1000299001,
#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR = 1000299002,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_CAPABILITIES_KHR = 1000299003,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+ VK_STRUCTURE_TYPE_VIDEO_ENCODE_USAGE_INFO_KHR = 1000299004,
+#endif
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000,
VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001,
- VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR = 1000314000,
- VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR = 1000314001,
- VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR = 1000314002,
- VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR = 1000314003,
- VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR = 1000314004,
- VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR = 1000314005,
- VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR = 1000314006,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR = 1000314007,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECT_CREATE_INFO_EXT = 1000311000,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT = 1000311001,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT = 1000311002,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT = 1000311003,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT = 1000311004,
+ VK_STRUCTURE_TYPE_IMPORT_METAL_BUFFER_INFO_EXT = 1000311005,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT = 1000311006,
+ VK_STRUCTURE_TYPE_IMPORT_METAL_TEXTURE_INFO_EXT = 1000311007,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT = 1000311008,
+ VK_STRUCTURE_TYPE_IMPORT_METAL_IO_SURFACE_INFO_EXT = 1000311009,
+ VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311010,
+ VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311011,
VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV = 1000314008,
VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV = 1000314009,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT = 1000320000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT = 1000320001,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT = 1000320002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD = 1000321000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR = 1000203000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR = 1000322000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR = 1000323000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR = 1000325000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001,
VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002,
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV = 1000327000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV = 1000327001,
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MOTION_INFO_NV = 1000327002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT = 1000328000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT = 1000328001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT = 1000330000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001,
VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = 1000335000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR = 1000336000,
- VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = 1000337000,
- VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = 1000337001,
- VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = 1000337002,
- VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = 1000337003,
- VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = 1000337004,
- VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = 1000337005,
- VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = 1000337006,
- VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = 1000337007,
- VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = 1000337008,
- VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = 1000337009,
- VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = 1000337010,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT = 1000338000,
+ VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT = 1000338001,
+ VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT = 1000338002,
+ VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT = 1000338003,
+ VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT = 1000338004,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT = 1000339000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT = 1000344000,
VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = 1000351000,
- VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = 1000351002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT = 1000352000,
VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT = 1000352001,
VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT = 1000352002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT = 1000353000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT = 1000355000,
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT = 1000355001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT = 1000356000,
VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364000,
VK_STRUCTURE_TYPE_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA = 1000364001,
VK_STRUCTURE_TYPE_MEMORY_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364002,
VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365000,
VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365001,
- VK_STRUCTURE_TYPE_SUBPASSS_SHADING_PIPELINE_CREATE_INFO_HUAWEI = 1000369000,
+ VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA = 1000366000,
+ VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA = 1000366001,
+ VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA = 1000366002,
+ VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA = 1000366003,
+ VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA = 1000366004,
+ VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA = 1000366005,
+ VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA = 1000366006,
+ VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA = 1000366007,
+ VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA = 1000366008,
+ VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA = 1000366009,
+ VK_STRUCTURE_TYPE_SUBPASS_SHADING_PIPELINE_CREATE_INFO_HUAWEI = 1000369000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI = 1000369001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_PROPERTIES_HUAWEI = 1000369002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI = 1000370000,
+ VK_STRUCTURE_TYPE_MEMORY_GET_REMOTE_ADDRESS_INFO_NV = 1000371000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_RDMA_FEATURES_NV = 1000371001,
+ VK_STRUCTURE_TYPE_PIPELINE_PROPERTIES_IDENTIFIER_EXT = 1000372000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT = 1000372001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT = 1000376000,
+ VK_STRUCTURE_TYPE_SUBPASS_RESOLVE_PERFORMANCE_QUERY_EXT = 1000376001,
+ VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT = 1000376002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT = 1000377000,
VK_STRUCTURE_TYPE_SCREEN_SURFACE_CREATE_INFO_QNX = 1000378000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT = 1000381000,
VK_STRUCTURE_TYPE_PIPELINE_COLOR_WRITE_CREATE_INFO_EXT = 1000381001,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = 1000388000,
- VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = 1000388001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT = 1000382000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR = 1000386000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT = 1000391000,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT = 1000391001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT = 1000392000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT = 1000392001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT = 1000393000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT = 1000411000,
+ VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT = 1000411001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT = 1000412000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE = 1000420000,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE = 1000420001,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE = 1000420002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT = 1000421000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT = 1000422000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM = 1000425000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM = 1000425001,
+ VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM = 1000425002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV = 1000430000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT = 1000437000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM = 1000440000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM = 1000440001,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM = 1000440002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT = 1000458000,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT = 1000458001,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000458002,
+ VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT = 1000458003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT = 1000462000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT = 1000462001,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT = 1000462002,
+ VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT = 1000462003,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT = 1000342000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT = 1000465000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM = 1000484000,
+ VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM = 1000484001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC = 1000485000,
+ VK_STRUCTURE_TYPE_AMIGO_PROFILING_SUBMIT_INFO_SEC = 1000485001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT = 1000351000,
+ VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT = 1000351002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES,
VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
+ VK_STRUCTURE_TYPE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INFO,
+ VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+ VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO,
+ VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_NV = VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD,
VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES,
@@ -858,6 +1045,7 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO,
VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,
VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES,
VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
@@ -900,6 +1088,10 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES,
VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO,
VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
@@ -921,13 +1113,16 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES,
+ VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES,
VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES,
VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
@@ -936,12 +1131,17 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,
VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES,
VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT,
VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT,
VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES,
VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES,
@@ -950,9 +1150,54 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO,
VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES,
+ VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
+ VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
+ VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES,
+ VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
+ VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2,
+ VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2,
+ VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2,
+ VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
+ VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
+ VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_COPY_2,
+ VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_IMAGE_COPY_2,
+ VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
+ VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
+ VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT,
+ VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT,
+ VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3,
+ VK_STRUCTURE_TYPE_PIPELINE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR,
+ VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES,
+ VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
+ VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS,
VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
} VkStructureType;
+typedef enum VkPipelineCacheHeaderVersion {
+ VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
+ VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCacheHeaderVersion;
+
typedef enum VkImageLayout {
VK_IMAGE_LAYOUT_UNDEFINED = 0,
VK_IMAGE_LAYOUT_GENERAL = 1,
@@ -969,6 +1214,8 @@ typedef enum VkImageLayout {
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001,
VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002,
VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003,
+ VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL = 1000314000,
+ VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL = 1000314001,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR = 1000024000,
@@ -991,8 +1238,7 @@ typedef enum VkImageLayout {
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR = 1000299002,
#endif
- VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR = 1000314000,
- VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR = 1000314001,
+ VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT = 1000339000,
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR,
@@ -1000,6 +1246,8 @@ typedef enum VkImageLayout {
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL,
+ VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
+ VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
} VkImageLayout;
@@ -1032,6 +1280,7 @@ typedef enum VkObjectType {
VK_OBJECT_TYPE_COMMAND_POOL = 25,
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000,
VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000,
+ VK_OBJECT_TYPE_PRIVATE_DATA_SLOT = 1000295000,
VK_OBJECT_TYPE_SURFACE_KHR = 1000000000,
VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000,
VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000,
@@ -1052,9 +1301,10 @@ typedef enum VkObjectType {
VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000,
VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000,
VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000,
- VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = 1000295000,
+ VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA = 1000366000,
VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE,
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION,
+ VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT,
VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF
} VkObjectType;
@@ -1068,11 +1318,6 @@ typedef enum VkVendorId {
VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF
} VkVendorId;
-typedef enum VkPipelineCacheHeaderVersion {
- VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1,
- VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF
-} VkPipelineCacheHeaderVersion;
-
typedef enum VkSystemAllocationScope {
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
@@ -1307,6 +1552,26 @@ typedef enum VkFormat {
VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031,
VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032,
VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033,
+ VK_FORMAT_G8_B8R8_2PLANE_444_UNORM = 1000330000,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 = 1000330001,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 = 1000330002,
+ VK_FORMAT_G16_B16R16_2PLANE_444_UNORM = 1000330003,
+ VK_FORMAT_A4R4G4B4_UNORM_PACK16 = 1000340000,
+ VK_FORMAT_A4B4G4R4_UNORM_PACK16 = 1000340001,
+ VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK = 1000066000,
+ VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK = 1000066001,
+ VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK = 1000066002,
+ VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK = 1000066003,
+ VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK = 1000066004,
+ VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK = 1000066005,
+ VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK = 1000066006,
+ VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK = 1000066007,
+ VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK = 1000066008,
+ VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK = 1000066009,
+ VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK = 1000066010,
+ VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK = 1000066011,
+ VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK = 1000066012,
+ VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK = 1000066013,
VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
@@ -1315,26 +1580,20 @@ typedef enum VkFormat {
VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
- VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = 1000066000,
- VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = 1000066001,
- VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = 1000066002,
- VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = 1000066003,
- VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = 1000066004,
- VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = 1000066005,
- VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = 1000066006,
- VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = 1000066007,
- VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = 1000066008,
- VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = 1000066009,
- VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 1000066010,
- VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = 1000066011,
- VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = 1000066012,
- VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = 1000066013,
- VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = 1000330000,
- VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = 1000330001,
- VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = 1000330002,
- VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = 1000330003,
- VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = 1000340000,
- VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = 1000340001,
+ VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK,
+ VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK,
VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM,
VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM,
VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
@@ -1369,6 +1628,12 @@ typedef enum VkFormat {
VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,
VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = VK_FORMAT_G8_B8R8_2PLANE_444_UNORM,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = VK_FORMAT_G16_B16R16_2PLANE_444_UNORM,
+ VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16,
+ VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16,
VK_FORMAT_MAX_ENUM = 0x7FFFFFFF
} VkFormat;
@@ -1411,6 +1676,10 @@ typedef enum VkQueryType {
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_QUERY_TYPE_VIDEO_ENCODE_BITSTREAM_BUFFER_RANGE_KHR = 1000299000,
#endif
+ VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT = 1000328000,
+ VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT = 1000382000,
+ VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR = 1000386000,
+ VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR = 1000386001,
VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
} VkQueryType;
@@ -1542,6 +1811,21 @@ typedef enum VkDynamicState {
VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,
+ VK_DYNAMIC_STATE_CULL_MODE = 1000267000,
+ VK_DYNAMIC_STATE_FRONT_FACE = 1000267001,
+ VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY = 1000267002,
+ VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT = 1000267003,
+ VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT = 1000267004,
+ VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE = 1000267005,
+ VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE = 1000267006,
+ VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE = 1000267007,
+ VK_DYNAMIC_STATE_DEPTH_COMPARE_OP = 1000267008,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE = 1000267009,
+ VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE = 1000267010,
+ VK_DYNAMIC_STATE_STENCIL_OP = 1000267011,
+ VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE = 1000377001,
+ VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE = 1000377002,
+ VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE = 1000377004,
VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000,
VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000,
VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000,
@@ -1551,25 +1835,25 @@ typedef enum VkDynamicState {
VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001,
VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000,
VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000,
- VK_DYNAMIC_STATE_CULL_MODE_EXT = 1000267000,
- VK_DYNAMIC_STATE_FRONT_FACE_EXT = 1000267001,
- VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = 1000267002,
- VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = 1000267003,
- VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = 1000267004,
- VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = 1000267005,
- VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = 1000267006,
- VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = 1000267007,
- VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = 1000267008,
- VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = 1000267009,
- VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = 1000267010,
- VK_DYNAMIC_STATE_STENCIL_OP_EXT = 1000267011,
VK_DYNAMIC_STATE_VERTEX_INPUT_EXT = 1000352000,
VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT = 1000377000,
- VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = 1000377001,
- VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = 1000377002,
VK_DYNAMIC_STATE_LOGIC_OP_EXT = 1000377003,
- VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = 1000377004,
VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT = 1000381000,
+ VK_DYNAMIC_STATE_CULL_MODE_EXT = VK_DYNAMIC_STATE_CULL_MODE,
+ VK_DYNAMIC_STATE_FRONT_FACE_EXT = VK_DYNAMIC_STATE_FRONT_FACE,
+ VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY,
+ VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT,
+ VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT,
+ VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE,
+ VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE,
+ VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE,
+ VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = VK_DYNAMIC_STATE_DEPTH_COMPARE_OP,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE,
+ VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE,
+ VK_DYNAMIC_STATE_STENCIL_OP_EXT = VK_DYNAMIC_STATE_STENCIL_OP,
+ VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE,
+ VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE,
+ VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE,
VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF
} VkDynamicState;
@@ -1655,8 +1939,8 @@ typedef enum VkBorderColor {
typedef enum VkFilter {
VK_FILTER_NEAREST = 0,
VK_FILTER_LINEAR = 1,
- VK_FILTER_CUBIC_IMG = 1000015000,
- VK_FILTER_CUBIC_EXT = VK_FILTER_CUBIC_IMG,
+ VK_FILTER_CUBIC_EXT = 1000015000,
+ VK_FILTER_CUBIC_IMG = VK_FILTER_CUBIC_EXT,
VK_FILTER_MAX_ENUM = 0x7FFFFFFF
} VkFilter;
@@ -1688,10 +1972,14 @@ typedef enum VkDescriptorType {
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
- VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000,
+ VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK = 1000138000,
VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000,
VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
- VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = 1000351000,
+ VK_DESCRIPTOR_TYPE_SAMPLE_WEIGHT_IMAGE_QCOM = 1000440000,
+ VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM = 1000440001,
+ VK_DESCRIPTOR_TYPE_MUTABLE_EXT = 1000351000,
+ VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK,
+ VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = VK_DESCRIPTOR_TYPE_MUTABLE_EXT,
VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF
} VkDescriptorType;
@@ -1699,13 +1987,17 @@ typedef enum VkAttachmentLoadOp {
VK_ATTACHMENT_LOAD_OP_LOAD = 0,
VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+ VK_ATTACHMENT_LOAD_OP_NONE_EXT = 1000400000,
VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF
} VkAttachmentLoadOp;
typedef enum VkAttachmentStoreOp {
VK_ATTACHMENT_STORE_OP_STORE = 0,
VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
- VK_ATTACHMENT_STORE_OP_NONE_QCOM = 1000301000,
+ VK_ATTACHMENT_STORE_OP_NONE = 1000301000,
+ VK_ATTACHMENT_STORE_OP_NONE_KHR = VK_ATTACHMENT_STORE_OP_NONE,
+ VK_ATTACHMENT_STORE_OP_NONE_QCOM = VK_ATTACHMENT_STORE_OP_NONE,
+ VK_ATTACHMENT_STORE_OP_NONE_EXT = VK_ATTACHMENT_STORE_OP_NONE,
VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF
} VkAttachmentStoreOp;
@@ -1757,6 +2049,7 @@ typedef enum VkAccessFlagBits {
VK_ACCESS_HOST_WRITE_BIT = 0x00004000,
VK_ACCESS_MEMORY_READ_BIT = 0x00008000,
VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000,
+ VK_ACCESS_NONE = 0,
VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000,
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000,
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000,
@@ -1768,10 +2061,10 @@ typedef enum VkAccessFlagBits {
VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000,
VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000,
VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000,
- VK_ACCESS_NONE_KHR = 0,
VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR,
VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR,
VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
+ VK_ACCESS_NONE_KHR = VK_ACCESS_NONE,
VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkAccessFlagBits;
typedef VkFlags VkAccessFlags;
@@ -1784,6 +2077,7 @@ typedef enum VkImageAspectFlagBits {
VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010,
VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020,
VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040,
+ VK_IMAGE_ASPECT_NONE = 0,
VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080,
VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100,
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200,
@@ -1791,6 +2085,7 @@ typedef enum VkImageAspectFlagBits {
VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT,
VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT,
VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT,
+ VK_IMAGE_ASPECT_NONE_KHR = VK_IMAGE_ASPECT_NONE,
VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkImageAspectFlagBits;
typedef VkFlags VkImageAspectFlags;
@@ -1819,7 +2114,6 @@ typedef enum VkFormatFeatureFlagBits {
VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000,
VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000,
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000,
- VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000,
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000,
#endif
@@ -1827,6 +2121,7 @@ typedef enum VkFormatFeatureFlagBits {
VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000,
#endif
VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000,
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000,
VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000,
VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000,
#ifdef VK_ENABLE_BETA_EXTENSIONS
@@ -1835,6 +2130,7 @@ typedef enum VkFormatFeatureFlagBits {
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000,
#endif
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT,
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT,
VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT,
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT,
@@ -1845,7 +2141,6 @@ typedef enum VkFormatFeatureFlagBits {
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT,
VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT,
VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT,
- VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkFormatFeatureFlagBits;
typedef VkFlags VkFormatFeatureFlags;
@@ -1866,6 +2161,9 @@ typedef enum VkImageCreateFlagBits {
VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000,
VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000,
VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000,
+ VK_IMAGE_CREATE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_BIT_EXT = 0x00040000,
+ VK_IMAGE_CREATE_2D_VIEW_COMPATIBLE_BIT_EXT = 0x00020000,
+ VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_QCOM = 0x00008000,
VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT,
VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT,
VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT,
@@ -1917,10 +2215,19 @@ typedef enum VkImageUsageFlagBits {
#ifdef VK_ENABLE_BETA_EXTENSIONS
VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR = 0x00008000,
#endif
+ VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x00080000,
+ VK_IMAGE_USAGE_INVOCATION_MASK_BIT_HUAWEI = 0x00040000,
+ VK_IMAGE_USAGE_SAMPLE_WEIGHT_BIT_QCOM = 0x00100000,
+ VK_IMAGE_USAGE_SAMPLE_BLOCK_MATCH_BIT_QCOM = 0x00200000,
VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkImageUsageFlagBits;
typedef VkFlags VkImageUsageFlags;
+
+typedef enum VkInstanceCreateFlagBits {
+ VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR = 0x00000001,
+ VK_INSTANCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkInstanceCreateFlagBits;
typedef VkFlags VkInstanceCreateFlags;
typedef enum VkMemoryHeapFlagBits {
@@ -1940,6 +2247,7 @@ typedef enum VkMemoryPropertyFlagBits {
VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020,
VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040,
VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080,
+ VK_MEMORY_PROPERTY_RDMA_CAPABLE_BIT_NV = 0x00000100,
VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkMemoryPropertyFlagBits;
typedef VkFlags VkMemoryPropertyFlags;
@@ -1985,19 +2293,22 @@ typedef enum VkPipelineStageFlagBits {
VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,
+ VK_PIPELINE_STAGE_NONE = 0,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000,
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000,
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000,
VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000,
- VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000,
- VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000,
VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000,
VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000,
VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000,
- VK_PIPELINE_STAGE_NONE_KHR = 0,
+ VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT = 0x00080000,
+ VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT = 0x00100000,
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR,
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
+ VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT,
+ VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT,
+ VK_PIPELINE_STAGE_NONE_KHR = VK_PIPELINE_STAGE_NONE,
VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkPipelineStageFlagBits;
typedef VkFlags VkPipelineStageFlags;
@@ -2025,7 +2336,8 @@ typedef VkFlags VkFenceCreateFlags;
typedef VkFlags VkSemaphoreCreateFlags;
typedef enum VkEventCreateFlagBits {
- VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR = 0x00000001,
+ VK_EVENT_CREATE_DEVICE_ONLY_BIT = 0x00000001,
+ VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR = VK_EVENT_CREATE_DEVICE_ONLY_BIT,
VK_EVENT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkEventCreateFlagBits;
typedef VkFlags VkEventCreateFlags;
@@ -2042,6 +2354,8 @@ typedef enum VkQueryPipelineStatisticFlagBits {
VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,
VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,
VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,
+ VK_QUERY_PIPELINE_STATISTIC_TASK_SHADER_INVOCATIONS_BIT_EXT = 0x00000800,
+ VK_QUERY_PIPELINE_STATISTIC_MESH_SHADER_INVOCATIONS_BIT_EXT = 0x00001000,
VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkQueryPipelineStatisticFlagBits;
typedef VkFlags VkQueryPipelineStatisticFlags;
@@ -2114,14 +2428,11 @@ typedef enum VkImageViewCreateFlagBits {
VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkImageViewCreateFlagBits;
typedef VkFlags VkImageViewCreateFlags;
-
-typedef enum VkShaderModuleCreateFlagBits {
- VK_SHADER_MODULE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VkShaderModuleCreateFlagBits;
typedef VkFlags VkShaderModuleCreateFlags;
typedef enum VkPipelineCacheCreateFlagBits {
- VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = 0x00000001,
+ VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+ VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkPipelineCacheCreateFlagBits;
typedef VkFlags VkPipelineCacheCreateFlags;
@@ -2141,6 +2452,10 @@ typedef enum VkPipelineCreateFlagBits {
VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008,
VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010,
+ VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100,
+ VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200,
+ VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000,
+ VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000,
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000,
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000,
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000,
@@ -2153,19 +2468,27 @@ typedef enum VkPipelineCreateFlagBits {
VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080,
VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000,
VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800,
- VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = 0x00000100,
- VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = 0x00000200,
+ VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 0x00800000,
+ VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400,
VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000,
+ VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000,
+ VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000,
VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT,
+ VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
+ VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT,
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT,
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE,
+ VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT,
+ VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT,
VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkPipelineCreateFlagBits;
typedef VkFlags VkPipelineCreateFlags;
typedef enum VkPipelineShaderStageCreateFlagBits {
- VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000001,
- VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000002,
+ VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT = 0x00000001,
+ VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT = 0x00000002,
+ VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT,
+ VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT,
VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkPipelineShaderStageCreateFlagBits;
typedef VkFlags VkPipelineShaderStageCreateFlags;
@@ -2185,8 +2508,8 @@ typedef enum VkShaderStageFlagBits {
VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800,
VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000,
VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000,
- VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040,
- VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080,
+ VK_SHADER_STAGE_TASK_BIT_EXT = 0x00000040,
+ VK_SHADER_STAGE_MESH_BIT_EXT = 0x00000080,
VK_SHADER_STAGE_SUBPASS_SHADING_BIT_HUAWEI = 0x00004000,
VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR,
VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR,
@@ -2194,6 +2517,8 @@ typedef enum VkShaderStageFlagBits {
VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR,
VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR,
VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR,
+ VK_SHADER_STAGE_TASK_BIT_NV = VK_SHADER_STAGE_TASK_BIT_EXT,
+ VK_SHADER_STAGE_MESH_BIT_NV = VK_SHADER_STAGE_MESH_BIT_EXT,
VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkShaderStageFlagBits;
@@ -2211,15 +2536,36 @@ typedef VkFlags VkPipelineTessellationStateCreateFlags;
typedef VkFlags VkPipelineViewportStateCreateFlags;
typedef VkFlags VkPipelineRasterizationStateCreateFlags;
typedef VkFlags VkPipelineMultisampleStateCreateFlags;
+
+typedef enum VkPipelineDepthStencilStateCreateFlagBits {
+ VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT = 0x00000001,
+ VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT = 0x00000002,
+ VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT,
+ VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT,
+ VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineDepthStencilStateCreateFlagBits;
typedef VkFlags VkPipelineDepthStencilStateCreateFlags;
+
+typedef enum VkPipelineColorBlendStateCreateFlagBits {
+ VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_EXT = 0x00000001,
+ VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM = VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_EXT,
+ VK_PIPELINE_COLOR_BLEND_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineColorBlendStateCreateFlagBits;
typedef VkFlags VkPipelineColorBlendStateCreateFlags;
typedef VkFlags VkPipelineDynamicStateCreateFlags;
+
+typedef enum VkPipelineLayoutCreateFlagBits {
+ VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT = 0x00000002,
+ VK_PIPELINE_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineLayoutCreateFlagBits;
typedef VkFlags VkPipelineLayoutCreateFlags;
typedef VkFlags VkShaderStageFlags;
typedef enum VkSamplerCreateFlagBits {
VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001,
VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002,
+ VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT = 0x00000004,
+ VK_SAMPLER_CREATE_IMAGE_PROCESSING_BIT_QCOM = 0x00000010,
VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkSamplerCreateFlagBits;
typedef VkFlags VkSamplerCreateFlags;
@@ -2227,8 +2573,9 @@ typedef VkFlags VkSamplerCreateFlags;
typedef enum VkDescriptorPoolCreateFlagBits {
VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001,
VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002,
- VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE = 0x00000004,
+ VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT = 0x00000004,
VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,
+ VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE = VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT,
VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkDescriptorPoolCreateFlagBits;
typedef VkFlags VkDescriptorPoolCreateFlags;
@@ -2237,8 +2584,9 @@ typedef VkFlags VkDescriptorPoolResetFlags;
typedef enum VkDescriptorSetLayoutCreateFlagBits {
VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002,
VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001,
- VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = 0x00000004,
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT = 0x00000004,
VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
+ VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT,
VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkDescriptorSetLayoutCreateFlagBits;
typedef VkFlags VkDescriptorSetLayoutCreateFlags;
@@ -2253,6 +2601,7 @@ typedef enum VkDependencyFlagBits {
VK_DEPENDENCY_BY_REGION_BIT = 0x00000001,
VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004,
VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002,
+ VK_DEPENDENCY_FEEDBACK_LOOP_BIT_EXT = 0x00000008,
VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT,
VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT,
VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
@@ -2277,6 +2626,13 @@ typedef enum VkSubpassDescriptionFlagBits {
VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002,
VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = 0x00000004,
VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = 0x00000008,
+ VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT = 0x00000010,
+ VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT = 0x00000020,
+ VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT = 0x00000040,
+ VK_SUBPASS_DESCRIPTION_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x00000080,
+ VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_ARM = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT,
+ VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT,
+ VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT,
VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkSubpassDescriptionFlagBits;
typedef VkFlags VkSubpassDescriptionFlags;
@@ -2421,6 +2777,14 @@ typedef struct VkMemoryBarrier {
VkAccessFlags dstAccessMask;
} VkMemoryBarrier;
+typedef struct VkPipelineCacheHeaderVersionOne {
+ uint32_t headerSize;
+ VkPipelineCacheHeaderVersion headerVersion;
+ uint32_t vendorID;
+ uint32_t deviceID;
+ uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+} VkPipelineCacheHeaderVersionOne;
+
typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
void* pUserData,
size_t size,
@@ -4492,6 +4856,7 @@ typedef enum VkExternalMemoryHandleTypeFlagBits {
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA = 0x00000800,
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_RDMA_ADDRESS_BIT_NV = 0x00001000,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
@@ -5264,6 +5629,13 @@ typedef enum VkDriverId {
VK_DRIVER_ID_MOLTENVK = 14,
VK_DRIVER_ID_COREAVI_PROPRIETARY = 15,
VK_DRIVER_ID_JUICE_PROPRIETARY = 16,
+ VK_DRIVER_ID_VERISILICON_PROPRIETARY = 17,
+ VK_DRIVER_ID_MESA_TURNIP = 18,
+ VK_DRIVER_ID_MESA_V3DV = 19,
+ VK_DRIVER_ID_MESA_PANVK = 20,
+ VK_DRIVER_ID_SAMSUNG_PROPRIETARY = 21,
+ VK_DRIVER_ID_MESA_VENUS = 22,
+ VK_DRIVER_ID_MESA_DOZEN = 23,
VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY,
VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE,
VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV,
@@ -5985,6 +6357,1046 @@ VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress(
#endif
+#define VK_VERSION_1_3 1
+// Vulkan 1.3 version number
+#define VK_API_VERSION_1_3 VK_MAKE_API_VERSION(0, 1, 3, 0)// Patch version should always be set to 0
+
+typedef uint64_t VkFlags64;
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlot)
+
+typedef enum VkPipelineCreationFeedbackFlagBits {
+ VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT = 0x00000001,
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT = 0x00000002,
+ VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT = 0x00000004,
+ VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
+ VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT,
+ VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT,
+ VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCreationFeedbackFlagBits;
+typedef VkFlags VkPipelineCreationFeedbackFlags;
+
+typedef enum VkToolPurposeFlagBits {
+ VK_TOOL_PURPOSE_VALIDATION_BIT = 0x00000001,
+ VK_TOOL_PURPOSE_PROFILING_BIT = 0x00000002,
+ VK_TOOL_PURPOSE_TRACING_BIT = 0x00000004,
+ VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT = 0x00000008,
+ VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT = 0x00000010,
+ VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020,
+ VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040,
+ VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = VK_TOOL_PURPOSE_VALIDATION_BIT,
+ VK_TOOL_PURPOSE_PROFILING_BIT_EXT = VK_TOOL_PURPOSE_PROFILING_BIT,
+ VK_TOOL_PURPOSE_TRACING_BIT_EXT = VK_TOOL_PURPOSE_TRACING_BIT,
+ VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT,
+ VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT,
+ VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkToolPurposeFlagBits;
+typedef VkFlags VkToolPurposeFlags;
+typedef VkFlags VkPrivateDataSlotCreateFlags;
+typedef VkFlags64 VkPipelineStageFlags2;
+
+// Flag bits for VkPipelineStageFlagBits2
+typedef VkFlags64 VkPipelineStageFlagBits2;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE = 0ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT = 0x00000001ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT = 0x00000002ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT = 0x00000004ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT = 0x00000008ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT = 0x00000040ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT = 0x00000080ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT = 0x00000100ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT = 0x00000200ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT = 0x00000800ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT = 0x00002000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT = 0x00004000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT = 0x00008000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT = 0x00010000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT = 0x100000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT = 0x200000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT = 0x400000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT = 0x800000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT = 0x1000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT = 0x2000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT = 0x4000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL;
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL;
+#endif
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV = 0x00400000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR = 0x00200000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV = 0x00200000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV = 0x00080000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0x00100000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT = 0x00080000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT = 0x00100000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI = 0x8000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI = 0x10000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR = 0x10000000ULL;
+
+typedef VkFlags64 VkAccessFlags2;
+
+// Flag bits for VkAccessFlagBits2
+typedef VkFlags64 VkAccessFlagBits2;
+static const VkAccessFlagBits2 VK_ACCESS_2_NONE = 0ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_NONE_KHR = 0ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT = 0x00000001ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT = 0x00000002ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT = 0x00000008ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT = 0x00000010ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT = 0x00000020ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT = 0x00000040ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT = 0x00000080ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT = 0x00000800ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT = 0x00001000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT = 0x00002000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT = 0x00004000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT = 0x00008000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT_KHR = 0x00008000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT = 0x00010000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT = 0x100000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT = 0x200000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT = 0x400000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL;
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL;
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL;
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL;
+#endif
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI = 0x8000000000ULL;
+static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR = 0x10000000000ULL;
+
+
+typedef enum VkSubmitFlagBits {
+ VK_SUBMIT_PROTECTED_BIT = 0x00000001,
+ VK_SUBMIT_PROTECTED_BIT_KHR = VK_SUBMIT_PROTECTED_BIT,
+ VK_SUBMIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubmitFlagBits;
+typedef VkFlags VkSubmitFlags;
+
+typedef enum VkRenderingFlagBits {
+ VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT = 0x00000001,
+ VK_RENDERING_SUSPENDING_BIT = 0x00000002,
+ VK_RENDERING_RESUMING_BIT = 0x00000004,
+ VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x00000008,
+ VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT,
+ VK_RENDERING_SUSPENDING_BIT_KHR = VK_RENDERING_SUSPENDING_BIT,
+ VK_RENDERING_RESUMING_BIT_KHR = VK_RENDERING_RESUMING_BIT,
+ VK_RENDERING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkRenderingFlagBits;
+typedef VkFlags VkRenderingFlags;
+typedef VkFlags64 VkFormatFeatureFlags2;
+
+// Flag bits for VkFormatFeatureFlagBits2
+typedef VkFlags64 VkFormatFeatureFlagBits2;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT = 0x00000001ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR = 0x00000001ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT = 0x00000002ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR = 0x00000002ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR = 0x00000004ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000010ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000010ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR = 0x00000020ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT = 0x00000040ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR = 0x00000040ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT = 0x00000080ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR = 0x00000080ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR = 0x00000100ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR = 0x00000200ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT = 0x00000400ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR = 0x00000400ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT = 0x00000800ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR = 0x00000800ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR = 0x00001000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT = 0x00002000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT = 0x00004000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR = 0x00004000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT = 0x00008000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR = 0x00008000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR = 0x00010000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT = 0x00400000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR = 0x00400000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT = 0x00800000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT = 0x80000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR = 0x80000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT = 0x100000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR = 0x100000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT = 0x200000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR = 0x200000000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000ULL;
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000ULL;
+#endif
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000ULL;
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000ULL;
+#endif
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV = 0x4000000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_IMAGE_BIT_QCOM = 0x400000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_SAMPLED_IMAGE_BIT_QCOM = 0x800000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLOCK_MATCHING_BIT_QCOM = 0x1000000000ULL;
+static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BOX_FILTER_SAMPLED_BIT_QCOM = 0x2000000000ULL;
+
+typedef struct VkPhysicalDeviceVulkan13Features {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 robustImageAccess;
+ VkBool32 inlineUniformBlock;
+ VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind;
+ VkBool32 pipelineCreationCacheControl;
+ VkBool32 privateData;
+ VkBool32 shaderDemoteToHelperInvocation;
+ VkBool32 shaderTerminateInvocation;
+ VkBool32 subgroupSizeControl;
+ VkBool32 computeFullSubgroups;
+ VkBool32 synchronization2;
+ VkBool32 textureCompressionASTC_HDR;
+ VkBool32 shaderZeroInitializeWorkgroupMemory;
+ VkBool32 dynamicRendering;
+ VkBool32 shaderIntegerDotProduct;
+ VkBool32 maintenance4;
+} VkPhysicalDeviceVulkan13Features;
+
+typedef struct VkPhysicalDeviceVulkan13Properties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t minSubgroupSize;
+ uint32_t maxSubgroupSize;
+ uint32_t maxComputeWorkgroupSubgroups;
+ VkShaderStageFlags requiredSubgroupSizeStages;
+ uint32_t maxInlineUniformBlockSize;
+ uint32_t maxPerStageDescriptorInlineUniformBlocks;
+ uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks;
+ uint32_t maxDescriptorSetInlineUniformBlocks;
+ uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks;
+ uint32_t maxInlineUniformTotalSize;
+ VkBool32 integerDotProduct8BitUnsignedAccelerated;
+ VkBool32 integerDotProduct8BitSignedAccelerated;
+ VkBool32 integerDotProduct8BitMixedSignednessAccelerated;
+ VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated;
+ VkBool32 integerDotProduct4x8BitPackedSignedAccelerated;
+ VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated;
+ VkBool32 integerDotProduct16BitUnsignedAccelerated;
+ VkBool32 integerDotProduct16BitSignedAccelerated;
+ VkBool32 integerDotProduct16BitMixedSignednessAccelerated;
+ VkBool32 integerDotProduct32BitUnsignedAccelerated;
+ VkBool32 integerDotProduct32BitSignedAccelerated;
+ VkBool32 integerDotProduct32BitMixedSignednessAccelerated;
+ VkBool32 integerDotProduct64BitUnsignedAccelerated;
+ VkBool32 integerDotProduct64BitSignedAccelerated;
+ VkBool32 integerDotProduct64BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating8BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated;
+ VkDeviceSize storageTexelBufferOffsetAlignmentBytes;
+ VkBool32 storageTexelBufferOffsetSingleTexelAlignment;
+ VkDeviceSize uniformTexelBufferOffsetAlignmentBytes;
+ VkBool32 uniformTexelBufferOffsetSingleTexelAlignment;
+ VkDeviceSize maxBufferSize;
+} VkPhysicalDeviceVulkan13Properties;
+
+typedef struct VkPipelineCreationFeedback {
+ VkPipelineCreationFeedbackFlags flags;
+ uint64_t duration;
+} VkPipelineCreationFeedback;
+
+typedef struct VkPipelineCreationFeedbackCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreationFeedback* pPipelineCreationFeedback;
+ uint32_t pipelineStageCreationFeedbackCount;
+ VkPipelineCreationFeedback* pPipelineStageCreationFeedbacks;
+} VkPipelineCreationFeedbackCreateInfo;
+
+typedef struct VkPhysicalDeviceShaderTerminateInvocationFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderTerminateInvocation;
+} VkPhysicalDeviceShaderTerminateInvocationFeatures;
+
+typedef struct VkPhysicalDeviceToolProperties {
+ VkStructureType sType;
+ void* pNext;
+ char name[VK_MAX_EXTENSION_NAME_SIZE];
+ char version[VK_MAX_EXTENSION_NAME_SIZE];
+ VkToolPurposeFlags purposes;
+ char description[VK_MAX_DESCRIPTION_SIZE];
+ char layer[VK_MAX_EXTENSION_NAME_SIZE];
+} VkPhysicalDeviceToolProperties;
+
+typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderDemoteToHelperInvocation;
+} VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures;
+
+typedef struct VkPhysicalDevicePrivateDataFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 privateData;
+} VkPhysicalDevicePrivateDataFeatures;
+
+typedef struct VkDevicePrivateDataCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t privateDataSlotRequestCount;
+} VkDevicePrivateDataCreateInfo;
+
+typedef struct VkPrivateDataSlotCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkPrivateDataSlotCreateFlags flags;
+} VkPrivateDataSlotCreateInfo;
+
+typedef struct VkPhysicalDevicePipelineCreationCacheControlFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 pipelineCreationCacheControl;
+} VkPhysicalDevicePipelineCreationCacheControlFeatures;
+
+typedef struct VkMemoryBarrier2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineStageFlags2 srcStageMask;
+ VkAccessFlags2 srcAccessMask;
+ VkPipelineStageFlags2 dstStageMask;
+ VkAccessFlags2 dstAccessMask;
+} VkMemoryBarrier2;
+
+typedef struct VkBufferMemoryBarrier2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineStageFlags2 srcStageMask;
+ VkAccessFlags2 srcAccessMask;
+ VkPipelineStageFlags2 dstStageMask;
+ VkAccessFlags2 dstAccessMask;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkBuffer buffer;
+ VkDeviceSize offset;
+ VkDeviceSize size;
+} VkBufferMemoryBarrier2;
+
+typedef struct VkImageMemoryBarrier2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineStageFlags2 srcStageMask;
+ VkAccessFlags2 srcAccessMask;
+ VkPipelineStageFlags2 dstStageMask;
+ VkAccessFlags2 dstAccessMask;
+ VkImageLayout oldLayout;
+ VkImageLayout newLayout;
+ uint32_t srcQueueFamilyIndex;
+ uint32_t dstQueueFamilyIndex;
+ VkImage image;
+ VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier2;
+
+typedef struct VkDependencyInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkDependencyFlags dependencyFlags;
+ uint32_t memoryBarrierCount;
+ const VkMemoryBarrier2* pMemoryBarriers;
+ uint32_t bufferMemoryBarrierCount;
+ const VkBufferMemoryBarrier2* pBufferMemoryBarriers;
+ uint32_t imageMemoryBarrierCount;
+ const VkImageMemoryBarrier2* pImageMemoryBarriers;
+} VkDependencyInfo;
+
+typedef struct VkSemaphoreSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ uint64_t value;
+ VkPipelineStageFlags2 stageMask;
+ uint32_t deviceIndex;
+} VkSemaphoreSubmitInfo;
+
+typedef struct VkCommandBufferSubmitInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkCommandBuffer commandBuffer;
+ uint32_t deviceMask;
+} VkCommandBufferSubmitInfo;
+
+typedef struct VkSubmitInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkSubmitFlags flags;
+ uint32_t waitSemaphoreInfoCount;
+ const VkSemaphoreSubmitInfo* pWaitSemaphoreInfos;
+ uint32_t commandBufferInfoCount;
+ const VkCommandBufferSubmitInfo* pCommandBufferInfos;
+ uint32_t signalSemaphoreInfoCount;
+ const VkSemaphoreSubmitInfo* pSignalSemaphoreInfos;
+} VkSubmitInfo2;
+
+typedef struct VkPhysicalDeviceSynchronization2Features {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 synchronization2;
+} VkPhysicalDeviceSynchronization2Features;
+
+typedef struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderZeroInitializeWorkgroupMemory;
+} VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures;
+
+typedef struct VkPhysicalDeviceImageRobustnessFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 robustImageAccess;
+} VkPhysicalDeviceImageRobustnessFeatures;
+
+typedef struct VkBufferCopy2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize srcOffset;
+ VkDeviceSize dstOffset;
+ VkDeviceSize size;
+} VkBufferCopy2;
+
+typedef struct VkCopyBufferInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer srcBuffer;
+ VkBuffer dstBuffer;
+ uint32_t regionCount;
+ const VkBufferCopy2* pRegions;
+} VkCopyBufferInfo2;
+
+typedef struct VkImageCopy2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageCopy2;
+
+typedef struct VkCopyImageInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage srcImage;
+ VkImageLayout srcImageLayout;
+ VkImage dstImage;
+ VkImageLayout dstImageLayout;
+ uint32_t regionCount;
+ const VkImageCopy2* pRegions;
+} VkCopyImageInfo2;
+
+typedef struct VkBufferImageCopy2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize bufferOffset;
+ uint32_t bufferRowLength;
+ uint32_t bufferImageHeight;
+ VkImageSubresourceLayers imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy2;
+
+typedef struct VkCopyBufferToImageInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkBuffer srcBuffer;
+ VkImage dstImage;
+ VkImageLayout dstImageLayout;
+ uint32_t regionCount;
+ const VkBufferImageCopy2* pRegions;
+} VkCopyBufferToImageInfo2;
+
+typedef struct VkCopyImageToBufferInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage srcImage;
+ VkImageLayout srcImageLayout;
+ VkBuffer dstBuffer;
+ uint32_t regionCount;
+ const VkBufferImageCopy2* pRegions;
+} VkCopyImageToBufferInfo2;
+
+typedef struct VkImageBlit2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffsets[2];
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffsets[2];
+} VkImageBlit2;
+
+typedef struct VkBlitImageInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage srcImage;
+ VkImageLayout srcImageLayout;
+ VkImage dstImage;
+ VkImageLayout dstImageLayout;
+ uint32_t regionCount;
+ const VkImageBlit2* pRegions;
+ VkFilter filter;
+} VkBlitImageInfo2;
+
+typedef struct VkImageResolve2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageSubresourceLayers srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresourceLayers dstSubresource;
+ VkOffset3D dstOffset;
+ VkExtent3D extent;
+} VkImageResolve2;
+
+typedef struct VkResolveImageInfo2 {
+ VkStructureType sType;
+ const void* pNext;
+ VkImage srcImage;
+ VkImageLayout srcImageLayout;
+ VkImage dstImage;
+ VkImageLayout dstImageLayout;
+ uint32_t regionCount;
+ const VkImageResolve2* pRegions;
+} VkResolveImageInfo2;
+
+typedef struct VkPhysicalDeviceSubgroupSizeControlFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 subgroupSizeControl;
+ VkBool32 computeFullSubgroups;
+} VkPhysicalDeviceSubgroupSizeControlFeatures;
+
+typedef struct VkPhysicalDeviceSubgroupSizeControlProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t minSubgroupSize;
+ uint32_t maxSubgroupSize;
+ uint32_t maxComputeWorkgroupSubgroups;
+ VkShaderStageFlags requiredSubgroupSizeStages;
+} VkPhysicalDeviceSubgroupSizeControlProperties;
+
+typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfo {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t requiredSubgroupSize;
+} VkPipelineShaderStageRequiredSubgroupSizeCreateInfo;
+
+typedef struct VkPhysicalDeviceInlineUniformBlockFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 inlineUniformBlock;
+ VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind;
+} VkPhysicalDeviceInlineUniformBlockFeatures;
+
+typedef struct VkPhysicalDeviceInlineUniformBlockProperties {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxInlineUniformBlockSize;
+ uint32_t maxPerStageDescriptorInlineUniformBlocks;
+ uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks;
+ uint32_t maxDescriptorSetInlineUniformBlocks;
+ uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks;
+} VkPhysicalDeviceInlineUniformBlockProperties;
+
+typedef struct VkWriteDescriptorSetInlineUniformBlock {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t dataSize;
+ const void* pData;
+} VkWriteDescriptorSetInlineUniformBlock;
+
+typedef struct VkDescriptorPoolInlineUniformBlockCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t maxInlineUniformBlockBindings;
+} VkDescriptorPoolInlineUniformBlockCreateInfo;
+
+typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 textureCompressionASTC_HDR;
+} VkPhysicalDeviceTextureCompressionASTCHDRFeatures;
+
+typedef struct VkRenderingAttachmentInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+ VkResolveModeFlagBits resolveMode;
+ VkImageView resolveImageView;
+ VkImageLayout resolveImageLayout;
+ VkAttachmentLoadOp loadOp;
+ VkAttachmentStoreOp storeOp;
+ VkClearValue clearValue;
+} VkRenderingAttachmentInfo;
+
+typedef struct VkRenderingInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderingFlags flags;
+ VkRect2D renderArea;
+ uint32_t layerCount;
+ uint32_t viewMask;
+ uint32_t colorAttachmentCount;
+ const VkRenderingAttachmentInfo* pColorAttachments;
+ const VkRenderingAttachmentInfo* pDepthAttachment;
+ const VkRenderingAttachmentInfo* pStencilAttachment;
+} VkRenderingInfo;
+
+typedef struct VkPipelineRenderingCreateInfo {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t viewMask;
+ uint32_t colorAttachmentCount;
+ const VkFormat* pColorAttachmentFormats;
+ VkFormat depthAttachmentFormat;
+ VkFormat stencilAttachmentFormat;
+} VkPipelineRenderingCreateInfo;
+
+typedef struct VkPhysicalDeviceDynamicRenderingFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 dynamicRendering;
+} VkPhysicalDeviceDynamicRenderingFeatures;
+
+typedef struct VkCommandBufferInheritanceRenderingInfo {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderingFlags flags;
+ uint32_t viewMask;
+ uint32_t colorAttachmentCount;
+ const VkFormat* pColorAttachmentFormats;
+ VkFormat depthAttachmentFormat;
+ VkFormat stencilAttachmentFormat;
+ VkSampleCountFlagBits rasterizationSamples;
+} VkCommandBufferInheritanceRenderingInfo;
+
+typedef struct VkPhysicalDeviceShaderIntegerDotProductFeatures {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderIntegerDotProduct;
+} VkPhysicalDeviceShaderIntegerDotProductFeatures;
+
+typedef struct VkPhysicalDeviceShaderIntegerDotProductProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 integerDotProduct8BitUnsignedAccelerated;
+ VkBool32 integerDotProduct8BitSignedAccelerated;
+ VkBool32 integerDotProduct8BitMixedSignednessAccelerated;
+ VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated;
+ VkBool32 integerDotProduct4x8BitPackedSignedAccelerated;
+ VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated;
+ VkBool32 integerDotProduct16BitUnsignedAccelerated;
+ VkBool32 integerDotProduct16BitSignedAccelerated;
+ VkBool32 integerDotProduct16BitMixedSignednessAccelerated;
+ VkBool32 integerDotProduct32BitUnsignedAccelerated;
+ VkBool32 integerDotProduct32BitSignedAccelerated;
+ VkBool32 integerDotProduct32BitMixedSignednessAccelerated;
+ VkBool32 integerDotProduct64BitUnsignedAccelerated;
+ VkBool32 integerDotProduct64BitSignedAccelerated;
+ VkBool32 integerDotProduct64BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating8BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated;
+ VkBool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated;
+} VkPhysicalDeviceShaderIntegerDotProductProperties;
+
+typedef struct VkPhysicalDeviceTexelBufferAlignmentProperties {
+ VkStructureType sType;
+ void* pNext;
+ VkDeviceSize storageTexelBufferOffsetAlignmentBytes;
+ VkBool32 storageTexelBufferOffsetSingleTexelAlignment;
+ VkDeviceSize uniformTexelBufferOffsetAlignmentBytes;
+ VkBool32 uniformTexelBufferOffsetSingleTexelAlignment;
+} VkPhysicalDeviceTexelBufferAlignmentProperties;
+
+typedef struct VkFormatProperties3 {
+ VkStructureType sType;
+ void* pNext;
+ VkFormatFeatureFlags2 linearTilingFeatures;
+ VkFormatFeatureFlags2 optimalTilingFeatures;
+ VkFormatFeatureFlags2 bufferFeatures;
+} VkFormatProperties3;
+
+typedef struct VkPhysicalDeviceMaintenance4Features {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 maintenance4;
+} VkPhysicalDeviceMaintenance4Features;
+
+typedef struct VkPhysicalDeviceMaintenance4Properties {
+ VkStructureType sType;
+ void* pNext;
+ VkDeviceSize maxBufferSize;
+} VkPhysicalDeviceMaintenance4Properties;
+
+typedef struct VkDeviceBufferMemoryRequirements {
+ VkStructureType sType;
+ const void* pNext;
+ const VkBufferCreateInfo* pCreateInfo;
+} VkDeviceBufferMemoryRequirements;
+
+typedef struct VkDeviceImageMemoryRequirements {
+ VkStructureType sType;
+ const void* pNext;
+ const VkImageCreateInfo* pCreateInfo;
+ VkImageAspectFlagBits planeAspect;
+} VkDeviceImageMemoryRequirements;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolProperties)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlot)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);
+typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlot)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);
+typedef void (VKAPI_PTR *PFN_vkGetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2)(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRendering)(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRendering)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCullMode)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFace)(VkCommandBuffer commandBuffer, VkFrontFace frontFace);
+typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopology)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCount)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCount)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnable)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOp)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnable)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOp)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp);
+typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnable)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnable)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirements)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolProperties(
+ VkPhysicalDevice physicalDevice,
+ uint32_t* pToolCount,
+ VkPhysicalDeviceToolProperties* pToolProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlot(
+ VkDevice device,
+ const VkPrivateDataSlotCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPrivateDataSlot* pPrivateDataSlot);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlot(
+ VkDevice device,
+ VkPrivateDataSlot privateDataSlot,
+ const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateData(
+ VkDevice device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ VkPrivateDataSlot privateDataSlot,
+ uint64_t data);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPrivateData(
+ VkDevice device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ VkPrivateDataSlot privateDataSlot,
+ uint64_t* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ const VkDependencyInfo* pDependencyInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags2 stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ const VkDependencyInfo* pDependencyInfos);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2(
+ VkCommandBuffer commandBuffer,
+ const VkDependencyInfo* pDependencyInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags2 stage,
+ VkQueryPool queryPool,
+ uint32_t query);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2(
+ VkQueue queue,
+ uint32_t submitCount,
+ const VkSubmitInfo2* pSubmits,
+ VkFence fence);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2(
+ VkCommandBuffer commandBuffer,
+ const VkCopyBufferInfo2* pCopyBufferInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2(
+ VkCommandBuffer commandBuffer,
+ const VkCopyImageInfo2* pCopyImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2(
+ VkCommandBuffer commandBuffer,
+ const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2(
+ VkCommandBuffer commandBuffer,
+ const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2(
+ VkCommandBuffer commandBuffer,
+ const VkBlitImageInfo2* pBlitImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2(
+ VkCommandBuffer commandBuffer,
+ const VkResolveImageInfo2* pResolveImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRendering(
+ VkCommandBuffer commandBuffer,
+ const VkRenderingInfo* pRenderingInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRendering(
+ VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCullMode(
+ VkCommandBuffer commandBuffer,
+ VkCullModeFlags cullMode);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFace(
+ VkCommandBuffer commandBuffer,
+ VkFrontFace frontFace);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopology(
+ VkCommandBuffer commandBuffer,
+ VkPrimitiveTopology primitiveTopology);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCount(
+ VkCommandBuffer commandBuffer,
+ uint32_t viewportCount,
+ const VkViewport* pViewports);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCount(
+ VkCommandBuffer commandBuffer,
+ uint32_t scissorCount,
+ const VkRect2D* pScissors);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes,
+ const VkDeviceSize* pStrides);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 depthTestEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 depthWriteEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOp(
+ VkCommandBuffer commandBuffer,
+ VkCompareOp depthCompareOp);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 depthBoundsTestEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 stencilTestEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOp(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ VkStencilOp failOp,
+ VkStencilOp passOp,
+ VkStencilOp depthFailOp,
+ VkCompareOp compareOp);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 rasterizerDiscardEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 depthBiasEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnable(
+ VkCommandBuffer commandBuffer,
+ VkBool32 primitiveRestartEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirements(
+ VkDevice device,
+ const VkDeviceBufferMemoryRequirements* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirements(
+ VkDevice device,
+ const VkDeviceImageMemoryRequirements* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirements(
+ VkDevice device,
+ const VkDeviceImageMemoryRequirements* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+#endif
+
+
#define VK_KHR_surface 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR)
#define VK_KHR_SURFACE_SPEC_VERSION 25
@@ -6177,7 +7589,7 @@ typedef struct VkAcquireNextImageInfoKHR {
typedef struct VkDeviceGroupPresentCapabilitiesKHR {
VkStructureType sType;
- const void* pNext;
+ void* pNext;
uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE];
VkDeviceGroupPresentModeFlagsKHR modes;
} VkDeviceGroupPresentCapabilitiesKHR;
@@ -6411,6 +7823,68 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(
#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge"
+#define VK_KHR_dynamic_rendering 1
+#define VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION 1
+#define VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME "VK_KHR_dynamic_rendering"
+typedef VkRenderingFlags VkRenderingFlagsKHR;
+
+typedef VkRenderingFlagBits VkRenderingFlagBitsKHR;
+
+typedef VkRenderingInfo VkRenderingInfoKHR;
+
+typedef VkRenderingAttachmentInfo VkRenderingAttachmentInfoKHR;
+
+typedef VkPipelineRenderingCreateInfo VkPipelineRenderingCreateInfoKHR;
+
+typedef VkPhysicalDeviceDynamicRenderingFeatures VkPhysicalDeviceDynamicRenderingFeaturesKHR;
+
+typedef VkCommandBufferInheritanceRenderingInfo VkCommandBufferInheritanceRenderingInfoKHR;
+
+typedef struct VkRenderingFragmentShadingRateAttachmentInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+ VkExtent2D shadingRateAttachmentTexelSize;
+} VkRenderingFragmentShadingRateAttachmentInfoKHR;
+
+typedef struct VkRenderingFragmentDensityMapAttachmentInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageView imageView;
+ VkImageLayout imageLayout;
+} VkRenderingFragmentDensityMapAttachmentInfoEXT;
+
+typedef struct VkAttachmentSampleCountInfoAMD {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t colorAttachmentCount;
+ const VkSampleCountFlagBits* pColorAttachmentSamples;
+ VkSampleCountFlagBits depthStencilAttachmentSamples;
+} VkAttachmentSampleCountInfoAMD;
+
+typedef VkAttachmentSampleCountInfoAMD VkAttachmentSampleCountInfoNV;
+
+typedef struct VkMultiviewPerViewAttributesInfoNVX {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 perViewAttributes;
+ VkBool32 perViewAttributesPositionXOnly;
+} VkMultiviewPerViewAttributesInfoNVX;
+
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderingKHR)(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderingKHR)(VkCommandBuffer commandBuffer);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderingKHR(
+ VkCommandBuffer commandBuffer,
+ const VkRenderingInfo* pRenderingInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderingKHR(
+ VkCommandBuffer commandBuffer);
+#endif
+
+
#define VK_KHR_multiview 1
#define VK_KHR_MULTIVIEW_SPEC_VERSION 1
#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview"
@@ -6545,8 +8019,10 @@ VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR(
#define VK_KHR_maintenance1 1
-#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2
-#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1"
+#define VK_KHR_MAINTENANCE_1_SPEC_VERSION 2
+#define VK_KHR_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_maintenance1"
+#define VK_KHR_MAINTENANCE1_SPEC_VERSION VK_KHR_MAINTENANCE_1_SPEC_VERSION
+#define VK_KHR_MAINTENANCE1_EXTENSION_NAME VK_KHR_MAINTENANCE_1_EXTENSION_NAME
typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR;
typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
@@ -7052,7 +8528,7 @@ typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR {
typedef struct VkPerformanceCounterKHR {
VkStructureType sType;
- const void* pNext;
+ void* pNext;
VkPerformanceCounterUnitKHR unit;
VkPerformanceCounterScopeKHR scope;
VkPerformanceCounterStorageKHR storage;
@@ -7061,7 +8537,7 @@ typedef struct VkPerformanceCounterKHR {
typedef struct VkPerformanceCounterDescriptionKHR {
VkStructureType sType;
- const void* pNext;
+ void* pNext;
VkPerformanceCounterDescriptionFlagsKHR flags;
char name[VK_MAX_DESCRIPTION_SIZE];
char category[VK_MAX_DESCRIPTION_SIZE];
@@ -7126,8 +8602,10 @@ VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR(
#define VK_KHR_maintenance2 1
-#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1
-#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2"
+#define VK_KHR_MAINTENANCE_2_SPEC_VERSION 1
+#define VK_KHR_MAINTENANCE_2_EXTENSION_NAME "VK_KHR_maintenance2"
+#define VK_KHR_MAINTENANCE2_SPEC_VERSION VK_KHR_MAINTENANCE_2_SPEC_VERSION
+#define VK_KHR_MAINTENANCE2_EXTENSION_NAME VK_KHR_MAINTENANCE_2_EXTENSION_NAME
typedef VkPointClippingBehavior VkPointClippingBehaviorKHR;
typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR;
@@ -7380,8 +8858,10 @@ VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR(
#define VK_KHR_maintenance3 1
-#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1
-#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3"
+#define VK_KHR_MAINTENANCE_3_SPEC_VERSION 1
+#define VK_KHR_MAINTENANCE_3_EXTENSION_NAME "VK_KHR_maintenance3"
+#define VK_KHR_MAINTENANCE3_SPEC_VERSION VK_KHR_MAINTENANCE_3_SPEC_VERSION
+#define VK_KHR_MAINTENANCE3_EXTENSION_NAME VK_KHR_MAINTENANCE_3_EXTENSION_NAME
typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR;
typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR;
@@ -7456,6 +8936,43 @@ typedef struct VkPhysicalDeviceShaderClockFeaturesKHR {
+#define VK_KHR_global_priority 1
+#define VK_MAX_GLOBAL_PRIORITY_SIZE_KHR 16U
+#define VK_KHR_GLOBAL_PRIORITY_SPEC_VERSION 1
+#define VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME "VK_KHR_global_priority"
+
+typedef enum VkQueueGlobalPriorityKHR {
+ VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR = 128,
+ VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR = 256,
+ VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR = 512,
+ VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR = 1024,
+ VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR,
+ VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR,
+ VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR,
+ VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR,
+ VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkQueueGlobalPriorityKHR;
+typedef struct VkDeviceQueueGlobalPriorityCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkQueueGlobalPriorityKHR globalPriority;
+} VkDeviceQueueGlobalPriorityCreateInfoKHR;
+
+typedef struct VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 globalPriorityQuery;
+} VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR;
+
+typedef struct VkQueueFamilyGlobalPriorityPropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t priorityCount;
+ VkQueueGlobalPriorityKHR priorities[VK_MAX_GLOBAL_PRIORITY_SIZE_KHR];
+} VkQueueFamilyGlobalPriorityPropertiesKHR;
+
+
+
#define VK_KHR_driver_properties 1
#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1
#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties"
@@ -7548,16 +9065,12 @@ typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryMo
#define VK_KHR_shader_terminate_invocation 1
#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1
#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME "VK_KHR_shader_terminate_invocation"
-typedef struct VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR {
- VkStructureType sType;
- void* pNext;
- VkBool32 shaderTerminateInvocation;
-} VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR;
+typedef VkPhysicalDeviceShaderTerminateInvocationFeatures VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR;
#define VK_KHR_fragment_shading_rate 1
-#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 1
+#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 2
#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME "VK_KHR_fragment_shading_rate"
typedef enum VkFragmentShadingRateCombinerOpKHR {
@@ -7662,6 +9175,26 @@ typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayou
+#define VK_KHR_present_wait 1
+#define VK_KHR_PRESENT_WAIT_SPEC_VERSION 1
+#define VK_KHR_PRESENT_WAIT_EXTENSION_NAME "VK_KHR_present_wait"
+typedef struct VkPhysicalDevicePresentWaitFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 presentWait;
+} VkPhysicalDevicePresentWaitFeaturesKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkWaitForPresentKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitForPresentKHR(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint64_t presentId,
+ uint64_t timeout);
+#endif
+
+
#define VK_KHR_uniform_buffer_standard_layout 1
#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1
#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout"
@@ -7826,6 +9359,15 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR
#endif
+#define VK_KHR_shader_integer_dot_product 1
+#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION 1
+#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_EXTENSION_NAME "VK_KHR_shader_integer_dot_product"
+typedef VkPhysicalDeviceShaderIntegerDotProductFeatures VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR;
+
+typedef VkPhysicalDeviceShaderIntegerDotProductProperties VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR;
+
+
+
#define VK_KHR_pipeline_library 1
#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1
#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library"
@@ -7843,260 +9385,113 @@ typedef struct VkPipelineLibraryCreateInfoKHR {
#define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info"
+#define VK_KHR_present_id 1
+#define VK_KHR_PRESENT_ID_SPEC_VERSION 1
+#define VK_KHR_PRESENT_ID_EXTENSION_NAME "VK_KHR_present_id"
+typedef struct VkPresentIdKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const uint64_t* pPresentIds;
+} VkPresentIdKHR;
+
+typedef struct VkPhysicalDevicePresentIdFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 presentId;
+} VkPhysicalDevicePresentIdFeaturesKHR;
+
+
+
#define VK_KHR_synchronization2 1
-typedef uint64_t VkFlags64;
#define VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION 1
#define VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME "VK_KHR_synchronization2"
-typedef VkFlags64 VkPipelineStageFlags2KHR;
-
-// Flag bits for VkPipelineStageFlagBits2KHR
-typedef VkFlags64 VkPipelineStageFlagBits2KHR;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL;
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL;
-#endif
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL;
-#endif
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV = 0x00400000;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR = 0x00200000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV = 0x00200000;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV = 0x00080000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0x00100000ULL;
-static const VkPipelineStageFlagBits2KHR VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI = 0x8000000000ULL;
-
-typedef VkFlags64 VkAccessFlags2KHR;
-
-// Flag bits for VkAccessFlagBits2KHR
-typedef VkFlags64 VkAccessFlagBits2KHR;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_NONE_KHR = 0ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_MEMORY_READ_BIT_KHR = 0x00008000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL;
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL;
-#endif
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL;
-#endif
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL;
-#endif
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-static const VkAccessFlagBits2KHR VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL;
-#endif
-static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000ULL;
-static const VkAccessFlagBits2KHR VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000ULL;
-
-
-typedef enum VkSubmitFlagBitsKHR {
- VK_SUBMIT_PROTECTED_BIT_KHR = 0x00000001,
- VK_SUBMIT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
-} VkSubmitFlagBitsKHR;
-typedef VkFlags VkSubmitFlagsKHR;
-typedef struct VkMemoryBarrier2KHR {
- VkStructureType sType;
- const void* pNext;
- VkPipelineStageFlags2KHR srcStageMask;
- VkAccessFlags2KHR srcAccessMask;
- VkPipelineStageFlags2KHR dstStageMask;
- VkAccessFlags2KHR dstAccessMask;
-} VkMemoryBarrier2KHR;
+typedef VkPipelineStageFlags2 VkPipelineStageFlags2KHR;
-typedef struct VkBufferMemoryBarrier2KHR {
- VkStructureType sType;
- const void* pNext;
- VkPipelineStageFlags2KHR srcStageMask;
- VkAccessFlags2KHR srcAccessMask;
- VkPipelineStageFlags2KHR dstStageMask;
- VkAccessFlags2KHR dstAccessMask;
- uint32_t srcQueueFamilyIndex;
- uint32_t dstQueueFamilyIndex;
- VkBuffer buffer;
- VkDeviceSize offset;
- VkDeviceSize size;
-} VkBufferMemoryBarrier2KHR;
-
-typedef struct VkImageMemoryBarrier2KHR {
- VkStructureType sType;
- const void* pNext;
- VkPipelineStageFlags2KHR srcStageMask;
- VkAccessFlags2KHR srcAccessMask;
- VkPipelineStageFlags2KHR dstStageMask;
- VkAccessFlags2KHR dstAccessMask;
- VkImageLayout oldLayout;
- VkImageLayout newLayout;
- uint32_t srcQueueFamilyIndex;
- uint32_t dstQueueFamilyIndex;
- VkImage image;
- VkImageSubresourceRange subresourceRange;
-} VkImageMemoryBarrier2KHR;
-
-typedef struct VkDependencyInfoKHR {
- VkStructureType sType;
- const void* pNext;
- VkDependencyFlags dependencyFlags;
- uint32_t memoryBarrierCount;
- const VkMemoryBarrier2KHR* pMemoryBarriers;
- uint32_t bufferMemoryBarrierCount;
- const VkBufferMemoryBarrier2KHR* pBufferMemoryBarriers;
- uint32_t imageMemoryBarrierCount;
- const VkImageMemoryBarrier2KHR* pImageMemoryBarriers;
-} VkDependencyInfoKHR;
-
-typedef struct VkSemaphoreSubmitInfoKHR {
- VkStructureType sType;
- const void* pNext;
- VkSemaphore semaphore;
- uint64_t value;
- VkPipelineStageFlags2KHR stageMask;
- uint32_t deviceIndex;
-} VkSemaphoreSubmitInfoKHR;
+typedef VkPipelineStageFlagBits2 VkPipelineStageFlagBits2KHR;
-typedef struct VkCommandBufferSubmitInfoKHR {
- VkStructureType sType;
- const void* pNext;
- VkCommandBuffer commandBuffer;
- uint32_t deviceMask;
-} VkCommandBufferSubmitInfoKHR;
+typedef VkAccessFlags2 VkAccessFlags2KHR;
-typedef struct VkSubmitInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkSubmitFlagsKHR flags;
- uint32_t waitSemaphoreInfoCount;
- const VkSemaphoreSubmitInfoKHR* pWaitSemaphoreInfos;
- uint32_t commandBufferInfoCount;
- const VkCommandBufferSubmitInfoKHR* pCommandBufferInfos;
- uint32_t signalSemaphoreInfoCount;
- const VkSemaphoreSubmitInfoKHR* pSignalSemaphoreInfos;
-} VkSubmitInfo2KHR;
+typedef VkAccessFlagBits2 VkAccessFlagBits2KHR;
-typedef struct VkPhysicalDeviceSynchronization2FeaturesKHR {
- VkStructureType sType;
- void* pNext;
- VkBool32 synchronization2;
-} VkPhysicalDeviceSynchronization2FeaturesKHR;
+typedef VkSubmitFlagBits VkSubmitFlagBitsKHR;
+
+typedef VkSubmitFlags VkSubmitFlagsKHR;
+
+typedef VkMemoryBarrier2 VkMemoryBarrier2KHR;
+
+typedef VkBufferMemoryBarrier2 VkBufferMemoryBarrier2KHR;
+
+typedef VkImageMemoryBarrier2 VkImageMemoryBarrier2KHR;
+
+typedef VkDependencyInfo VkDependencyInfoKHR;
+
+typedef VkSubmitInfo2 VkSubmitInfo2KHR;
+
+typedef VkSemaphoreSubmitInfo VkSemaphoreSubmitInfoKHR;
+
+typedef VkCommandBufferSubmitInfo VkCommandBufferSubmitInfoKHR;
+
+typedef VkPhysicalDeviceSynchronization2Features VkPhysicalDeviceSynchronization2FeaturesKHR;
typedef struct VkQueueFamilyCheckpointProperties2NV {
- VkStructureType sType;
- void* pNext;
- VkPipelineStageFlags2KHR checkpointExecutionStageMask;
+ VkStructureType sType;
+ void* pNext;
+ VkPipelineStageFlags2 checkpointExecutionStageMask;
} VkQueueFamilyCheckpointProperties2NV;
typedef struct VkCheckpointData2NV {
- VkStructureType sType;
- void* pNext;
- VkPipelineStageFlags2KHR stage;
- void* pCheckpointMarker;
+ VkStructureType sType;
+ void* pNext;
+ VkPipelineStageFlags2 stage;
+ void* pCheckpointMarker;
} VkCheckpointData2NV;
-typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR* pDependencyInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR stageMask);
-typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2KHR)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfoKHR* pDependencyInfos);
-typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2KHR)(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR* pDependencyInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2KHR)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage, VkQueryPool queryPool, uint32_t query);
-typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2KHR)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR* pSubmits, VkFence fence);
-typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarker2AMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2KHR)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2KHR)(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2KHR)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2KHR)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarker2AMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker);
typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointData2NV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2KHR(
VkCommandBuffer commandBuffer,
VkEvent event,
- const VkDependencyInfoKHR* pDependencyInfo);
+ const VkDependencyInfo* pDependencyInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2KHR(
VkCommandBuffer commandBuffer,
VkEvent event,
- VkPipelineStageFlags2KHR stageMask);
+ VkPipelineStageFlags2 stageMask);
VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2KHR(
VkCommandBuffer commandBuffer,
uint32_t eventCount,
const VkEvent* pEvents,
- const VkDependencyInfoKHR* pDependencyInfos);
+ const VkDependencyInfo* pDependencyInfos);
VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2KHR(
VkCommandBuffer commandBuffer,
- const VkDependencyInfoKHR* pDependencyInfo);
+ const VkDependencyInfo* pDependencyInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2KHR(
VkCommandBuffer commandBuffer,
- VkPipelineStageFlags2KHR stage,
+ VkPipelineStageFlags2 stage,
VkQueryPool queryPool,
uint32_t query);
VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2KHR(
VkQueue queue,
uint32_t submitCount,
- const VkSubmitInfo2KHR* pSubmits,
+ const VkSubmitInfo2* pSubmits,
VkFence fence);
VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarker2AMD(
VkCommandBuffer commandBuffer,
- VkPipelineStageFlags2KHR stage,
+ VkPipelineStageFlags2 stage,
VkBuffer dstBuffer,
VkDeviceSize dstOffset,
uint32_t marker);
@@ -8108,6 +9503,23 @@ VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointData2NV(
#endif
+#define VK_KHR_fragment_shader_barycentric 1
+#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1
+#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_KHR_fragment_shader_barycentric"
+typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 fragmentShaderBarycentric;
+} VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR;
+
+typedef struct VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 triStripVertexOrderIndependentOfProvokingVertex;
+} VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR;
+
+
+
#define VK_KHR_shader_subgroup_uniform_control_flow 1
#define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_SPEC_VERSION 1
#define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_EXTENSION_NAME "VK_KHR_shader_subgroup_uniform_control_flow"
@@ -8122,11 +9534,7 @@ typedef struct VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR {
#define VK_KHR_zero_initialize_workgroup_memory 1
#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION 1
#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME "VK_KHR_zero_initialize_workgroup_memory"
-typedef struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR {
- VkStructureType sType;
- void* pNext;
- VkBool32 shaderZeroInitializeWorkgroupMemory;
-} VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR;
+typedef VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR;
@@ -8147,148 +9555,145 @@ typedef struct VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR {
#define VK_KHR_copy_commands2 1
#define VK_KHR_COPY_COMMANDS_2_SPEC_VERSION 1
#define VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME "VK_KHR_copy_commands2"
-typedef struct VkBufferCopy2KHR {
- VkStructureType sType;
- const void* pNext;
- VkDeviceSize srcOffset;
- VkDeviceSize dstOffset;
- VkDeviceSize size;
-} VkBufferCopy2KHR;
+typedef VkCopyBufferInfo2 VkCopyBufferInfo2KHR;
-typedef struct VkCopyBufferInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkBuffer srcBuffer;
- VkBuffer dstBuffer;
- uint32_t regionCount;
- const VkBufferCopy2KHR* pRegions;
-} VkCopyBufferInfo2KHR;
+typedef VkCopyImageInfo2 VkCopyImageInfo2KHR;
-typedef struct VkImageCopy2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImageSubresourceLayers srcSubresource;
- VkOffset3D srcOffset;
- VkImageSubresourceLayers dstSubresource;
- VkOffset3D dstOffset;
- VkExtent3D extent;
-} VkImageCopy2KHR;
+typedef VkCopyBufferToImageInfo2 VkCopyBufferToImageInfo2KHR;
-typedef struct VkCopyImageInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImage srcImage;
- VkImageLayout srcImageLayout;
- VkImage dstImage;
- VkImageLayout dstImageLayout;
- uint32_t regionCount;
- const VkImageCopy2KHR* pRegions;
-} VkCopyImageInfo2KHR;
+typedef VkCopyImageToBufferInfo2 VkCopyImageToBufferInfo2KHR;
-typedef struct VkBufferImageCopy2KHR {
- VkStructureType sType;
- const void* pNext;
- VkDeviceSize bufferOffset;
- uint32_t bufferRowLength;
- uint32_t bufferImageHeight;
- VkImageSubresourceLayers imageSubresource;
- VkOffset3D imageOffset;
- VkExtent3D imageExtent;
-} VkBufferImageCopy2KHR;
+typedef VkBlitImageInfo2 VkBlitImageInfo2KHR;
-typedef struct VkCopyBufferToImageInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkBuffer srcBuffer;
- VkImage dstImage;
- VkImageLayout dstImageLayout;
- uint32_t regionCount;
- const VkBufferImageCopy2KHR* pRegions;
-} VkCopyBufferToImageInfo2KHR;
-
-typedef struct VkCopyImageToBufferInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImage srcImage;
- VkImageLayout srcImageLayout;
- VkBuffer dstBuffer;
- uint32_t regionCount;
- const VkBufferImageCopy2KHR* pRegions;
-} VkCopyImageToBufferInfo2KHR;
-
-typedef struct VkImageBlit2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImageSubresourceLayers srcSubresource;
- VkOffset3D srcOffsets[2];
- VkImageSubresourceLayers dstSubresource;
- VkOffset3D dstOffsets[2];
-} VkImageBlit2KHR;
+typedef VkResolveImageInfo2 VkResolveImageInfo2KHR;
-typedef struct VkBlitImageInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImage srcImage;
- VkImageLayout srcImageLayout;
- VkImage dstImage;
- VkImageLayout dstImageLayout;
- uint32_t regionCount;
- const VkImageBlit2KHR* pRegions;
- VkFilter filter;
-} VkBlitImageInfo2KHR;
+typedef VkBufferCopy2 VkBufferCopy2KHR;
-typedef struct VkImageResolve2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImageSubresourceLayers srcSubresource;
- VkOffset3D srcOffset;
- VkImageSubresourceLayers dstSubresource;
- VkOffset3D dstOffset;
- VkExtent3D extent;
-} VkImageResolve2KHR;
+typedef VkImageCopy2 VkImageCopy2KHR;
-typedef struct VkResolveImageInfo2KHR {
- VkStructureType sType;
- const void* pNext;
- VkImage srcImage;
- VkImageLayout srcImageLayout;
- VkImage dstImage;
- VkImageLayout dstImageLayout;
- uint32_t regionCount;
- const VkImageResolve2KHR* pRegions;
-} VkResolveImageInfo2KHR;
+typedef VkImageBlit2 VkImageBlit2KHR;
+
+typedef VkBufferImageCopy2 VkBufferImageCopy2KHR;
+
+typedef VkImageResolve2 VkImageResolve2KHR;
-typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR* pCopyBufferInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR* pCopyImageInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR* pBlitImageInfo);
-typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2KHR(
VkCommandBuffer commandBuffer,
- const VkCopyBufferInfo2KHR* pCopyBufferInfo);
+ const VkCopyBufferInfo2* pCopyBufferInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2KHR(
VkCommandBuffer commandBuffer,
- const VkCopyImageInfo2KHR* pCopyImageInfo);
+ const VkCopyImageInfo2* pCopyImageInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2KHR(
VkCommandBuffer commandBuffer,
- const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo);
+ const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2KHR(
VkCommandBuffer commandBuffer,
- const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo);
+ const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2KHR(
VkCommandBuffer commandBuffer,
- const VkBlitImageInfo2KHR* pBlitImageInfo);
+ const VkBlitImageInfo2* pBlitImageInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2KHR(
VkCommandBuffer commandBuffer,
- const VkResolveImageInfo2KHR* pResolveImageInfo);
+ const VkResolveImageInfo2* pResolveImageInfo);
+#endif
+
+
+#define VK_KHR_format_feature_flags2 1
+#define VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION 2
+#define VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME "VK_KHR_format_feature_flags2"
+typedef VkFormatFeatureFlags2 VkFormatFeatureFlags2KHR;
+
+typedef VkFormatFeatureFlagBits2 VkFormatFeatureFlagBits2KHR;
+
+typedef VkFormatProperties3 VkFormatProperties3KHR;
+
+
+
+#define VK_KHR_ray_tracing_maintenance1 1
+#define VK_KHR_RAY_TRACING_MAINTENANCE_1_SPEC_VERSION 1
+#define VK_KHR_RAY_TRACING_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_ray_tracing_maintenance1"
+typedef struct VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 rayTracingMaintenance1;
+ VkBool32 rayTracingPipelineTraceRaysIndirect2;
+} VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR;
+
+typedef struct VkTraceRaysIndirectCommand2KHR {
+ VkDeviceAddress raygenShaderRecordAddress;
+ VkDeviceSize raygenShaderRecordSize;
+ VkDeviceAddress missShaderBindingTableAddress;
+ VkDeviceSize missShaderBindingTableSize;
+ VkDeviceSize missShaderBindingTableStride;
+ VkDeviceAddress hitShaderBindingTableAddress;
+ VkDeviceSize hitShaderBindingTableSize;
+ VkDeviceSize hitShaderBindingTableStride;
+ VkDeviceAddress callableShaderBindingTableAddress;
+ VkDeviceSize callableShaderBindingTableSize;
+ VkDeviceSize callableShaderBindingTableStride;
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+} VkTraceRaysIndirectCommand2KHR;
+
+typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirect2KHR)(VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirect2KHR(
+ VkCommandBuffer commandBuffer,
+ VkDeviceAddress indirectDeviceAddress);
+#endif
+
+
+#define VK_KHR_portability_enumeration 1
+#define VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION 1
+#define VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME "VK_KHR_portability_enumeration"
+
+
+#define VK_KHR_maintenance4 1
+#define VK_KHR_MAINTENANCE_4_SPEC_VERSION 2
+#define VK_KHR_MAINTENANCE_4_EXTENSION_NAME "VK_KHR_maintenance4"
+typedef VkPhysicalDeviceMaintenance4Features VkPhysicalDeviceMaintenance4FeaturesKHR;
+
+typedef VkPhysicalDeviceMaintenance4Properties VkPhysicalDeviceMaintenance4PropertiesKHR;
+
+typedef VkDeviceBufferMemoryRequirements VkDeviceBufferMemoryRequirementsKHR;
+
+typedef VkDeviceImageMemoryRequirements VkDeviceImageMemoryRequirementsKHR;
+
+typedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirementsKHR)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirementsKHR(
+ VkDevice device,
+ const VkDeviceBufferMemoryRequirements* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirementsKHR(
+ VkDevice device,
+ const VkDeviceImageMemoryRequirements* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirementsKHR(
+ VkDevice device,
+ const VkDeviceImageMemoryRequirements* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
#endif
@@ -8336,6 +9741,7 @@ typedef enum VkDebugReportObjectTypeEXT {
VK_DEBUG_REPORT_OBJECT_TYPE_CU_FUNCTION_NVX_EXT = 1000029001,
VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000,
VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA_EXT = 1000366000,
VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT,
@@ -8921,11 +10327,7 @@ typedef struct VkValidationFlagsEXT {
#define VK_EXT_texture_compression_astc_hdr 1
#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1
#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr"
-typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT {
- VkStructureType sType;
- void* pNext;
- VkBool32 textureCompressionASTC_HDR;
-} VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT;
+typedef VkPhysicalDeviceTextureCompressionASTCHDRFeatures VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT;
@@ -8946,6 +10348,51 @@ typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT {
+#define VK_EXT_pipeline_robustness 1
+#define VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION 1
+#define VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_pipeline_robustness"
+
+typedef enum VkPipelineRobustnessBufferBehaviorEXT {
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT = 0,
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT = 1,
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT = 2,
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT = 3,
+ VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkPipelineRobustnessBufferBehaviorEXT;
+
+typedef enum VkPipelineRobustnessImageBehaviorEXT {
+ VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT = 0,
+ VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT = 1,
+ VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT = 2,
+ VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT = 3,
+ VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkPipelineRobustnessImageBehaviorEXT;
+typedef struct VkPhysicalDevicePipelineRobustnessFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 pipelineRobustness;
+} VkPhysicalDevicePipelineRobustnessFeaturesEXT;
+
+typedef struct VkPhysicalDevicePipelineRobustnessPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessStorageBuffers;
+ VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessUniformBuffers;
+ VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessVertexInputs;
+ VkPipelineRobustnessImageBehaviorEXT defaultRobustnessImages;
+} VkPhysicalDevicePipelineRobustnessPropertiesEXT;
+
+typedef struct VkPipelineRobustnessCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineRobustnessBufferBehaviorEXT storageBuffers;
+ VkPipelineRobustnessBufferBehaviorEXT uniformBuffers;
+ VkPipelineRobustnessBufferBehaviorEXT vertexInputs;
+ VkPipelineRobustnessImageBehaviorEXT images;
+} VkPipelineRobustnessCreateInfoEXT;
+
+
+
#define VK_EXT_conditional_rendering 1
#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2
#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering"
@@ -9195,8 +10642,10 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE(
#define VK_NV_viewport_array2 1
-#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1
-#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2"
+#define VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME "VK_NV_viewport_array2"
+#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION
+#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME
#define VK_NVX_multiview_per_view_attributes 1
@@ -9551,35 +11000,13 @@ typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFil
#define VK_EXT_inline_uniform_block 1
#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1
#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block"
-typedef struct VkPhysicalDeviceInlineUniformBlockFeaturesEXT {
- VkStructureType sType;
- void* pNext;
- VkBool32 inlineUniformBlock;
- VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind;
-} VkPhysicalDeviceInlineUniformBlockFeaturesEXT;
+typedef VkPhysicalDeviceInlineUniformBlockFeatures VkPhysicalDeviceInlineUniformBlockFeaturesEXT;
-typedef struct VkPhysicalDeviceInlineUniformBlockPropertiesEXT {
- VkStructureType sType;
- void* pNext;
- uint32_t maxInlineUniformBlockSize;
- uint32_t maxPerStageDescriptorInlineUniformBlocks;
- uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks;
- uint32_t maxDescriptorSetInlineUniformBlocks;
- uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks;
-} VkPhysicalDeviceInlineUniformBlockPropertiesEXT;
+typedef VkPhysicalDeviceInlineUniformBlockProperties VkPhysicalDeviceInlineUniformBlockPropertiesEXT;
-typedef struct VkWriteDescriptorSetInlineUniformBlockEXT {
- VkStructureType sType;
- const void* pNext;
- uint32_t dataSize;
- const void* pData;
-} VkWriteDescriptorSetInlineUniformBlockEXT;
+typedef VkWriteDescriptorSetInlineUniformBlock VkWriteDescriptorSetInlineUniformBlockEXT;
-typedef struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT {
- VkStructureType sType;
- const void* pNext;
- uint32_t maxInlineUniformBlockBindings;
-} VkDescriptorPoolInlineUniformBlockCreateInfoEXT;
+typedef VkDescriptorPoolInlineUniformBlockCreateInfo VkDescriptorPoolInlineUniformBlockCreateInfoEXT;
@@ -9766,7 +11193,7 @@ typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV {
#define VK_EXT_image_drm_format_modifier 1
-#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 1
+#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 2
#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier"
typedef struct VkDrmFormatModifierPropertiesEXT {
uint64_t drmFormatModifier;
@@ -9811,6 +11238,19 @@ typedef struct VkImageDrmFormatModifierPropertiesEXT {
uint64_t drmFormatModifier;
} VkImageDrmFormatModifierPropertiesEXT;
+typedef struct VkDrmFormatModifierProperties2EXT {
+ uint64_t drmFormatModifier;
+ uint32_t drmFormatModifierPlaneCount;
+ VkFormatFeatureFlags2 drmFormatModifierTilingFeatures;
+} VkDrmFormatModifierProperties2EXT;
+
+typedef struct VkDrmFormatModifierPropertiesList2EXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t drmFormatModifierCount;
+ VkDrmFormatModifierProperties2EXT* pDrmFormatModifierProperties;
+} VkDrmFormatModifierPropertiesList2EXT;
+
typedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties);
#ifndef VK_NO_PROTOTYPES
@@ -10075,9 +11515,10 @@ typedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV;
typedef enum VkGeometryInstanceFlagBitsKHR {
VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001,
- VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = 0x00000002,
+ VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR = 0x00000002,
VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004,
VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008,
+ VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR,
VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR,
VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR,
VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR,
@@ -10411,19 +11852,9 @@ typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT {
#define VK_EXT_global_priority 1
#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2
#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority"
+typedef VkQueueGlobalPriorityKHR VkQueueGlobalPriorityEXT;
-typedef enum VkQueueGlobalPriorityEXT {
- VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128,
- VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256,
- VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512,
- VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024,
- VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkQueueGlobalPriorityEXT;
-typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT {
- VkStructureType sType;
- const void* pNext;
- VkQueueGlobalPriorityEXT globalPriority;
-} VkDeviceQueueGlobalPriorityCreateInfoEXT;
+typedef VkDeviceQueueGlobalPriorityCreateInfoKHR VkDeviceQueueGlobalPriorityCreateInfoEXT;
@@ -10601,26 +12032,13 @@ typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT {
#define VK_EXT_pipeline_creation_feedback 1
#define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1
#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback"
+typedef VkPipelineCreationFeedbackFlagBits VkPipelineCreationFeedbackFlagBitsEXT;
-typedef enum VkPipelineCreationFeedbackFlagBitsEXT {
- VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = 0x00000001,
- VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = 0x00000002,
- VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = 0x00000004,
- VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkPipelineCreationFeedbackFlagBitsEXT;
-typedef VkFlags VkPipelineCreationFeedbackFlagsEXT;
-typedef struct VkPipelineCreationFeedbackEXT {
- VkPipelineCreationFeedbackFlagsEXT flags;
- uint64_t duration;
-} VkPipelineCreationFeedbackEXT;
-
-typedef struct VkPipelineCreationFeedbackCreateInfoEXT {
- VkStructureType sType;
- const void* pNext;
- VkPipelineCreationFeedbackEXT* pPipelineCreationFeedback;
- uint32_t pipelineStageCreationFeedbackCount;
- VkPipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks;
-} VkPipelineCreationFeedbackCreateInfoEXT;
+typedef VkPipelineCreationFeedbackFlags VkPipelineCreationFeedbackFlagsEXT;
+
+typedef VkPipelineCreationFeedbackCreateInfo VkPipelineCreationFeedbackCreateInfoEXT;
+
+typedef VkPipelineCreationFeedback VkPipelineCreationFeedbackEXT;
@@ -10705,11 +12123,7 @@ VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV(
#define VK_NV_fragment_shader_barycentric 1
#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1
#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric"
-typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV {
- VkStructureType sType;
- void* pNext;
- VkBool32 fragmentShaderBarycentric;
-} VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV;
+typedef VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV;
@@ -10971,7 +12385,7 @@ VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD(
#define VK_EXT_fragment_density_map 1
-#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 1
+#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 2
#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map"
typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT {
VkStructureType sType;
@@ -11005,8 +12419,10 @@ typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLay
#define VK_GOOGLE_hlsl_functionality1 1
-#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION 1
-#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1"
+#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION 1
+#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1"
+#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION
+#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME
#define VK_GOOGLE_decorate_string 1
@@ -11017,27 +12433,11 @@ typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLay
#define VK_EXT_subgroup_size_control 1
#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2
#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control"
-typedef struct VkPhysicalDeviceSubgroupSizeControlFeaturesEXT {
- VkStructureType sType;
- void* pNext;
- VkBool32 subgroupSizeControl;
- VkBool32 computeFullSubgroups;
-} VkPhysicalDeviceSubgroupSizeControlFeaturesEXT;
+typedef VkPhysicalDeviceSubgroupSizeControlFeatures VkPhysicalDeviceSubgroupSizeControlFeaturesEXT;
-typedef struct VkPhysicalDeviceSubgroupSizeControlPropertiesEXT {
- VkStructureType sType;
- void* pNext;
- uint32_t minSubgroupSize;
- uint32_t maxSubgroupSize;
- uint32_t maxComputeWorkgroupSubgroups;
- VkShaderStageFlags requiredSubgroupSizeStages;
-} VkPhysicalDeviceSubgroupSizeControlPropertiesEXT;
+typedef VkPhysicalDeviceSubgroupSizeControlProperties VkPhysicalDeviceSubgroupSizeControlPropertiesEXT;
-typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT {
- VkStructureType sType;
- void* pNext;
- uint32_t requiredSubgroupSize;
-} VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT;
+typedef VkPipelineShaderStageRequiredSubgroupSizeCreateInfo VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT;
@@ -11154,35 +12554,19 @@ VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT(
#define VK_EXT_tooling_info 1
#define VK_EXT_TOOLING_INFO_SPEC_VERSION 1
#define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info"
+typedef VkToolPurposeFlagBits VkToolPurposeFlagBitsEXT;
-typedef enum VkToolPurposeFlagBitsEXT {
- VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = 0x00000001,
- VK_TOOL_PURPOSE_PROFILING_BIT_EXT = 0x00000002,
- VK_TOOL_PURPOSE_TRACING_BIT_EXT = 0x00000004,
- VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = 0x00000008,
- VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = 0x00000010,
- VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020,
- VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040,
- VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkToolPurposeFlagBitsEXT;
-typedef VkFlags VkToolPurposeFlagsEXT;
-typedef struct VkPhysicalDeviceToolPropertiesEXT {
- VkStructureType sType;
- void* pNext;
- char name[VK_MAX_EXTENSION_NAME_SIZE];
- char version[VK_MAX_EXTENSION_NAME_SIZE];
- VkToolPurposeFlagsEXT purposes;
- char description[VK_MAX_DESCRIPTION_SIZE];
- char layer[VK_MAX_EXTENSION_NAME_SIZE];
-} VkPhysicalDeviceToolPropertiesEXT;
+typedef VkToolPurposeFlags VkToolPurposeFlagsEXT;
-typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties);
+typedef VkPhysicalDeviceToolProperties VkPhysicalDeviceToolPropertiesEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT(
VkPhysicalDevice physicalDevice,
uint32_t* pToolCount,
- VkPhysicalDeviceToolPropertiesEXT* pToolProperties);
+ VkPhysicalDeviceToolProperties* pToolProperties);
#endif
@@ -11588,14 +12972,32 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT(
#endif
+#define VK_EXT_shader_atomic_float2 1
+#define VK_EXT_SHADER_ATOMIC_FLOAT_2_SPEC_VERSION 1
+#define VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME "VK_EXT_shader_atomic_float2"
+typedef struct VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderBufferFloat16Atomics;
+ VkBool32 shaderBufferFloat16AtomicAdd;
+ VkBool32 shaderBufferFloat16AtomicMinMax;
+ VkBool32 shaderBufferFloat32AtomicMinMax;
+ VkBool32 shaderBufferFloat64AtomicMinMax;
+ VkBool32 shaderSharedFloat16Atomics;
+ VkBool32 shaderSharedFloat16AtomicAdd;
+ VkBool32 shaderSharedFloat16AtomicMinMax;
+ VkBool32 shaderSharedFloat32AtomicMinMax;
+ VkBool32 shaderSharedFloat64AtomicMinMax;
+ VkBool32 shaderImageFloat32AtomicMinMax;
+ VkBool32 sparseImageFloat32AtomicMinMax;
+} VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT;
+
+
+
#define VK_EXT_shader_demote_to_helper_invocation 1
#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1
#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation"
-typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT {
- VkStructureType sType;
- void* pNext;
- VkBool32 shaderDemoteToHelperInvocation;
-} VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;
+typedef VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;
@@ -11613,6 +13015,7 @@ typedef enum VkIndirectCommandsTokenTypeNV {
VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5,
VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6,
VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7,
+ VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV = 1000328000,
VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkIndirectCommandsTokenTypeNV;
@@ -11817,19 +13220,12 @@ typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT {
VkBool32 texelBufferAlignment;
} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT;
-typedef struct VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT {
- VkStructureType sType;
- void* pNext;
- VkDeviceSize storageTexelBufferOffsetAlignmentBytes;
- VkBool32 storageTexelBufferOffsetSingleTexelAlignment;
- VkDeviceSize uniformTexelBufferOffsetAlignmentBytes;
- VkBool32 uniformTexelBufferOffsetSingleTexelAlignment;
-} VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;
+typedef VkPhysicalDeviceTexelBufferAlignmentProperties VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;
#define VK_QCOM_render_pass_transform 1
-#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 2
+#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 3
#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME "VK_QCOM_render_pass_transform"
typedef struct VkRenderPassTransformBeginInfoQCOM {
VkStructureType sType;
@@ -11867,7 +13263,7 @@ typedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT {
typedef struct VkDeviceMemoryReportCallbackDataEXT {
VkStructureType sType;
- const void* pNext;
+ void* pNext;
VkDeviceMemoryReportFlagsEXT flags;
VkDeviceMemoryReportEventTypeEXT type;
uint64_t memoryObjectId;
@@ -11962,61 +13358,47 @@ typedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT {
#define VK_EXT_private_data 1
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlotEXT)
+typedef VkPrivateDataSlot VkPrivateDataSlotEXT;
+
#define VK_EXT_PRIVATE_DATA_SPEC_VERSION 1
#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME "VK_EXT_private_data"
+typedef VkPrivateDataSlotCreateFlags VkPrivateDataSlotCreateFlagsEXT;
-typedef enum VkPrivateDataSlotCreateFlagBitsEXT {
- VK_PRIVATE_DATA_SLOT_CREATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkPrivateDataSlotCreateFlagBitsEXT;
-typedef VkFlags VkPrivateDataSlotCreateFlagsEXT;
-typedef struct VkPhysicalDevicePrivateDataFeaturesEXT {
- VkStructureType sType;
- void* pNext;
- VkBool32 privateData;
-} VkPhysicalDevicePrivateDataFeaturesEXT;
+typedef VkPhysicalDevicePrivateDataFeatures VkPhysicalDevicePrivateDataFeaturesEXT;
-typedef struct VkDevicePrivateDataCreateInfoEXT {
- VkStructureType sType;
- const void* pNext;
- uint32_t privateDataSlotRequestCount;
-} VkDevicePrivateDataCreateInfoEXT;
+typedef VkDevicePrivateDataCreateInfo VkDevicePrivateDataCreateInfoEXT;
-typedef struct VkPrivateDataSlotCreateInfoEXT {
- VkStructureType sType;
- const void* pNext;
- VkPrivateDataSlotCreateFlagsEXT flags;
-} VkPrivateDataSlotCreateInfoEXT;
+typedef VkPrivateDataSlotCreateInfo VkPrivateDataSlotCreateInfoEXT;
-typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot);
-typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator);
-typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data);
-typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);
+typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);
+typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT(
VkDevice device,
- const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+ const VkPrivateDataSlotCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- VkPrivateDataSlotEXT* pPrivateDataSlot);
+ VkPrivateDataSlot* pPrivateDataSlot);
VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT(
VkDevice device,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
const VkAllocationCallbacks* pAllocator);
VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT(
VkDevice device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t data);
VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT(
VkDevice device,
VkObjectType objectType,
uint64_t objectHandle,
- VkPrivateDataSlotEXT privateDataSlot,
+ VkPrivateDataSlot privateDataSlot,
uint64_t* pData);
#endif
@@ -12024,22 +13406,19 @@ VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT(
#define VK_EXT_pipeline_creation_cache_control 1
#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3
#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME "VK_EXT_pipeline_creation_cache_control"
-typedef struct VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT {
- VkStructureType sType;
- void* pNext;
- VkBool32 pipelineCreationCacheControl;
-} VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT;
+typedef VkPhysicalDevicePipelineCreationCacheControlFeatures VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT;
#define VK_NV_device_diagnostics_config 1
-#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 1
+#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 2
#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME "VK_NV_device_diagnostics_config"
typedef enum VkDeviceDiagnosticsConfigFlagBitsNV {
VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001,
VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002,
VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004,
+ VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_ERROR_REPORTING_BIT_NV = 0x00000008,
VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
} VkDeviceDiagnosticsConfigFlagBitsNV;
typedef VkFlags VkDeviceDiagnosticsConfigFlagsNV;
@@ -12062,6 +13441,50 @@ typedef struct VkDeviceDiagnosticsConfigCreateInfoNV {
#define VK_QCOM_RENDER_PASS_STORE_OPS_EXTENSION_NAME "VK_QCOM_render_pass_store_ops"
+#define VK_EXT_graphics_pipeline_library 1
+#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_SPEC_VERSION 1
+#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_EXTENSION_NAME "VK_EXT_graphics_pipeline_library"
+
+typedef enum VkGraphicsPipelineLibraryFlagBitsEXT {
+ VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT = 0x00000001,
+ VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT = 0x00000002,
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT = 0x00000004,
+ VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT = 0x00000008,
+ VK_GRAPHICS_PIPELINE_LIBRARY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkGraphicsPipelineLibraryFlagBitsEXT;
+typedef VkFlags VkGraphicsPipelineLibraryFlagsEXT;
+typedef struct VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 graphicsPipelineLibrary;
+} VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT;
+
+typedef struct VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 graphicsPipelineLibraryFastLinking;
+ VkBool32 graphicsPipelineLibraryIndependentInterpolationDecoration;
+} VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT;
+
+typedef struct VkGraphicsPipelineLibraryCreateInfoEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkGraphicsPipelineLibraryFlagsEXT flags;
+} VkGraphicsPipelineLibraryCreateInfoEXT;
+
+
+
+#define VK_AMD_shader_early_and_late_fragment_tests 1
+#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_SPEC_VERSION 1
+#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_EXTENSION_NAME "VK_AMD_shader_early_and_late_fragment_tests"
+typedef struct VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderEarlyAndLateFragmentTests;
+} VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD;
+
+
+
#define VK_NV_fragment_shading_rate_enums 1
#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1
#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME "VK_NV_fragment_shading_rate_enums"
@@ -12202,7 +13625,7 @@ typedef struct VkAccelerationStructureMotionInstanceNV {
typedef struct VkPhysicalDeviceRayTracingMotionBlurFeaturesNV {
VkStructureType sType;
- const void* pNext;
+ void* pNext;
VkBool32 rayTracingMotionBlur;
VkBool32 rayTracingMotionBlurPipelineTraceRaysIndirect;
} VkPhysicalDeviceRayTracingMotionBlurFeaturesNV;
@@ -12254,11 +13677,104 @@ typedef struct VkCopyCommandTransformInfoQCOM {
#define VK_EXT_image_robustness 1
#define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1
#define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_image_robustness"
-typedef struct VkPhysicalDeviceImageRobustnessFeaturesEXT {
+typedef VkPhysicalDeviceImageRobustnessFeatures VkPhysicalDeviceImageRobustnessFeaturesEXT;
+
+
+
+#define VK_EXT_image_compression_control 1
+#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SPEC_VERSION 1
+#define VK_EXT_IMAGE_COMPRESSION_CONTROL_EXTENSION_NAME "VK_EXT_image_compression_control"
+
+typedef enum VkImageCompressionFlagBitsEXT {
+ VK_IMAGE_COMPRESSION_DEFAULT_EXT = 0,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_DEFAULT_EXT = 0x00000001,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_EXPLICIT_EXT = 0x00000002,
+ VK_IMAGE_COMPRESSION_DISABLED_EXT = 0x00000004,
+ VK_IMAGE_COMPRESSION_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkImageCompressionFlagBitsEXT;
+typedef VkFlags VkImageCompressionFlagsEXT;
+
+typedef enum VkImageCompressionFixedRateFlagBitsEXT {
+ VK_IMAGE_COMPRESSION_FIXED_RATE_NONE_EXT = 0,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_1BPC_BIT_EXT = 0x00000001,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_2BPC_BIT_EXT = 0x00000002,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_3BPC_BIT_EXT = 0x00000004,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_4BPC_BIT_EXT = 0x00000008,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_5BPC_BIT_EXT = 0x00000010,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_6BPC_BIT_EXT = 0x00000020,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_7BPC_BIT_EXT = 0x00000040,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_8BPC_BIT_EXT = 0x00000080,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_9BPC_BIT_EXT = 0x00000100,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_10BPC_BIT_EXT = 0x00000200,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_11BPC_BIT_EXT = 0x00000400,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_12BPC_BIT_EXT = 0x00000800,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_13BPC_BIT_EXT = 0x00001000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_14BPC_BIT_EXT = 0x00002000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_15BPC_BIT_EXT = 0x00004000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_16BPC_BIT_EXT = 0x00008000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_17BPC_BIT_EXT = 0x00010000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_18BPC_BIT_EXT = 0x00020000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_19BPC_BIT_EXT = 0x00040000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_20BPC_BIT_EXT = 0x00080000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_21BPC_BIT_EXT = 0x00100000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_22BPC_BIT_EXT = 0x00200000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_23BPC_BIT_EXT = 0x00400000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_24BPC_BIT_EXT = 0x00800000,
+ VK_IMAGE_COMPRESSION_FIXED_RATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkImageCompressionFixedRateFlagBitsEXT;
+typedef VkFlags VkImageCompressionFixedRateFlagsEXT;
+typedef struct VkPhysicalDeviceImageCompressionControlFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 imageCompressionControl;
+} VkPhysicalDeviceImageCompressionControlFeaturesEXT;
+
+typedef struct VkImageCompressionControlEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkImageCompressionFlagsEXT flags;
+ uint32_t compressionControlPlaneCount;
+ VkImageCompressionFixedRateFlagsEXT* pFixedRateFlags;
+} VkImageCompressionControlEXT;
+
+typedef struct VkSubresourceLayout2EXT {
+ VkStructureType sType;
+ void* pNext;
+ VkSubresourceLayout subresourceLayout;
+} VkSubresourceLayout2EXT;
+
+typedef struct VkImageSubresource2EXT {
+ VkStructureType sType;
+ void* pNext;
+ VkImageSubresource imageSubresource;
+} VkImageSubresource2EXT;
+
+typedef struct VkImageCompressionPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkImageCompressionFlagsEXT imageCompressionFlags;
+ VkImageCompressionFixedRateFlagsEXT imageCompressionFixedRateFlags;
+} VkImageCompressionPropertiesEXT;
+
+typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2EXT)(VkDevice device, VkImage image, const VkImageSubresource2EXT* pSubresource, VkSubresourceLayout2EXT* pLayout);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2EXT(
+ VkDevice device,
+ VkImage image,
+ const VkImageSubresource2EXT* pSubresource,
+ VkSubresourceLayout2EXT* pLayout);
+#endif
+
+
+#define VK_EXT_attachment_feedback_loop_layout 1
+#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_SPEC_VERSION 2
+#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME "VK_EXT_attachment_feedback_loop_layout"
+typedef struct VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT {
VkStructureType sType;
void* pNext;
- VkBool32 robustImageAccess;
-} VkPhysicalDeviceImageRobustnessFeaturesEXT;
+ VkBool32 attachmentFeedbackLoopLayout;
+} VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT;
@@ -12274,6 +13790,32 @@ typedef struct VkPhysicalDevice4444FormatsFeaturesEXT {
+#define VK_ARM_rasterization_order_attachment_access 1
+#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION 1
+#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME "VK_ARM_rasterization_order_attachment_access"
+typedef struct VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 rasterizationOrderColorAttachmentAccess;
+ VkBool32 rasterizationOrderDepthAttachmentAccess;
+ VkBool32 rasterizationOrderStencilAttachmentAccess;
+} VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT;
+
+typedef VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM;
+
+
+
+#define VK_EXT_rgba10x6_formats 1
+#define VK_EXT_RGBA10X6_FORMATS_SPEC_VERSION 1
+#define VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME "VK_EXT_rgba10x6_formats"
+typedef struct VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 formatRgba10x6WithoutYCbCrSampler;
+} VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT;
+
+
+
#define VK_NV_acquire_winrt_display 1
#define VK_NV_ACQUIRE_WINRT_DISPLAY_SPEC_VERSION 1
#define VK_NV_ACQUIRE_WINRT_DISPLAY_EXTENSION_NAME "VK_NV_acquire_winrt_display"
@@ -12295,23 +13837,29 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetWinrtDisplayNV(
#define VK_VALVE_mutable_descriptor_type 1
#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1
#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_VALVE_mutable_descriptor_type"
-typedef struct VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE {
+typedef struct VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT {
VkStructureType sType;
void* pNext;
VkBool32 mutableDescriptorType;
-} VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE;
+} VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT;
-typedef struct VkMutableDescriptorTypeListVALVE {
+typedef VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE;
+
+typedef struct VkMutableDescriptorTypeListEXT {
uint32_t descriptorTypeCount;
const VkDescriptorType* pDescriptorTypes;
-} VkMutableDescriptorTypeListVALVE;
+} VkMutableDescriptorTypeListEXT;
-typedef struct VkMutableDescriptorTypeCreateInfoVALVE {
- VkStructureType sType;
- const void* pNext;
- uint32_t mutableDescriptorTypeListCount;
- const VkMutableDescriptorTypeListVALVE* pMutableDescriptorTypeLists;
-} VkMutableDescriptorTypeCreateInfoVALVE;
+typedef VkMutableDescriptorTypeListEXT VkMutableDescriptorTypeListVALVE;
+
+typedef struct VkMutableDescriptorTypeCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t mutableDescriptorTypeListCount;
+ const VkMutableDescriptorTypeListEXT* pMutableDescriptorTypeLists;
+} VkMutableDescriptorTypeCreateInfoEXT;
+
+typedef VkMutableDescriptorTypeCreateInfoEXT VkMutableDescriptorTypeCreateInfoVALVE;
@@ -12370,8 +13918,37 @@ typedef struct VkPhysicalDeviceDrmPropertiesEXT {
+#define VK_EXT_depth_clip_control 1
+#define VK_EXT_DEPTH_CLIP_CONTROL_SPEC_VERSION 1
+#define VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME "VK_EXT_depth_clip_control"
+typedef struct VkPhysicalDeviceDepthClipControlFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 depthClipControl;
+} VkPhysicalDeviceDepthClipControlFeaturesEXT;
+
+typedef struct VkPipelineViewportDepthClipControlCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 negativeOneToOne;
+} VkPipelineViewportDepthClipControlCreateInfoEXT;
+
+
+
+#define VK_EXT_primitive_topology_list_restart 1
+#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_SPEC_VERSION 1
+#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_EXTENSION_NAME "VK_EXT_primitive_topology_list_restart"
+typedef struct VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 primitiveTopologyListRestart;
+ VkBool32 primitiveTopologyPatchListRestart;
+} VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT;
+
+
+
#define VK_HUAWEI_subpass_shading 1
-#define VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION 0
+#define VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION 2
#define VK_HUAWEI_SUBPASS_SHADING_EXTENSION_NAME "VK_HUAWEI_subpass_shading"
typedef struct VkSubpassShadingPipelineCreateInfoHUAWEI {
VkStructureType sType;
@@ -12392,11 +13969,12 @@ typedef struct VkPhysicalDeviceSubpassShadingPropertiesHUAWEI {
uint32_t maxSubpassShadingWorkgroupSizeAspectRatio;
} VkPhysicalDeviceSubpassShadingPropertiesHUAWEI;
-typedef VkResult (VKAPI_PTR *PFN_vkGetSubpassShadingMaxWorkgroupSizeHUAWEI)(VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)(VkDevice device, VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize);
typedef void (VKAPI_PTR *PFN_vkCmdSubpassShadingHUAWEI)(VkCommandBuffer commandBuffer);
#ifndef VK_NO_PROTOTYPES
-VKAPI_ATTR VkResult VKAPI_CALL vkGetSubpassShadingMaxWorkgroupSizeHUAWEI(
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI(
+ VkDevice device,
VkRenderPass renderpass,
VkExtent2D* pMaxWorkgroupSize);
@@ -12405,6 +13983,103 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSubpassShadingHUAWEI(
#endif
+#define VK_HUAWEI_invocation_mask 1
+#define VK_HUAWEI_INVOCATION_MASK_SPEC_VERSION 1
+#define VK_HUAWEI_INVOCATION_MASK_EXTENSION_NAME "VK_HUAWEI_invocation_mask"
+typedef struct VkPhysicalDeviceInvocationMaskFeaturesHUAWEI {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 invocationMask;
+} VkPhysicalDeviceInvocationMaskFeaturesHUAWEI;
+
+typedef void (VKAPI_PTR *PFN_vkCmdBindInvocationMaskHUAWEI)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBindInvocationMaskHUAWEI(
+ VkCommandBuffer commandBuffer,
+ VkImageView imageView,
+ VkImageLayout imageLayout);
+#endif
+
+
+#define VK_NV_external_memory_rdma 1
+typedef void* VkRemoteAddressNV;
+#define VK_NV_EXTERNAL_MEMORY_RDMA_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_RDMA_EXTENSION_NAME "VK_NV_external_memory_rdma"
+typedef struct VkMemoryGetRemoteAddressInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceMemory memory;
+ VkExternalMemoryHandleTypeFlagBits handleType;
+} VkMemoryGetRemoteAddressInfoNV;
+
+typedef struct VkPhysicalDeviceExternalMemoryRDMAFeaturesNV {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 externalMemoryRDMA;
+} VkPhysicalDeviceExternalMemoryRDMAFeaturesNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryRemoteAddressNV)(VkDevice device, const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, VkRemoteAddressNV* pAddress);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryRemoteAddressNV(
+ VkDevice device,
+ const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo,
+ VkRemoteAddressNV* pAddress);
+#endif
+
+
+#define VK_EXT_pipeline_properties 1
+#define VK_EXT_PIPELINE_PROPERTIES_SPEC_VERSION 1
+#define VK_EXT_PIPELINE_PROPERTIES_EXTENSION_NAME "VK_EXT_pipeline_properties"
+typedef VkPipelineInfoKHR VkPipelineInfoEXT;
+
+typedef struct VkPipelinePropertiesIdentifierEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint8_t pipelineIdentifier[VK_UUID_SIZE];
+} VkPipelinePropertiesIdentifierEXT;
+
+typedef struct VkPhysicalDevicePipelinePropertiesFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 pipelinePropertiesIdentifier;
+} VkPhysicalDevicePipelinePropertiesFeaturesEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelinePropertiesEXT)(VkDevice device, const VkPipelineInfoEXT* pPipelineInfo, VkBaseOutStructure* pPipelineProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelinePropertiesEXT(
+ VkDevice device,
+ const VkPipelineInfoEXT* pPipelineInfo,
+ VkBaseOutStructure* pPipelineProperties);
+#endif
+
+
+#define VK_EXT_multisampled_render_to_single_sampled 1
+#define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION 1
+#define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_EXTENSION_NAME "VK_EXT_multisampled_render_to_single_sampled"
+typedef struct VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 multisampledRenderToSingleSampled;
+} VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT;
+
+typedef struct VkSubpassResolvePerformanceQueryEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 optimal;
+} VkSubpassResolvePerformanceQueryEXT;
+
+typedef struct VkMultisampledRenderToSingleSampledInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 multisampledRenderToSingleSampledEnable;
+ VkSampleCountFlagBits rasterizationSamples;
+} VkMultisampledRenderToSingleSampledInfoEXT;
+
+
+
#define VK_EXT_extended_dynamic_state2 1
#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION 1
#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME "VK_EXT_extended_dynamic_state2"
@@ -12471,22 +14146,43 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetColorWrite
#endif
+#define VK_EXT_primitives_generated_query 1
+#define VK_EXT_PRIMITIVES_GENERATED_QUERY_SPEC_VERSION 1
+#define VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME "VK_EXT_primitives_generated_query"
+typedef struct VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 primitivesGeneratedQuery;
+ VkBool32 primitivesGeneratedQueryWithRasterizerDiscard;
+ VkBool32 primitivesGeneratedQueryWithNonZeroStreams;
+} VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT;
+
+
+
#define VK_EXT_global_priority_query 1
-#define VK_MAX_GLOBAL_PRIORITY_SIZE_EXT 16U
#define VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION 1
#define VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME "VK_EXT_global_priority_query"
-typedef struct VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT {
+#define VK_MAX_GLOBAL_PRIORITY_SIZE_EXT VK_MAX_GLOBAL_PRIORITY_SIZE_KHR
+typedef VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT;
+
+typedef VkQueueFamilyGlobalPriorityPropertiesKHR VkQueueFamilyGlobalPriorityPropertiesEXT;
+
+
+
+#define VK_EXT_image_view_min_lod 1
+#define VK_EXT_IMAGE_VIEW_MIN_LOD_SPEC_VERSION 1
+#define VK_EXT_IMAGE_VIEW_MIN_LOD_EXTENSION_NAME "VK_EXT_image_view_min_lod"
+typedef struct VkPhysicalDeviceImageViewMinLodFeaturesEXT {
VkStructureType sType;
void* pNext;
- VkBool32 globalPriorityQuery;
-} VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT;
+ VkBool32 minLod;
+} VkPhysicalDeviceImageViewMinLodFeaturesEXT;
-typedef struct VkQueueFamilyGlobalPriorityPropertiesEXT {
- VkStructureType sType;
- void* pNext;
- uint32_t priorityCount;
- VkQueueGlobalPriorityEXT priorities[VK_MAX_GLOBAL_PRIORITY_SIZE_EXT];
-} VkQueueFamilyGlobalPriorityPropertiesEXT;
+typedef struct VkImageViewMinLodCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ float minLod;
+} VkImageViewMinLodCreateInfoEXT;
@@ -12539,9 +14235,382 @@ VKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiIndexedEXT(
#endif
+#define VK_EXT_image_2d_view_of_3d 1
+#define VK_EXT_IMAGE_2D_VIEW_OF_3D_SPEC_VERSION 1
+#define VK_EXT_IMAGE_2D_VIEW_OF_3D_EXTENSION_NAME "VK_EXT_image_2d_view_of_3d"
+typedef struct VkPhysicalDeviceImage2DViewOf3DFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 image2DViewOf3D;
+ VkBool32 sampler2DViewOf3D;
+} VkPhysicalDeviceImage2DViewOf3DFeaturesEXT;
+
+
+
+#define VK_EXT_load_store_op_none 1
+#define VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION 1
+#define VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME "VK_EXT_load_store_op_none"
+
+
+#define VK_EXT_border_color_swizzle 1
+#define VK_EXT_BORDER_COLOR_SWIZZLE_SPEC_VERSION 1
+#define VK_EXT_BORDER_COLOR_SWIZZLE_EXTENSION_NAME "VK_EXT_border_color_swizzle"
+typedef struct VkPhysicalDeviceBorderColorSwizzleFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 borderColorSwizzle;
+ VkBool32 borderColorSwizzleFromImage;
+} VkPhysicalDeviceBorderColorSwizzleFeaturesEXT;
+
+typedef struct VkSamplerBorderColorComponentMappingCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkComponentMapping components;
+ VkBool32 srgb;
+} VkSamplerBorderColorComponentMappingCreateInfoEXT;
+
+
+
+#define VK_EXT_pageable_device_local_memory 1
+#define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_SPEC_VERSION 1
+#define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_EXTENSION_NAME "VK_EXT_pageable_device_local_memory"
+typedef struct VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 pageableDeviceLocalMemory;
+} VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT;
+
+typedef void (VKAPI_PTR *PFN_vkSetDeviceMemoryPriorityEXT)(VkDevice device, VkDeviceMemory memory, float priority);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkSetDeviceMemoryPriorityEXT(
+ VkDevice device,
+ VkDeviceMemory memory,
+ float priority);
+#endif
+
+
+#define VK_VALVE_descriptor_set_host_mapping 1
+#define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_SPEC_VERSION 1
+#define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_EXTENSION_NAME "VK_VALVE_descriptor_set_host_mapping"
+typedef struct VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 descriptorSetHostMapping;
+} VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE;
+
+typedef struct VkDescriptorSetBindingReferenceVALVE {
+ VkStructureType sType;
+ const void* pNext;
+ VkDescriptorSetLayout descriptorSetLayout;
+ uint32_t binding;
+} VkDescriptorSetBindingReferenceVALVE;
+
+typedef struct VkDescriptorSetLayoutHostMappingInfoVALVE {
+ VkStructureType sType;
+ void* pNext;
+ size_t descriptorOffset;
+ uint32_t descriptorSize;
+} VkDescriptorSetLayoutHostMappingInfoVALVE;
+
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)(VkDevice device, const VkDescriptorSetBindingReferenceVALVE* pBindingReference, VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetHostMappingVALVE)(VkDevice device, VkDescriptorSet descriptorSet, void** ppData);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutHostMappingInfoVALVE(
+ VkDevice device,
+ const VkDescriptorSetBindingReferenceVALVE* pBindingReference,
+ VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetHostMappingVALVE(
+ VkDevice device,
+ VkDescriptorSet descriptorSet,
+ void** ppData);
+#endif
+
+
+#define VK_EXT_depth_clamp_zero_one 1
+#define VK_EXT_DEPTH_CLAMP_ZERO_ONE_SPEC_VERSION 1
+#define VK_EXT_DEPTH_CLAMP_ZERO_ONE_EXTENSION_NAME "VK_EXT_depth_clamp_zero_one"
+typedef struct VkPhysicalDeviceDepthClampZeroOneFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 depthClampZeroOne;
+} VkPhysicalDeviceDepthClampZeroOneFeaturesEXT;
+
+
+
+#define VK_EXT_non_seamless_cube_map 1
+#define VK_EXT_NON_SEAMLESS_CUBE_MAP_SPEC_VERSION 1
+#define VK_EXT_NON_SEAMLESS_CUBE_MAP_EXTENSION_NAME "VK_EXT_non_seamless_cube_map"
+typedef struct VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 nonSeamlessCubeMap;
+} VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT;
+
+
+
+#define VK_QCOM_fragment_density_map_offset 1
+#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 1
+#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME "VK_QCOM_fragment_density_map_offset"
+typedef struct VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 fragmentDensityMapOffset;
+} VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM;
+
+typedef struct VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM {
+ VkStructureType sType;
+ void* pNext;
+ VkExtent2D fragmentDensityOffsetGranularity;
+} VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM;
+
+typedef struct VkSubpassFragmentDensityMapOffsetEndInfoQCOM {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t fragmentDensityOffsetCount;
+ const VkOffset2D* pFragmentDensityOffsets;
+} VkSubpassFragmentDensityMapOffsetEndInfoQCOM;
+
+
+
+#define VK_NV_linear_color_attachment 1
+#define VK_NV_LINEAR_COLOR_ATTACHMENT_SPEC_VERSION 1
+#define VK_NV_LINEAR_COLOR_ATTACHMENT_EXTENSION_NAME "VK_NV_linear_color_attachment"
+typedef struct VkPhysicalDeviceLinearColorAttachmentFeaturesNV {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 linearColorAttachment;
+} VkPhysicalDeviceLinearColorAttachmentFeaturesNV;
+
+
+
+#define VK_GOOGLE_surfaceless_query 1
+#define VK_GOOGLE_SURFACELESS_QUERY_SPEC_VERSION 2
+#define VK_GOOGLE_SURFACELESS_QUERY_EXTENSION_NAME "VK_GOOGLE_surfaceless_query"
+
+
+#define VK_EXT_image_compression_control_swapchain 1
+#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_SPEC_VERSION 1
+#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_EXTENSION_NAME "VK_EXT_image_compression_control_swapchain"
+typedef struct VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 imageCompressionControlSwapchain;
+} VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT;
+
+
+
+#define VK_QCOM_image_processing 1
+#define VK_QCOM_IMAGE_PROCESSING_SPEC_VERSION 1
+#define VK_QCOM_IMAGE_PROCESSING_EXTENSION_NAME "VK_QCOM_image_processing"
+typedef struct VkImageViewSampleWeightCreateInfoQCOM {
+ VkStructureType sType;
+ const void* pNext;
+ VkOffset2D filterCenter;
+ VkExtent2D filterSize;
+ uint32_t numPhases;
+} VkImageViewSampleWeightCreateInfoQCOM;
+
+typedef struct VkPhysicalDeviceImageProcessingFeaturesQCOM {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 textureSampleWeighted;
+ VkBool32 textureBoxFilter;
+ VkBool32 textureBlockMatch;
+} VkPhysicalDeviceImageProcessingFeaturesQCOM;
+
+typedef struct VkPhysicalDeviceImageProcessingPropertiesQCOM {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxWeightFilterPhases;
+ VkExtent2D maxWeightFilterDimension;
+ VkExtent2D maxBlockMatchRegion;
+ VkExtent2D maxBoxFilterBlockSize;
+} VkPhysicalDeviceImageProcessingPropertiesQCOM;
+
+
+
+#define VK_EXT_subpass_merge_feedback 1
+#define VK_EXT_SUBPASS_MERGE_FEEDBACK_SPEC_VERSION 2
+#define VK_EXT_SUBPASS_MERGE_FEEDBACK_EXTENSION_NAME "VK_EXT_subpass_merge_feedback"
+
+typedef enum VkSubpassMergeStatusEXT {
+ VK_SUBPASS_MERGE_STATUS_MERGED_EXT = 0,
+ VK_SUBPASS_MERGE_STATUS_DISALLOWED_EXT = 1,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SIDE_EFFECTS_EXT = 2,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SAMPLES_MISMATCH_EXT = 3,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_VIEWS_MISMATCH_EXT = 4,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_ALIASING_EXT = 5,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPENDENCIES_EXT = 6,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INCOMPATIBLE_INPUT_ATTACHMENT_EXT = 7,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_TOO_MANY_ATTACHMENTS_EXT = 8,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INSUFFICIENT_STORAGE_EXT = 9,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPTH_STENCIL_COUNT_EXT = 10,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_RESOLVE_ATTACHMENT_REUSE_EXT = 11,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SINGLE_SUBPASS_EXT = 12,
+ VK_SUBPASS_MERGE_STATUS_NOT_MERGED_UNSPECIFIED_EXT = 13,
+ VK_SUBPASS_MERGE_STATUS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSubpassMergeStatusEXT;
+typedef struct VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 subpassMergeFeedback;
+} VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT;
+
+typedef struct VkRenderPassCreationControlEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 disallowMerging;
+} VkRenderPassCreationControlEXT;
+
+typedef struct VkRenderPassCreationFeedbackInfoEXT {
+ uint32_t postMergeSubpassCount;
+} VkRenderPassCreationFeedbackInfoEXT;
+
+typedef struct VkRenderPassCreationFeedbackCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPassCreationFeedbackInfoEXT* pRenderPassFeedback;
+} VkRenderPassCreationFeedbackCreateInfoEXT;
+
+typedef struct VkRenderPassSubpassFeedbackInfoEXT {
+ VkSubpassMergeStatusEXT subpassMergeStatus;
+ char description[VK_MAX_DESCRIPTION_SIZE];
+ uint32_t postMergeIndex;
+} VkRenderPassSubpassFeedbackInfoEXT;
+
+typedef struct VkRenderPassSubpassFeedbackCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkRenderPassSubpassFeedbackInfoEXT* pSubpassFeedback;
+} VkRenderPassSubpassFeedbackCreateInfoEXT;
+
+
+
+#define VK_EXT_shader_module_identifier 1
+#define VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT 32U
+#define VK_EXT_SHADER_MODULE_IDENTIFIER_SPEC_VERSION 1
+#define VK_EXT_SHADER_MODULE_IDENTIFIER_EXTENSION_NAME "VK_EXT_shader_module_identifier"
+typedef struct VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderModuleIdentifier;
+} VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT;
+
+typedef struct VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint8_t shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE];
+} VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT;
+
+typedef struct VkPipelineShaderStageModuleIdentifierCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t identifierSize;
+ const uint8_t* pIdentifier;
+} VkPipelineShaderStageModuleIdentifierCreateInfoEXT;
+
+typedef struct VkShaderModuleIdentifierEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t identifierSize;
+ uint8_t identifier[VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT];
+} VkShaderModuleIdentifierEXT;
+
+typedef void (VKAPI_PTR *PFN_vkGetShaderModuleIdentifierEXT)(VkDevice device, VkShaderModule shaderModule, VkShaderModuleIdentifierEXT* pIdentifier);
+typedef void (VKAPI_PTR *PFN_vkGetShaderModuleCreateInfoIdentifierEXT)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModuleIdentifierEXT* pIdentifier);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetShaderModuleIdentifierEXT(
+ VkDevice device,
+ VkShaderModule shaderModule,
+ VkShaderModuleIdentifierEXT* pIdentifier);
+
+VKAPI_ATTR void VKAPI_CALL vkGetShaderModuleCreateInfoIdentifierEXT(
+ VkDevice device,
+ const VkShaderModuleCreateInfo* pCreateInfo,
+ VkShaderModuleIdentifierEXT* pIdentifier);
+#endif
+
+
+#define VK_EXT_rasterization_order_attachment_access 1
+#define VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION 1
+#define VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME "VK_EXT_rasterization_order_attachment_access"
+
+
+#define VK_EXT_legacy_dithering 1
+#define VK_EXT_LEGACY_DITHERING_SPEC_VERSION 1
+#define VK_EXT_LEGACY_DITHERING_EXTENSION_NAME "VK_EXT_legacy_dithering"
+typedef struct VkPhysicalDeviceLegacyDitheringFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 legacyDithering;
+} VkPhysicalDeviceLegacyDitheringFeaturesEXT;
+
+
+
+#define VK_QCOM_tile_properties 1
+#define VK_QCOM_TILE_PROPERTIES_SPEC_VERSION 1
+#define VK_QCOM_TILE_PROPERTIES_EXTENSION_NAME "VK_QCOM_tile_properties"
+typedef struct VkPhysicalDeviceTilePropertiesFeaturesQCOM {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 tileProperties;
+} VkPhysicalDeviceTilePropertiesFeaturesQCOM;
+
+typedef struct VkTilePropertiesQCOM {
+ VkStructureType sType;
+ void* pNext;
+ VkExtent3D tileSize;
+ VkExtent2D apronSize;
+ VkOffset2D origin;
+} VkTilePropertiesQCOM;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetFramebufferTilePropertiesQCOM)(VkDevice device, VkFramebuffer framebuffer, uint32_t* pPropertiesCount, VkTilePropertiesQCOM* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDynamicRenderingTilePropertiesQCOM)(VkDevice device, const VkRenderingInfo* pRenderingInfo, VkTilePropertiesQCOM* pProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFramebufferTilePropertiesQCOM(
+ VkDevice device,
+ VkFramebuffer framebuffer,
+ uint32_t* pPropertiesCount,
+ VkTilePropertiesQCOM* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDynamicRenderingTilePropertiesQCOM(
+ VkDevice device,
+ const VkRenderingInfo* pRenderingInfo,
+ VkTilePropertiesQCOM* pProperties);
+#endif
+
+
+#define VK_SEC_amigo_profiling 1
+#define VK_SEC_AMIGO_PROFILING_SPEC_VERSION 1
+#define VK_SEC_AMIGO_PROFILING_EXTENSION_NAME "VK_SEC_amigo_profiling"
+typedef struct VkPhysicalDeviceAmigoProfilingFeaturesSEC {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 amigoProfiling;
+} VkPhysicalDeviceAmigoProfilingFeaturesSEC;
+
+typedef struct VkAmigoProfilingSubmitInfoSEC {
+ VkStructureType sType;
+ const void* pNext;
+ uint64_t firstDrawTimestamp;
+ uint64_t swapBufferTimestamp;
+} VkAmigoProfilingSubmitInfoSEC;
+
+
+
+#define VK_EXT_mutable_descriptor_type 1
+#define VK_EXT_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1
+#define VK_EXT_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_EXT_mutable_descriptor_type"
+
+
#define VK_KHR_acceleration_structure 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR)
-#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 11
+#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 13
#define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_KHR_acceleration_structure"
typedef enum VkBuildAccelerationStructureModeKHR {
@@ -12978,6 +15047,87 @@ typedef struct VkPhysicalDeviceRayQueryFeaturesKHR {
} VkPhysicalDeviceRayQueryFeaturesKHR;
+
+#define VK_EXT_mesh_shader 1
+#define VK_EXT_MESH_SHADER_SPEC_VERSION 1
+#define VK_EXT_MESH_SHADER_EXTENSION_NAME "VK_EXT_mesh_shader"
+typedef struct VkPhysicalDeviceMeshShaderFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 taskShader;
+ VkBool32 meshShader;
+ VkBool32 multiviewMeshShader;
+ VkBool32 primitiveFragmentShadingRateMeshShader;
+ VkBool32 meshShaderQueries;
+} VkPhysicalDeviceMeshShaderFeaturesEXT;
+
+typedef struct VkPhysicalDeviceMeshShaderPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t maxTaskWorkGroupTotalCount;
+ uint32_t maxTaskWorkGroupCount[3];
+ uint32_t maxTaskWorkGroupInvocations;
+ uint32_t maxTaskWorkGroupSize[3];
+ uint32_t maxTaskPayloadSize;
+ uint32_t maxTaskSharedMemorySize;
+ uint32_t maxTaskPayloadAndSharedMemorySize;
+ uint32_t maxMeshWorkGroupTotalCount;
+ uint32_t maxMeshWorkGroupCount[3];
+ uint32_t maxMeshWorkGroupInvocations;
+ uint32_t maxMeshWorkGroupSize[3];
+ uint32_t maxMeshSharedMemorySize;
+ uint32_t maxMeshPayloadAndSharedMemorySize;
+ uint32_t maxMeshOutputMemorySize;
+ uint32_t maxMeshPayloadAndOutputMemorySize;
+ uint32_t maxMeshOutputComponents;
+ uint32_t maxMeshOutputVertices;
+ uint32_t maxMeshOutputPrimitives;
+ uint32_t maxMeshOutputLayers;
+ uint32_t maxMeshMultiviewViewCount;
+ uint32_t meshOutputPerVertexGranularity;
+ uint32_t meshOutputPerPrimitiveGranularity;
+ uint32_t maxPreferredTaskWorkGroupInvocations;
+ uint32_t maxPreferredMeshWorkGroupInvocations;
+ VkBool32 prefersLocalInvocationVertexOutput;
+ VkBool32 prefersLocalInvocationPrimitiveOutput;
+ VkBool32 prefersCompactVertexOutput;
+ VkBool32 prefersCompactPrimitiveOutput;
+} VkPhysicalDeviceMeshShaderPropertiesEXT;
+
+typedef struct VkDrawMeshTasksIndirectCommandEXT {
+ uint32_t groupCountX;
+ uint32_t groupCountY;
+ uint32_t groupCountZ;
+} VkDrawMeshTasksIndirectCommandEXT;
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksEXT)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectEXT)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountEXT)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectEXT(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountEXT(
+ VkCommandBuffer commandBuffer,
+ VkBuffer buffer,
+ VkDeviceSize offset,
+ VkBuffer countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride);
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/src/venus/vkr_allocator.c b/src/venus/vkr_allocator.c
new file mode 100644
index 00000000..b0144eab
--- /dev/null
+++ b/src/venus/vkr_allocator.c
@@ -0,0 +1,283 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vkr_allocator.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "util/list.h"
+#include "venus-protocol/vulkan.h"
+#include "virgl_resource.h"
+
+/* Assume that we will deal with at most 4 devices.
+ * This is to avoid per-device resource dynamic allocations.
+ * For now, `vkr_allocator` is designed for Mesa CI use which
+ * uses lavapipe as the only Vulkan driver, but allow logic to
+ * assume more for some leeway and flexibility; especially if
+ * this allocator is expanded to use whatever devices available.
+ */
+#define VKR_ALLOCATOR_MAX_DEVICE_COUNT 4
+
+struct vkr_opaque_fd_mem_info {
+ VkDevice device;
+ VkDeviceMemory device_memory;
+ uint32_t res_id;
+ uint64_t size;
+
+ struct list_head head;
+};
+
+static struct vkr_allocator {
+ VkInstance instance;
+
+ VkPhysicalDevice physical_devices[VKR_ALLOCATOR_MAX_DEVICE_COUNT];
+ VkDevice devices[VKR_ALLOCATOR_MAX_DEVICE_COUNT];
+ uint8_t device_uuids[VKR_ALLOCATOR_MAX_DEVICE_COUNT][VK_UUID_SIZE];
+ uint32_t device_count;
+
+ struct list_head memories;
+} vkr_allocator;
+
+static bool vkr_allocator_initialized;
+
+static void
+vkr_allocator_free_memory(struct vkr_opaque_fd_mem_info *mem_info)
+{
+ vkFreeMemory(mem_info->device, mem_info->device_memory, NULL);
+ list_del(&mem_info->head);
+ free(mem_info);
+}
+
+static VkDevice
+vkr_allocator_get_device(struct virgl_resource *res)
+{
+ for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
+ if (memcmp(vkr_allocator.device_uuids[i], res->opaque_fd_metadata.device_uuid,
+ VK_UUID_SIZE) == 0)
+ return vkr_allocator.devices[i];
+ }
+
+ return VK_NULL_HANDLE;
+}
+
+static struct vkr_opaque_fd_mem_info *
+vkr_allocator_allocate_memory(struct virgl_resource *res)
+{
+ VkDevice dev_handle = vkr_allocator_get_device(res);
+ if (dev_handle == VK_NULL_HANDLE)
+ return NULL;
+
+ int fd = -1;
+ if (virgl_resource_export_fd(res, &fd) != VIRGL_RESOURCE_FD_OPAQUE) {
+ if (fd >= 0)
+ close(fd);
+ return NULL;
+ }
+
+ VkMemoryAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext =
+ &(VkImportMemoryFdInfoKHR){ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ .handleType =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
+ .fd = fd },
+ .allocationSize = res->opaque_fd_metadata.allocation_size,
+ .memoryTypeIndex = res->opaque_fd_metadata.memory_type_index
+ };
+
+ VkDeviceMemory mem_handle;
+ if (vkAllocateMemory(dev_handle, &alloc_info, NULL, &mem_handle) != VK_SUCCESS) {
+ close(fd);
+ return NULL;
+ }
+
+ struct vkr_opaque_fd_mem_info *mem_info = calloc(1, sizeof(*mem_info));
+ if (!mem_info) {
+ vkFreeMemory(dev_handle, mem_handle, NULL);
+ return NULL;
+ }
+
+ mem_info->device = dev_handle;
+ mem_info->device_memory = mem_handle;
+ mem_info->res_id = res->res_id;
+ mem_info->size = res->opaque_fd_metadata.allocation_size;
+
+ list_addtail(&mem_info->head, &vkr_allocator.memories);
+
+ return mem_info;
+}
+
+void
+vkr_allocator_fini(void)
+{
+ if (!vkr_allocator_initialized)
+ return;
+
+ struct vkr_opaque_fd_mem_info *mem_info, *mem_info_temp;
+ LIST_FOR_EACH_ENTRY_SAFE (mem_info, mem_info_temp, &vkr_allocator.memories, head)
+ vkr_allocator_free_memory(mem_info);
+
+ for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
+ vkDestroyDevice(vkr_allocator.devices[i], NULL);
+ }
+ vkDestroyInstance(vkr_allocator.instance, NULL);
+
+ memset(&vkr_allocator, 0, sizeof(vkr_allocator));
+
+ vkr_allocator_initialized = false;
+}
+
+int
+vkr_allocator_init(void)
+{
+ VkResult res;
+
+ VkApplicationInfo app_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .apiVersion = VK_API_VERSION_1_1,
+ };
+
+ VkInstanceCreateInfo inst_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &app_info,
+ };
+
+ res = vkCreateInstance(&inst_info, NULL, &vkr_allocator.instance);
+ if (res != VK_SUCCESS)
+ goto fail;
+
+ vkr_allocator.device_count = VKR_ALLOCATOR_MAX_DEVICE_COUNT;
+
+ res = vkEnumeratePhysicalDevices(vkr_allocator.instance, &vkr_allocator.device_count,
+ vkr_allocator.physical_devices);
+ if (res != VK_SUCCESS && res != VK_INCOMPLETE)
+ goto fail;
+
+ for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
+ VkPhysicalDevice physical_dev_handle = vkr_allocator.physical_devices[i];
+
+ VkPhysicalDeviceIDProperties id_props = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES
+ };
+ VkPhysicalDeviceProperties2 props2 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, .pNext = &id_props
+ };
+ vkGetPhysicalDeviceProperties2(physical_dev_handle, &props2);
+
+ memcpy(vkr_allocator.device_uuids[i], id_props.deviceUUID, VK_UUID_SIZE);
+
+ float priority = 1.0;
+ VkDeviceQueueCreateInfo queue_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ /* Use any queue since we don't really need it.
+ * We are guaranteed at least one by the spec */
+ .queueFamilyIndex = 0,
+ .queueCount = 1,
+ .pQueuePriorities = &priority
+ };
+
+ VkDeviceCreateInfo dev_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .queueCreateInfoCount = 1,
+ .pQueueCreateInfos = &queue_info,
+ };
+
+ res =
+ vkCreateDevice(physical_dev_handle, &dev_info, NULL, &vkr_allocator.devices[i]);
+ if (res != VK_SUCCESS)
+ goto fail;
+ }
+
+ list_inithead(&vkr_allocator.memories);
+
+ return 0;
+
+fail:
+ for (uint32_t i = 0; i < vkr_allocator.device_count; ++i) {
+ vkDestroyDevice(vkr_allocator.devices[i], NULL);
+ }
+ vkDestroyInstance(vkr_allocator.instance, NULL);
+
+ memset(&vkr_allocator, 0, sizeof(vkr_allocator));
+
+ return -1;
+}
+
+int
+vkr_allocator_resource_map(struct virgl_resource *res, void **map, uint64_t *out_size)
+{
+ if (!vkr_allocator_initialized) {
+ if (vkr_allocator_init())
+ return -EINVAL;
+ vkr_allocator_initialized = true;
+ }
+
+ assert(vkr_allocator_initialized);
+
+ struct vkr_opaque_fd_mem_info *mem_info = vkr_allocator_allocate_memory(res);
+ if (!mem_info)
+ return -EINVAL;
+
+ void *ptr;
+ if (vkMapMemory(mem_info->device, mem_info->device_memory, 0, mem_info->size, 0,
+ &ptr) != VK_SUCCESS) {
+ vkr_allocator_free_memory(mem_info);
+ return -EINVAL;
+ }
+
+ *map = ptr;
+ *out_size = mem_info->size;
+
+ return 0;
+}
+
+static struct vkr_opaque_fd_mem_info *
+vkr_allocator_get_mem_info(struct virgl_resource *res)
+{
+ struct vkr_opaque_fd_mem_info *mem_info, *mem_info_temp;
+ LIST_FOR_EACH_ENTRY_SAFE (mem_info, mem_info_temp, &vkr_allocator.memories, head)
+ if (mem_info->res_id == res->res_id)
+ return mem_info;
+
+ return NULL;
+}
+
+int
+vkr_allocator_resource_unmap(struct virgl_resource *res)
+{
+ assert(vkr_allocator_initialized);
+
+ struct vkr_opaque_fd_mem_info *mem_info = vkr_allocator_get_mem_info(res);
+ if (!mem_info)
+ return -EINVAL;
+
+ vkUnmapMemory(mem_info->device, mem_info->device_memory);
+
+ vkr_allocator_free_memory(mem_info);
+
+ return 0;
+}
diff --git a/src/venus/vkr_allocator.h b/src/venus/vkr_allocator.h
new file mode 100644
index 00000000..374102e9
--- /dev/null
+++ b/src/venus/vkr_allocator.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VKR_ALLOCATOR_H
+#define VKR_ALLOCATOR_H
+
+#include <stdint.h>
+
+struct virgl_resource;
+
+#ifdef ENABLE_VENUS
+
+int
+vkr_allocator_init(void);
+void
+vkr_allocator_fini(void);
+
+int
+vkr_allocator_resource_map(struct virgl_resource *res, void **map, uint64_t *out_size);
+int
+vkr_allocator_resource_unmap(struct virgl_resource *res);
+
+#else /* ENABLE_VENUS */
+
+#include "util/macros.h"
+
+static inline int
+vkr_allocator_init(void)
+{
+ return -1;
+}
+
+static inline void
+vkr_allocator_fini(void)
+{
+}
+
+static inline int
+vkr_allocator_resource_map(UNUSED struct virgl_resource *res,
+ UNUSED void **map,
+ UNUSED uint64_t *out_size)
+{
+ return -1;
+}
+
+static inline int
+vkr_allocator_resource_unmap(UNUSED struct virgl_resource *res)
+{
+ return -1;
+}
+
+#endif /* ENABLE_VENUS */
+
+#endif /* VKR_ALLOCATOR_H */
diff --git a/src/venus/vkr_buffer.c b/src/venus/vkr_buffer.c
index f28c6566..9e466f83 100644
--- a/src/venus/vkr_buffer.c
+++ b/src/venus/vkr_buffer.c
@@ -12,29 +12,32 @@ static void
vkr_dispatch_vkCreateBuffer(struct vn_dispatch_context *dispatch,
struct vn_command_vkCreateBuffer *args)
{
- struct vkr_context *ctx = dispatch->data;
-
- struct vkr_device *dev = vkr_device_from_handle(args->device);
-
-#ifdef FORCE_ENABLE_DMABUF
- VkExternalMemoryBufferCreateInfo local_external_info;
- if (dev->physical_device->EXT_external_memory_dma_buf) {
- VkExternalMemoryBufferCreateInfo *external_info = vkr_find_pnext(
- args->pCreateInfo->pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO);
- if (external_info) {
- external_info->handleTypes |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- } else {
- local_external_info = (const VkExternalMemoryBufferCreateInfo){
- .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
- .pNext = args->pCreateInfo->pNext,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- ((VkBufferCreateInfo *)args->pCreateInfo)->pNext = &local_external_info;
- }
- }
-#endif
-
- vkr_buffer_create_and_add(ctx, args);
+ /* XXX If VkExternalMemoryBufferCreateInfo is chained by the app, all is
+ * good. If it is not chained, we might still bind an external memory to
+ * the buffer, because vkr_dispatch_vkAllocateMemory makes any HOST_VISIBLE
+ * memory external. That is a spec violation.
+ *
+ * We could unconditionally chain VkExternalMemoryBufferCreateInfo. Or we
+ * could call vkGetPhysicalDeviceExternalBufferProperties and fail
+ * vkCreateBuffer if the buffer does not support external memory. But we
+ * would still end up with spec violation either way, while having a higher
+ * chance of causing compatibility issues.
+ *
+ * In practice, drivers usually ignore VkExternalMemoryBufferCreateInfo, or
+ * use it to filter out memory types in VkMemoryRequirements that do not
+ * support external memory. Binding an external memory to a buffer created
+ * without VkExternalMemoryBufferCreateInfo usually works.
+ *
+ * To formalize this, we are potentially looking for an extension that
+ * supports exporting memories without making them external. Because they
+ * are not external, they can be bound to buffers created without
+ * VkExternalMemoryBufferCreateInfo. And because they are not external, we
+ * need something that is not vkGetPhysicalDeviceExternalBufferProperties
+ * to determine the exportability. See
+ * vkr_physical_device_init_memory_properties as well.
+ */
+
+ vkr_buffer_create_and_add(dispatch->data, args);
}
static void
@@ -49,8 +52,11 @@ vkr_dispatch_vkGetBufferMemoryRequirements(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetBufferMemoryRequirements *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetBufferMemoryRequirements_args_handle(args);
- vkGetBufferMemoryRequirements(args->device, args->buffer, args->pMemoryRequirements);
+ vk->GetBufferMemoryRequirements(args->device, args->buffer, args->pMemoryRequirements);
}
static void
@@ -58,25 +64,34 @@ vkr_dispatch_vkGetBufferMemoryRequirements2(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetBufferMemoryRequirements2 *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetBufferMemoryRequirements2_args_handle(args);
- vkGetBufferMemoryRequirements2(args->device, args->pInfo, args->pMemoryRequirements);
+ vk->GetBufferMemoryRequirements2(args->device, args->pInfo, args->pMemoryRequirements);
}
static void
vkr_dispatch_vkBindBufferMemory(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkBindBufferMemory *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkBindBufferMemory_args_handle(args);
args->ret =
- vkBindBufferMemory(args->device, args->buffer, args->memory, args->memoryOffset);
+ vk->BindBufferMemory(args->device, args->buffer, args->memory, args->memoryOffset);
}
static void
vkr_dispatch_vkBindBufferMemory2(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkBindBufferMemory2 *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkBindBufferMemory2_args_handle(args);
- args->ret = vkBindBufferMemory2(args->device, args->bindInfoCount, args->pBindInfos);
+ args->ret = vk->BindBufferMemory2(args->device, args->bindInfoCount, args->pBindInfos);
}
static void
@@ -85,9 +100,10 @@ vkr_dispatch_vkGetBufferOpaqueCaptureAddress(
struct vn_command_vkGetBufferOpaqueCaptureAddress *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkGetBufferOpaqueCaptureAddress_args_handle(args);
- args->ret = dev->GetBufferOpaqueCaptureAddress(args->device, args->pInfo);
+ args->ret = vk->GetBufferOpaqueCaptureAddress(args->device, args->pInfo);
}
static void
@@ -95,9 +111,10 @@ vkr_dispatch_vkGetBufferDeviceAddress(UNUSED struct vn_dispatch_context *dispatc
struct vn_command_vkGetBufferDeviceAddress *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkGetBufferDeviceAddress_args_handle(args);
- args->ret = dev->GetBufferDeviceAddress(args->device, args->pInfo);
+ args->ret = vk->GetBufferDeviceAddress(args->device, args->pInfo);
}
static void
@@ -114,6 +131,19 @@ vkr_dispatch_vkDestroyBufferView(struct vn_dispatch_context *dispatch,
vkr_buffer_view_destroy_and_remove(dispatch->data, args);
}
+static void
+vkr_dispatch_vkGetDeviceBufferMemoryRequirements(
+ UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkGetDeviceBufferMemoryRequirements *args)
+{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
+ vn_replace_vkGetDeviceBufferMemoryRequirements_args_handle(args);
+ vk->GetDeviceBufferMemoryRequirements(args->device, args->pInfo,
+ args->pMemoryRequirements);
+}
+
void
vkr_context_init_buffer_dispatch(struct vkr_context *ctx)
{
@@ -130,6 +160,8 @@ vkr_context_init_buffer_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkGetBufferOpaqueCaptureAddress =
vkr_dispatch_vkGetBufferOpaqueCaptureAddress;
dispatch->dispatch_vkGetBufferDeviceAddress = vkr_dispatch_vkGetBufferDeviceAddress;
+ dispatch->dispatch_vkGetDeviceBufferMemoryRequirements =
+ vkr_dispatch_vkGetDeviceBufferMemoryRequirements;
}
void
diff --git a/src/venus/vkr_command_buffer.c b/src/venus/vkr_command_buffer.c
index 8ea426b1..abb2befe 100644
--- a/src/venus/vkr_command_buffer.c
+++ b/src/venus/vkr_command_buffer.c
@@ -7,6 +7,20 @@
#include "vkr_command_buffer_gen.h"
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#endif
+
+#define VKR_CMD_CALL(cmd_name, args, ...) \
+ do { \
+ struct vkr_command_buffer *_cmd = \
+ vkr_command_buffer_from_handle(args->commandBuffer); \
+ struct vn_device_proc_table *_vk = &_cmd->device->proc_table; \
+ \
+ vn_replace_vk##cmd_name##_args_handle(args); \
+ _vk->cmd_name(args->commandBuffer, ##__VA_ARGS__); \
+ } while (0)
+
static void
vkr_dispatch_vkCreateCommandPool(struct vn_dispatch_context *dispatch,
struct vn_command_vkCreateCommandPool *args)
@@ -28,8 +42,7 @@ vkr_dispatch_vkDestroyCommandPool(struct vn_dispatch_context *dispatch,
if (!pool)
return;
- vkr_context_remove_objects(ctx, &pool->command_buffers);
-
+ vkr_command_pool_release(ctx, pool);
vkr_command_pool_destroy_and_remove(ctx, args);
}
@@ -37,16 +50,22 @@ static void
vkr_dispatch_vkResetCommandPool(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkResetCommandPool *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkResetCommandPool_args_handle(args);
- args->ret = vkResetCommandPool(args->device, args->commandPool, args->flags);
+ args->ret = vk->ResetCommandPool(args->device, args->commandPool, args->flags);
}
static void
vkr_dispatch_vkTrimCommandPool(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkTrimCommandPool *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkTrimCommandPool_args_handle(args);
- vkTrimCommandPool(args->device, args->commandPool, args->flags);
+ vk->TrimCommandPool(args->device, args->commandPool, args->flags);
}
static void
@@ -91,262 +110,276 @@ static void
vkr_dispatch_vkResetCommandBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkResetCommandBuffer *args)
{
+ struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
+ struct vn_device_proc_table *vk = &cmd->device->proc_table;
+
vn_replace_vkResetCommandBuffer_args_handle(args);
- args->ret = vkResetCommandBuffer(args->commandBuffer, args->flags);
+ args->ret = vk->ResetCommandBuffer(args->commandBuffer, args->flags);
}
static void
vkr_dispatch_vkBeginCommandBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkBeginCommandBuffer *args)
{
+ struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
+ struct vn_device_proc_table *vk = &cmd->device->proc_table;
+
vn_replace_vkBeginCommandBuffer_args_handle(args);
- args->ret = vkBeginCommandBuffer(args->commandBuffer, args->pBeginInfo);
+ args->ret = vk->BeginCommandBuffer(args->commandBuffer, args->pBeginInfo);
}
static void
vkr_dispatch_vkEndCommandBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkEndCommandBuffer *args)
{
+ struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
+ struct vn_device_proc_table *vk = &cmd->device->proc_table;
+
vn_replace_vkEndCommandBuffer_args_handle(args);
- args->ret = vkEndCommandBuffer(args->commandBuffer);
+ args->ret = vk->EndCommandBuffer(args->commandBuffer);
}
static void
vkr_dispatch_vkCmdBindPipeline(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBindPipeline *args)
{
- vn_replace_vkCmdBindPipeline_args_handle(args);
- vkCmdBindPipeline(args->commandBuffer, args->pipelineBindPoint, args->pipeline);
+ VKR_CMD_CALL(CmdBindPipeline, args, args->pipelineBindPoint, args->pipeline);
}
static void
vkr_dispatch_vkCmdSetViewport(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetViewport *args)
{
- vn_replace_vkCmdSetViewport_args_handle(args);
- vkCmdSetViewport(args->commandBuffer, args->firstViewport, args->viewportCount,
- args->pViewports);
+ VKR_CMD_CALL(CmdSetViewport, args, args->firstViewport, args->viewportCount,
+ args->pViewports);
}
static void
vkr_dispatch_vkCmdSetScissor(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetScissor *args)
{
- vn_replace_vkCmdSetScissor_args_handle(args);
- vkCmdSetScissor(args->commandBuffer, args->firstScissor, args->scissorCount,
- args->pScissors);
+ VKR_CMD_CALL(CmdSetScissor, args, args->firstScissor, args->scissorCount,
+ args->pScissors);
}
static void
vkr_dispatch_vkCmdSetLineWidth(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetLineWidth *args)
{
- vn_replace_vkCmdSetLineWidth_args_handle(args);
- vkCmdSetLineWidth(args->commandBuffer, args->lineWidth);
+ VKR_CMD_CALL(CmdSetLineWidth, args, args->lineWidth);
}
static void
vkr_dispatch_vkCmdSetDepthBias(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetDepthBias *args)
{
- vn_replace_vkCmdSetDepthBias_args_handle(args);
- vkCmdSetDepthBias(args->commandBuffer, args->depthBiasConstantFactor,
- args->depthBiasClamp, args->depthBiasSlopeFactor);
+ VKR_CMD_CALL(CmdSetDepthBias, args, args->depthBiasConstantFactor,
+ args->depthBiasClamp, args->depthBiasSlopeFactor);
}
static void
vkr_dispatch_vkCmdSetBlendConstants(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetBlendConstants *args)
{
- vn_replace_vkCmdSetBlendConstants_args_handle(args);
- vkCmdSetBlendConstants(args->commandBuffer, args->blendConstants);
+ VKR_CMD_CALL(CmdSetBlendConstants, args, args->blendConstants);
}
static void
vkr_dispatch_vkCmdSetDepthBounds(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetDepthBounds *args)
{
- vn_replace_vkCmdSetDepthBounds_args_handle(args);
- vkCmdSetDepthBounds(args->commandBuffer, args->minDepthBounds, args->maxDepthBounds);
+ VKR_CMD_CALL(CmdSetDepthBounds, args, args->minDepthBounds, args->maxDepthBounds);
}
static void
vkr_dispatch_vkCmdSetStencilCompareMask(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetStencilCompareMask *args)
{
- vn_replace_vkCmdSetStencilCompareMask_args_handle(args);
- vkCmdSetStencilCompareMask(args->commandBuffer, args->faceMask, args->compareMask);
+ VKR_CMD_CALL(CmdSetStencilCompareMask, args, args->faceMask, args->compareMask);
}
static void
vkr_dispatch_vkCmdSetStencilWriteMask(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetStencilWriteMask *args)
{
- vn_replace_vkCmdSetStencilWriteMask_args_handle(args);
- vkCmdSetStencilWriteMask(args->commandBuffer, args->faceMask, args->writeMask);
+ VKR_CMD_CALL(CmdSetStencilWriteMask, args, args->faceMask, args->writeMask);
}
static void
vkr_dispatch_vkCmdSetStencilReference(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetStencilReference *args)
{
- vn_replace_vkCmdSetStencilReference_args_handle(args);
- vkCmdSetStencilReference(args->commandBuffer, args->faceMask, args->reference);
+ VKR_CMD_CALL(CmdSetStencilReference, args, args->faceMask, args->reference);
}
static void
vkr_dispatch_vkCmdBindDescriptorSets(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBindDescriptorSets *args)
{
- vn_replace_vkCmdBindDescriptorSets_args_handle(args);
- vkCmdBindDescriptorSets(args->commandBuffer, args->pipelineBindPoint, args->layout,
- args->firstSet, args->descriptorSetCount,
- args->pDescriptorSets, args->dynamicOffsetCount,
- args->pDynamicOffsets);
+ VKR_CMD_CALL(CmdBindDescriptorSets, args, args->pipelineBindPoint, args->layout,
+ args->firstSet, args->descriptorSetCount, args->pDescriptorSets,
+ args->dynamicOffsetCount, args->pDynamicOffsets);
}
static void
vkr_dispatch_vkCmdBindIndexBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBindIndexBuffer *args)
{
- vn_replace_vkCmdBindIndexBuffer_args_handle(args);
- vkCmdBindIndexBuffer(args->commandBuffer, args->buffer, args->offset, args->indexType);
+ VKR_CMD_CALL(CmdBindIndexBuffer, args, args->buffer, args->offset, args->indexType);
}
static void
vkr_dispatch_vkCmdBindVertexBuffers(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBindVertexBuffers *args)
{
- vn_replace_vkCmdBindVertexBuffers_args_handle(args);
- vkCmdBindVertexBuffers(args->commandBuffer, args->firstBinding, args->bindingCount,
- args->pBuffers, args->pOffsets);
+ VKR_CMD_CALL(CmdBindVertexBuffers, args, args->firstBinding, args->bindingCount,
+ args->pBuffers, args->pOffsets);
}
static void
vkr_dispatch_vkCmdDraw(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDraw *args)
{
- vn_replace_vkCmdDraw_args_handle(args);
- vkCmdDraw(args->commandBuffer, args->vertexCount, args->instanceCount,
- args->firstVertex, args->firstInstance);
+ VKR_CMD_CALL(CmdDraw, args, args->vertexCount, args->instanceCount, args->firstVertex,
+ args->firstInstance);
}
static void
vkr_dispatch_vkCmdDrawIndexed(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDrawIndexed *args)
{
- vn_replace_vkCmdDrawIndexed_args_handle(args);
- vkCmdDrawIndexed(args->commandBuffer, args->indexCount, args->instanceCount,
- args->firstIndex, args->vertexOffset, args->firstInstance);
+ VKR_CMD_CALL(CmdDrawIndexed, args, args->indexCount, args->instanceCount,
+ args->firstIndex, args->vertexOffset, args->firstInstance);
}
static void
vkr_dispatch_vkCmdDrawIndirect(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDrawIndirect *args)
{
- vn_replace_vkCmdDrawIndirect_args_handle(args);
- vkCmdDrawIndirect(args->commandBuffer, args->buffer, args->offset, args->drawCount,
- args->stride);
+ VKR_CMD_CALL(CmdDrawIndirect, args, args->buffer, args->offset, args->drawCount,
+ args->stride);
}
static void
vkr_dispatch_vkCmdDrawIndexedIndirect(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDrawIndexedIndirect *args)
{
- vn_replace_vkCmdDrawIndexedIndirect_args_handle(args);
- vkCmdDrawIndexedIndirect(args->commandBuffer, args->buffer, args->offset,
- args->drawCount, args->stride);
+ VKR_CMD_CALL(CmdDrawIndexedIndirect, args, args->buffer, args->offset, args->drawCount,
+ args->stride);
}
static void
vkr_dispatch_vkCmdDispatch(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDispatch *args)
{
- vn_replace_vkCmdDispatch_args_handle(args);
- vkCmdDispatch(args->commandBuffer, args->groupCountX, args->groupCountY,
- args->groupCountZ);
+ VKR_CMD_CALL(CmdDispatch, args, args->groupCountX, args->groupCountY,
+ args->groupCountZ);
}
static void
vkr_dispatch_vkCmdDispatchIndirect(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDispatchIndirect *args)
{
- vn_replace_vkCmdDispatchIndirect_args_handle(args);
- vkCmdDispatchIndirect(args->commandBuffer, args->buffer, args->offset);
+ VKR_CMD_CALL(CmdDispatchIndirect, args, args->buffer, args->offset);
}
static void
vkr_dispatch_vkCmdCopyBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdCopyBuffer *args)
{
- vn_replace_vkCmdCopyBuffer_args_handle(args);
- vkCmdCopyBuffer(args->commandBuffer, args->srcBuffer, args->dstBuffer,
- args->regionCount, args->pRegions);
+ VKR_CMD_CALL(CmdCopyBuffer, args, args->srcBuffer, args->dstBuffer, args->regionCount,
+ args->pRegions);
+}
+
+static void
+vkr_dispatch_vkCmdCopyBuffer2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdCopyBuffer2 *args)
+{
+ VKR_CMD_CALL(CmdCopyBuffer2, args, args->pCopyBufferInfo);
}
static void
vkr_dispatch_vkCmdCopyImage(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdCopyImage *args)
{
- vn_replace_vkCmdCopyImage_args_handle(args);
- vkCmdCopyImage(args->commandBuffer, args->srcImage, args->srcImageLayout,
- args->dstImage, args->dstImageLayout, args->regionCount,
- args->pRegions);
+ VKR_CMD_CALL(CmdCopyImage, args, args->srcImage, args->srcImageLayout, args->dstImage,
+ args->dstImageLayout, args->regionCount, args->pRegions);
+}
+
+static void
+vkr_dispatch_vkCmdCopyImage2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdCopyImage2 *args)
+{
+ VKR_CMD_CALL(CmdCopyImage2, args, args->pCopyImageInfo);
}
static void
vkr_dispatch_vkCmdBlitImage(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBlitImage *args)
{
- vn_replace_vkCmdBlitImage_args_handle(args);
- vkCmdBlitImage(args->commandBuffer, args->srcImage, args->srcImageLayout,
- args->dstImage, args->dstImageLayout, args->regionCount, args->pRegions,
- args->filter);
+ VKR_CMD_CALL(CmdBlitImage, args, args->srcImage, args->srcImageLayout, args->dstImage,
+ args->dstImageLayout, args->regionCount, args->pRegions, args->filter);
+}
+
+static void
+vkr_dispatch_vkCmdBlitImage2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdBlitImage2 *args)
+{
+ VKR_CMD_CALL(CmdBlitImage2, args, args->pBlitImageInfo);
}
static void
vkr_dispatch_vkCmdCopyBufferToImage(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdCopyBufferToImage *args)
{
- vn_replace_vkCmdCopyBufferToImage_args_handle(args);
- vkCmdCopyBufferToImage(args->commandBuffer, args->srcBuffer, args->dstImage,
- args->dstImageLayout, args->regionCount, args->pRegions);
+ VKR_CMD_CALL(CmdCopyBufferToImage, args, args->srcBuffer, args->dstImage,
+ args->dstImageLayout, args->regionCount, args->pRegions);
+}
+
+static void
+vkr_dispatch_vkCmdCopyBufferToImage2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdCopyBufferToImage2 *args)
+{
+ VKR_CMD_CALL(CmdCopyBufferToImage2, args, args->pCopyBufferToImageInfo);
}
static void
vkr_dispatch_vkCmdCopyImageToBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdCopyImageToBuffer *args)
{
- vn_replace_vkCmdCopyImageToBuffer_args_handle(args);
- vkCmdCopyImageToBuffer(args->commandBuffer, args->srcImage, args->srcImageLayout,
- args->dstBuffer, args->regionCount, args->pRegions);
+ VKR_CMD_CALL(CmdCopyImageToBuffer, args, args->srcImage, args->srcImageLayout,
+ args->dstBuffer, args->regionCount, args->pRegions);
+}
+
+static void
+vkr_dispatch_vkCmdCopyImageToBuffer2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdCopyImageToBuffer2 *args)
+{
+ VKR_CMD_CALL(CmdCopyImageToBuffer2, args, args->pCopyImageToBufferInfo);
}
static void
vkr_dispatch_vkCmdUpdateBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdUpdateBuffer *args)
{
- vn_replace_vkCmdUpdateBuffer_args_handle(args);
- vkCmdUpdateBuffer(args->commandBuffer, args->dstBuffer, args->dstOffset,
- args->dataSize, args->pData);
+ VKR_CMD_CALL(CmdUpdateBuffer, args, args->dstBuffer, args->dstOffset, args->dataSize,
+ args->pData);
}
static void
vkr_dispatch_vkCmdFillBuffer(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdFillBuffer *args)
{
- vn_replace_vkCmdFillBuffer_args_handle(args);
- vkCmdFillBuffer(args->commandBuffer, args->dstBuffer, args->dstOffset, args->size,
- args->data);
+ VKR_CMD_CALL(CmdFillBuffer, args, args->dstBuffer, args->dstOffset, args->size,
+ args->data);
}
static void
vkr_dispatch_vkCmdClearColorImage(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdClearColorImage *args)
{
- vn_replace_vkCmdClearColorImage_args_handle(args);
- vkCmdClearColorImage(args->commandBuffer, args->image, args->imageLayout, args->pColor,
- args->rangeCount, args->pRanges);
+ VKR_CMD_CALL(CmdClearColorImage, args, args->image, args->imageLayout, args->pColor,
+ args->rangeCount, args->pRanges);
}
static void
@@ -354,216 +387,187 @@ vkr_dispatch_vkCmdClearDepthStencilImage(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdClearDepthStencilImage *args)
{
- vn_replace_vkCmdClearDepthStencilImage_args_handle(args);
- vkCmdClearDepthStencilImage(args->commandBuffer, args->image, args->imageLayout,
- args->pDepthStencil, args->rangeCount, args->pRanges);
+ VKR_CMD_CALL(CmdClearDepthStencilImage, args, args->image, args->imageLayout,
+ args->pDepthStencil, args->rangeCount, args->pRanges);
}
static void
vkr_dispatch_vkCmdClearAttachments(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdClearAttachments *args)
{
- vn_replace_vkCmdClearAttachments_args_handle(args);
- vkCmdClearAttachments(args->commandBuffer, args->attachmentCount, args->pAttachments,
- args->rectCount, args->pRects);
+ VKR_CMD_CALL(CmdClearAttachments, args, args->attachmentCount, args->pAttachments,
+ args->rectCount, args->pRects);
}
static void
vkr_dispatch_vkCmdResolveImage(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdResolveImage *args)
{
- vn_replace_vkCmdResolveImage_args_handle(args);
- vkCmdResolveImage(args->commandBuffer, args->srcImage, args->srcImageLayout,
- args->dstImage, args->dstImageLayout, args->regionCount,
- args->pRegions);
+ VKR_CMD_CALL(CmdResolveImage, args, args->srcImage, args->srcImageLayout,
+ args->dstImage, args->dstImageLayout, args->regionCount, args->pRegions);
+}
+
+static void
+vkr_dispatch_vkCmdResolveImage2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdResolveImage2 *args)
+{
+ VKR_CMD_CALL(CmdResolveImage2, args, args->pResolveImageInfo);
}
static void
vkr_dispatch_vkCmdSetEvent(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetEvent *args)
{
- vn_replace_vkCmdSetEvent_args_handle(args);
- vkCmdSetEvent(args->commandBuffer, args->event, args->stageMask);
+ VKR_CMD_CALL(CmdSetEvent, args, args->event, args->stageMask);
}
static void
vkr_dispatch_vkCmdResetEvent(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdResetEvent *args)
{
- vn_replace_vkCmdResetEvent_args_handle(args);
- vkCmdResetEvent(args->commandBuffer, args->event, args->stageMask);
+ VKR_CMD_CALL(CmdResetEvent, args, args->event, args->stageMask);
}
static void
vkr_dispatch_vkCmdWaitEvents(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdWaitEvents *args)
{
- vn_replace_vkCmdWaitEvents_args_handle(args);
- vkCmdWaitEvents(args->commandBuffer, args->eventCount, args->pEvents,
- args->srcStageMask, args->dstStageMask, args->memoryBarrierCount,
- args->pMemoryBarriers, args->bufferMemoryBarrierCount,
- args->pBufferMemoryBarriers, args->imageMemoryBarrierCount,
- args->pImageMemoryBarriers);
+ VKR_CMD_CALL(CmdWaitEvents, args, args->eventCount, args->pEvents, args->srcStageMask,
+ args->dstStageMask, args->memoryBarrierCount, args->pMemoryBarriers,
+ args->bufferMemoryBarrierCount, args->pBufferMemoryBarriers,
+ args->imageMemoryBarrierCount, args->pImageMemoryBarriers);
}
static void
vkr_dispatch_vkCmdPipelineBarrier(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdPipelineBarrier *args)
{
- vn_replace_vkCmdPipelineBarrier_args_handle(args);
- vkCmdPipelineBarrier(args->commandBuffer, args->srcStageMask, args->dstStageMask,
- args->dependencyFlags, args->memoryBarrierCount,
- args->pMemoryBarriers, args->bufferMemoryBarrierCount,
- args->pBufferMemoryBarriers, args->imageMemoryBarrierCount,
- args->pImageMemoryBarriers);
+ VKR_CMD_CALL(CmdPipelineBarrier, args, args->srcStageMask, args->dstStageMask,
+ args->dependencyFlags, args->memoryBarrierCount, args->pMemoryBarriers,
+ args->bufferMemoryBarrierCount, args->pBufferMemoryBarriers,
+ args->imageMemoryBarrierCount, args->pImageMemoryBarriers);
}
static void
vkr_dispatch_vkCmdBeginQuery(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBeginQuery *args)
{
- vn_replace_vkCmdBeginQuery_args_handle(args);
- vkCmdBeginQuery(args->commandBuffer, args->queryPool, args->query, args->flags);
+ VKR_CMD_CALL(CmdBeginQuery, args, args->queryPool, args->query, args->flags);
}
static void
vkr_dispatch_vkCmdEndQuery(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdEndQuery *args)
{
- vn_replace_vkCmdEndQuery_args_handle(args);
- vkCmdEndQuery(args->commandBuffer, args->queryPool, args->query);
+ VKR_CMD_CALL(CmdEndQuery, args, args->queryPool, args->query);
}
static void
vkr_dispatch_vkCmdResetQueryPool(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdResetQueryPool *args)
{
- vn_replace_vkCmdResetQueryPool_args_handle(args);
- vkCmdResetQueryPool(args->commandBuffer, args->queryPool, args->firstQuery,
- args->queryCount);
+ VKR_CMD_CALL(CmdResetQueryPool, args, args->queryPool, args->firstQuery,
+ args->queryCount);
}
static void
vkr_dispatch_vkCmdWriteTimestamp(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdWriteTimestamp *args)
{
- vn_replace_vkCmdWriteTimestamp_args_handle(args);
- vkCmdWriteTimestamp(args->commandBuffer, args->pipelineStage, args->queryPool,
- args->query);
+ VKR_CMD_CALL(CmdWriteTimestamp, args, args->pipelineStage, args->queryPool,
+ args->query);
}
static void
vkr_dispatch_vkCmdCopyQueryPoolResults(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdCopyQueryPoolResults *args)
{
- vn_replace_vkCmdCopyQueryPoolResults_args_handle(args);
- vkCmdCopyQueryPoolResults(args->commandBuffer, args->queryPool, args->firstQuery,
- args->queryCount, args->dstBuffer, args->dstOffset,
- args->stride, args->flags);
+ VKR_CMD_CALL(CmdCopyQueryPoolResults, args, args->queryPool, args->firstQuery,
+ args->queryCount, args->dstBuffer, args->dstOffset, args->stride,
+ args->flags);
}
static void
vkr_dispatch_vkCmdPushConstants(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdPushConstants *args)
{
- vn_replace_vkCmdPushConstants_args_handle(args);
- vkCmdPushConstants(args->commandBuffer, args->layout, args->stageFlags, args->offset,
- args->size, args->pValues);
+ VKR_CMD_CALL(CmdPushConstants, args, args->layout, args->stageFlags, args->offset,
+ args->size, args->pValues);
}
static void
vkr_dispatch_vkCmdBeginRenderPass(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBeginRenderPass *args)
{
- vn_replace_vkCmdBeginRenderPass_args_handle(args);
- vkCmdBeginRenderPass(args->commandBuffer, args->pRenderPassBegin, args->contents);
+ VKR_CMD_CALL(CmdBeginRenderPass, args, args->pRenderPassBegin, args->contents);
}
static void
vkr_dispatch_vkCmdNextSubpass(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdNextSubpass *args)
{
- vn_replace_vkCmdNextSubpass_args_handle(args);
- vkCmdNextSubpass(args->commandBuffer, args->contents);
+ VKR_CMD_CALL(CmdNextSubpass, args, args->contents);
}
static void
vkr_dispatch_vkCmdEndRenderPass(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdEndRenderPass *args)
{
- vn_replace_vkCmdEndRenderPass_args_handle(args);
- vkCmdEndRenderPass(args->commandBuffer);
+ VKR_CMD_CALL(CmdEndRenderPass, args);
}
static void
vkr_dispatch_vkCmdExecuteCommands(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdExecuteCommands *args)
{
- vn_replace_vkCmdExecuteCommands_args_handle(args);
- vkCmdExecuteCommands(args->commandBuffer, args->commandBufferCount,
- args->pCommandBuffers);
+ VKR_CMD_CALL(CmdExecuteCommands, args, args->commandBufferCount,
+ args->pCommandBuffers);
}
static void
vkr_dispatch_vkCmdSetDeviceMask(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdSetDeviceMask *args)
{
- vn_replace_vkCmdSetDeviceMask_args_handle(args);
- vkCmdSetDeviceMask(args->commandBuffer, args->deviceMask);
+ VKR_CMD_CALL(CmdSetDeviceMask, args, args->deviceMask);
}
static void
vkr_dispatch_vkCmdDispatchBase(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDispatchBase *args)
{
- vn_replace_vkCmdDispatchBase_args_handle(args);
- vkCmdDispatchBase(args->commandBuffer, args->baseGroupX, args->baseGroupY,
- args->baseGroupZ, args->groupCountX, args->groupCountY,
- args->groupCountZ);
+ VKR_CMD_CALL(CmdDispatchBase, args, args->baseGroupX, args->baseGroupY,
+ args->baseGroupZ, args->groupCountX, args->groupCountY,
+ args->groupCountZ);
}
static void
vkr_dispatch_vkCmdBeginRenderPass2(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBeginRenderPass2 *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdBeginRenderPass2_args_handle(args);
- cmd->device->CmdBeginRenderPass2(args->commandBuffer, args->pRenderPassBegin,
- args->pSubpassBeginInfo);
+ VKR_CMD_CALL(CmdBeginRenderPass2, args, args->pRenderPassBegin,
+ args->pSubpassBeginInfo);
}
static void
vkr_dispatch_vkCmdNextSubpass2(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdNextSubpass2 *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdNextSubpass2_args_handle(args);
- cmd->device->CmdNextSubpass2(args->commandBuffer, args->pSubpassBeginInfo,
- args->pSubpassEndInfo);
+ VKR_CMD_CALL(CmdNextSubpass2, args, args->pSubpassBeginInfo, args->pSubpassEndInfo);
}
static void
vkr_dispatch_vkCmdEndRenderPass2(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdEndRenderPass2 *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdEndRenderPass2_args_handle(args);
- cmd->device->CmdEndRenderPass2(args->commandBuffer, args->pSubpassEndInfo);
+ VKR_CMD_CALL(CmdEndRenderPass2, args, args->pSubpassEndInfo);
}
static void
vkr_dispatch_vkCmdDrawIndirectCount(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDrawIndirectCount *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdDrawIndirectCount_args_handle(args);
- cmd->device->CmdDrawIndirectCount(args->commandBuffer, args->buffer, args->offset,
- args->countBuffer, args->countBufferOffset,
- args->maxDrawCount, args->stride);
+ VKR_CMD_CALL(CmdDrawIndirectCount, args, args->buffer, args->offset, args->countBuffer,
+ args->countBufferOffset, args->maxDrawCount, args->stride);
}
static void
@@ -571,12 +575,17 @@ vkr_dispatch_vkCmdDrawIndexedIndirectCount(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDrawIndexedIndirectCount *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
+ VKR_CMD_CALL(CmdDrawIndexedIndirectCount, args, args->buffer, args->offset,
+ args->countBuffer, args->countBufferOffset, args->maxDrawCount,
+ args->stride);
+}
- vn_replace_vkCmdDrawIndexedIndirectCount_args_handle(args);
- cmd->device->CmdDrawIndexedIndirectCount(
- args->commandBuffer, args->buffer, args->offset, args->countBuffer,
- args->countBufferOffset, args->maxDrawCount, args->stride);
+static void
+vkr_dispatch_vkCmdSetLineStippleEXT(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetLineStippleEXT *args)
+{
+ VKR_CMD_CALL(CmdSetLineStippleEXT, args, args->lineStippleFactor,
+ args->lineStipplePattern);
}
static void
@@ -584,12 +593,8 @@ vkr_dispatch_vkCmdBindTransformFeedbackBuffersEXT(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBindTransformFeedbackBuffersEXT *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdBindTransformFeedbackBuffersEXT_args_handle(args);
- cmd->device->cmd_bind_transform_feedback_buffers(
- args->commandBuffer, args->firstBinding, args->bindingCount, args->pBuffers,
- args->pOffsets, args->pSizes);
+ VKR_CMD_CALL(CmdBindTransformFeedbackBuffersEXT, args, args->firstBinding,
+ args->bindingCount, args->pBuffers, args->pOffsets, args->pSizes);
}
static void
@@ -597,12 +602,9 @@ vkr_dispatch_vkCmdBeginTransformFeedbackEXT(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBeginTransformFeedbackEXT *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdBeginTransformFeedbackEXT_args_handle(args);
- cmd->device->cmd_begin_transform_feedback(
- args->commandBuffer, args->firstCounterBuffer, args->counterBufferCount,
- args->pCounterBuffers, args->pCounterBufferOffsets);
+ VKR_CMD_CALL(CmdBeginTransformFeedbackEXT, args, args->firstCounterBuffer,
+ args->counterBufferCount, args->pCounterBuffers,
+ args->pCounterBufferOffsets);
}
static void
@@ -610,34 +612,24 @@ vkr_dispatch_vkCmdEndTransformFeedbackEXT(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdEndTransformFeedbackEXT *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdEndTransformFeedbackEXT_args_handle(args);
- cmd->device->cmd_end_transform_feedback(
- args->commandBuffer, args->firstCounterBuffer, args->counterBufferCount,
- args->pCounterBuffers, args->pCounterBufferOffsets);
+ VKR_CMD_CALL(CmdEndTransformFeedbackEXT, args, args->firstCounterBuffer,
+ args->counterBufferCount, args->pCounterBuffers,
+ args->pCounterBufferOffsets);
}
static void
vkr_dispatch_vkCmdBeginQueryIndexedEXT(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdBeginQueryIndexedEXT *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdBeginQueryIndexedEXT_args_handle(args);
- cmd->device->cmd_begin_query_indexed(args->commandBuffer, args->queryPool, args->query,
- args->flags, args->index);
+ VKR_CMD_CALL(CmdBeginQueryIndexedEXT, args, args->queryPool, args->query, args->flags,
+ args->index);
}
static void
vkr_dispatch_vkCmdEndQueryIndexedEXT(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdEndQueryIndexedEXT *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
-
- vn_replace_vkCmdEndQueryIndexedEXT_args_handle(args);
- cmd->device->cmd_end_query_indexed(args->commandBuffer, args->queryPool, args->query,
- args->index);
+ VKR_CMD_CALL(CmdEndQueryIndexedEXT, args, args->queryPool, args->query, args->index);
}
static void
@@ -645,12 +637,225 @@ vkr_dispatch_vkCmdDrawIndirectByteCountEXT(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkCmdDrawIndirectByteCountEXT *args)
{
- struct vkr_command_buffer *cmd = vkr_command_buffer_from_handle(args->commandBuffer);
+ VKR_CMD_CALL(CmdDrawIndirectByteCountEXT, args, args->instanceCount,
+ args->firstInstance, args->counterBuffer, args->counterBufferOffset,
+ args->counterOffset, args->vertexStride);
+}
+
+static void
+vkr_dispatch_vkCmdBindVertexBuffers2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdBindVertexBuffers2 *args)
+{
+ VKR_CMD_CALL(CmdBindVertexBuffers2, args, args->firstBinding, args->bindingCount,
+ args->pBuffers, args->pOffsets, args->pSizes, args->pStrides);
+}
+
+static void
+vkr_dispatch_vkCmdSetCullMode(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetCullMode *args)
+{
+ VKR_CMD_CALL(CmdSetCullMode, args, args->cullMode);
+}
- vn_replace_vkCmdDrawIndirectByteCountEXT_args_handle(args);
- cmd->device->cmd_draw_indirect_byte_count(
- args->commandBuffer, args->instanceCount, args->firstInstance, args->counterBuffer,
- args->counterBufferOffset, args->counterOffset, args->vertexStride);
+static void
+vkr_dispatch_vkCmdSetDepthBoundsTestEnable(
+ UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetDepthBoundsTestEnable *args)
+{
+ VKR_CMD_CALL(CmdSetDepthBoundsTestEnable, args, args->depthBoundsTestEnable);
+}
+
+static void
+vkr_dispatch_vkCmdSetDepthCompareOp(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetDepthCompareOp *args)
+{
+ VKR_CMD_CALL(CmdSetDepthCompareOp, args, args->depthCompareOp);
+}
+
+static void
+vkr_dispatch_vkCmdSetDepthTestEnable(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetDepthTestEnable *args)
+{
+ VKR_CMD_CALL(CmdSetDepthTestEnable, args, args->depthTestEnable);
+}
+
+static void
+vkr_dispatch_vkCmdSetDepthWriteEnable(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetDepthWriteEnable *args)
+{
+ VKR_CMD_CALL(CmdSetDepthWriteEnable, args, args->depthWriteEnable);
+}
+
+static void
+vkr_dispatch_vkCmdSetFrontFace(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetFrontFace *args)
+{
+ VKR_CMD_CALL(CmdSetFrontFace, args, args->frontFace);
+}
+
+static void
+vkr_dispatch_vkCmdSetPrimitiveTopology(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetPrimitiveTopology *args)
+{
+ VKR_CMD_CALL(CmdSetPrimitiveTopology, args, args->primitiveTopology);
+}
+
+static void
+vkr_dispatch_vkCmdSetScissorWithCount(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetScissorWithCount *args)
+{
+ VKR_CMD_CALL(CmdSetScissorWithCount, args, args->scissorCount, args->pScissors);
+}
+
+static void
+vkr_dispatch_vkCmdSetStencilOp(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetStencilOp *args)
+{
+ VKR_CMD_CALL(CmdSetStencilOp, args, args->faceMask, args->failOp, args->passOp,
+ args->depthFailOp, args->compareOp);
+}
+
+static void
+vkr_dispatch_vkCmdSetStencilTestEnable(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetStencilTestEnable *args)
+{
+ VKR_CMD_CALL(CmdSetStencilTestEnable, args, args->stencilTestEnable);
+}
+
+static void
+vkr_dispatch_vkCmdSetViewportWithCount(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetViewportWithCount *args)
+{
+ VKR_CMD_CALL(CmdSetViewportWithCount, args, args->viewportCount, args->pViewports);
+}
+
+static void
+vkr_dispatch_vkCmdSetDepthBiasEnable(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetDepthBiasEnable *args)
+{
+ VKR_CMD_CALL(CmdSetDepthBiasEnable, args, args->depthBiasEnable);
+}
+
+static void
+vkr_dispatch_vkCmdSetLogicOpEXT(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetLogicOpEXT *args)
+{
+ VKR_CMD_CALL(CmdSetLogicOpEXT, args, args->logicOp);
+}
+
+static void
+vkr_dispatch_vkCmdSetPatchControlPointsEXT(
+ UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetPatchControlPointsEXT *args)
+{
+ VKR_CMD_CALL(CmdSetPatchControlPointsEXT, args, args->patchControlPoints);
+}
+
+static void
+vkr_dispatch_vkCmdSetPrimitiveRestartEnable(
+ UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetPrimitiveRestartEnable *args)
+{
+ VKR_CMD_CALL(CmdSetPrimitiveRestartEnable, args, args->primitiveRestartEnable);
+}
+
+static void
+vkr_dispatch_vkCmdSetRasterizerDiscardEnable(
+ UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdSetRasterizerDiscardEnable *args)
+{
+ VKR_CMD_CALL(CmdSetRasterizerDiscardEnable, args, args->rasterizerDiscardEnable);
+}
+
+static void
+vkr_dispatch_vkCmdBeginConditionalRenderingEXT(
+ UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdBeginConditionalRenderingEXT *args)
+{
+ VKR_CMD_CALL(CmdBeginConditionalRenderingEXT, args, args->pConditionalRenderingBegin);
+}
+
+static void
+vkr_dispatch_vkCmdEndConditionalRenderingEXT(
+ UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdEndConditionalRenderingEXT *args)
+{
+ VKR_CMD_CALL(CmdEndConditionalRenderingEXT, args);
+}
+
+static void
+vkr_dispatch_vkCmdBeginRendering(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdBeginRendering *args)
+{
+ VKR_CMD_CALL(CmdBeginRendering, args, args->pRenderingInfo);
+}
+
+static void
+vkr_dispatch_vkCmdEndRendering(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdEndRendering *args)
+{
+ VKR_CMD_CALL(CmdEndRendering, args);
+}
+
+static void
+vkr_dispatch_vkCmdPipelineBarrier2(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdPipelineBarrier2 *args)
+{
+ VKR_CMD_CALL(CmdPipelineBarrier2, args, args->pDependencyInfo);
+}
+
+static void
+vkr_dispatch_vkCmdResetEvent2(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdResetEvent2 *args)
+{
+ VKR_CMD_CALL(CmdResetEvent2, args, args->event, args->stageMask);
+}
+
+static void
+vkr_dispatch_vkCmdSetEvent2(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdSetEvent2 *args)
+{
+ VKR_CMD_CALL(CmdSetEvent2, args, args->event, args->pDependencyInfo);
+}
+
+static void
+vkr_dispatch_vkCmdWaitEvents2(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdWaitEvents2 *args)
+{
+ VKR_CMD_CALL(CmdWaitEvents2, args, args->eventCount, args->pEvents,
+ args->pDependencyInfos);
+}
+
+static void
+vkr_dispatch_vkCmdWriteTimestamp2(UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkCmdWriteTimestamp2 *args)
+{
+ VKR_CMD_CALL(CmdWriteTimestamp2, args, args->stage, args->queryPool, args->query);
+}
+
+static void
+vkr_dispatch_vkCmdDrawMultiEXT(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdDrawMultiEXT *args)
+{
+ VKR_CMD_CALL(CmdDrawMultiEXT, args, args->drawCount, args->pVertexInfo,
+ args->instanceCount, args->firstInstance, args->stride);
+}
+
+static void
+vkr_dispatch_vkCmdDrawMultiIndexedEXT(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdDrawMultiIndexedEXT *args)
+{
+ VKR_CMD_CALL(CmdDrawMultiIndexedEXT, args, args->drawCount, args->pIndexInfo,
+ args->instanceCount, args->firstInstance, args->stride,
+ args->pVertexOffset);
+}
+
+static void
+vkr_dispatch_vkCmdPushDescriptorSetKHR(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkCmdPushDescriptorSetKHR *args)
+{
+ VKR_CMD_CALL(CmdPushDescriptorSetKHR, args, args->pipelineBindPoint, args->layout,
+ args->set, args->descriptorWriteCount, args->pDescriptorWrites);
}
void
@@ -696,10 +901,15 @@ vkr_context_init_command_buffer_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkCmdDispatch = vkr_dispatch_vkCmdDispatch;
dispatch->dispatch_vkCmdDispatchIndirect = vkr_dispatch_vkCmdDispatchIndirect;
dispatch->dispatch_vkCmdCopyBuffer = vkr_dispatch_vkCmdCopyBuffer;
+ dispatch->dispatch_vkCmdCopyBuffer2 = vkr_dispatch_vkCmdCopyBuffer2;
dispatch->dispatch_vkCmdCopyImage = vkr_dispatch_vkCmdCopyImage;
+ dispatch->dispatch_vkCmdCopyImage2 = vkr_dispatch_vkCmdCopyImage2;
dispatch->dispatch_vkCmdBlitImage = vkr_dispatch_vkCmdBlitImage;
+ dispatch->dispatch_vkCmdBlitImage2 = vkr_dispatch_vkCmdBlitImage2;
dispatch->dispatch_vkCmdCopyBufferToImage = vkr_dispatch_vkCmdCopyBufferToImage;
+ dispatch->dispatch_vkCmdCopyBufferToImage2 = vkr_dispatch_vkCmdCopyBufferToImage2;
dispatch->dispatch_vkCmdCopyImageToBuffer = vkr_dispatch_vkCmdCopyImageToBuffer;
+ dispatch->dispatch_vkCmdCopyImageToBuffer2 = vkr_dispatch_vkCmdCopyImageToBuffer2;
dispatch->dispatch_vkCmdUpdateBuffer = vkr_dispatch_vkCmdUpdateBuffer;
dispatch->dispatch_vkCmdFillBuffer = vkr_dispatch_vkCmdFillBuffer;
dispatch->dispatch_vkCmdClearColorImage = vkr_dispatch_vkCmdClearColorImage;
@@ -707,6 +917,7 @@ vkr_context_init_command_buffer_dispatch(struct vkr_context *ctx)
vkr_dispatch_vkCmdClearDepthStencilImage;
dispatch->dispatch_vkCmdClearAttachments = vkr_dispatch_vkCmdClearAttachments;
dispatch->dispatch_vkCmdResolveImage = vkr_dispatch_vkCmdResolveImage;
+ dispatch->dispatch_vkCmdResolveImage2 = vkr_dispatch_vkCmdResolveImage2;
dispatch->dispatch_vkCmdSetEvent = vkr_dispatch_vkCmdSetEvent;
dispatch->dispatch_vkCmdResetEvent = vkr_dispatch_vkCmdResetEvent;
dispatch->dispatch_vkCmdWaitEvents = vkr_dispatch_vkCmdWaitEvents;
@@ -730,6 +941,8 @@ vkr_context_init_command_buffer_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkCmdDrawIndexedIndirectCount =
vkr_dispatch_vkCmdDrawIndexedIndirectCount;
+ dispatch->dispatch_vkCmdSetLineStippleEXT = vkr_dispatch_vkCmdSetLineStippleEXT;
+
dispatch->dispatch_vkCmdBindTransformFeedbackBuffersEXT =
vkr_dispatch_vkCmdBindTransformFeedbackBuffersEXT;
dispatch->dispatch_vkCmdBeginTransformFeedbackEXT =
@@ -740,4 +953,53 @@ vkr_context_init_command_buffer_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkCmdEndQueryIndexedEXT = vkr_dispatch_vkCmdEndQueryIndexedEXT;
dispatch->dispatch_vkCmdDrawIndirectByteCountEXT =
vkr_dispatch_vkCmdDrawIndirectByteCountEXT;
+
+ dispatch->dispatch_vkCmdBindVertexBuffers2 = vkr_dispatch_vkCmdBindVertexBuffers2;
+ dispatch->dispatch_vkCmdSetCullMode = vkr_dispatch_vkCmdSetCullMode;
+ dispatch->dispatch_vkCmdSetDepthBoundsTestEnable =
+ vkr_dispatch_vkCmdSetDepthBoundsTestEnable;
+ dispatch->dispatch_vkCmdSetDepthCompareOp = vkr_dispatch_vkCmdSetDepthCompareOp;
+ dispatch->dispatch_vkCmdSetDepthTestEnable = vkr_dispatch_vkCmdSetDepthTestEnable;
+ dispatch->dispatch_vkCmdSetDepthWriteEnable = vkr_dispatch_vkCmdSetDepthWriteEnable;
+ dispatch->dispatch_vkCmdSetFrontFace = vkr_dispatch_vkCmdSetFrontFace;
+ dispatch->dispatch_vkCmdSetPrimitiveTopology = vkr_dispatch_vkCmdSetPrimitiveTopology;
+ dispatch->dispatch_vkCmdSetScissorWithCount = vkr_dispatch_vkCmdSetScissorWithCount;
+ dispatch->dispatch_vkCmdSetStencilOp = vkr_dispatch_vkCmdSetStencilOp;
+ dispatch->dispatch_vkCmdSetStencilTestEnable = vkr_dispatch_vkCmdSetStencilTestEnable;
+ dispatch->dispatch_vkCmdSetViewportWithCount = vkr_dispatch_vkCmdSetViewportWithCount;
+
+ /* VK_KHR_dynamic_rendering */
+ dispatch->dispatch_vkCmdBeginRendering = vkr_dispatch_vkCmdBeginRendering;
+ dispatch->dispatch_vkCmdEndRendering = vkr_dispatch_vkCmdEndRendering;
+
+ /* VK_KHR_synchronization2 */
+ dispatch->dispatch_vkCmdPipelineBarrier2 = vkr_dispatch_vkCmdPipelineBarrier2;
+ dispatch->dispatch_vkCmdResetEvent2 = vkr_dispatch_vkCmdResetEvent2;
+ dispatch->dispatch_vkCmdSetEvent2 = vkr_dispatch_vkCmdSetEvent2;
+ dispatch->dispatch_vkCmdWaitEvents2 = vkr_dispatch_vkCmdWaitEvents2;
+ dispatch->dispatch_vkCmdWriteTimestamp2 = vkr_dispatch_vkCmdWriteTimestamp2;
+
+ /* VK_EXT_extended_dynamic_state2 */
+ dispatch->dispatch_vkCmdSetRasterizerDiscardEnable =
+ vkr_dispatch_vkCmdSetRasterizerDiscardEnable;
+ dispatch->dispatch_vkCmdSetPrimitiveRestartEnable =
+ vkr_dispatch_vkCmdSetPrimitiveRestartEnable;
+ dispatch->dispatch_vkCmdSetPatchControlPointsEXT =
+ vkr_dispatch_vkCmdSetPatchControlPointsEXT;
+ dispatch->dispatch_vkCmdSetLogicOpEXT = vkr_dispatch_vkCmdSetLogicOpEXT;
+ dispatch->dispatch_vkCmdSetDepthBiasEnable = vkr_dispatch_vkCmdSetDepthBiasEnable;
+
+ /* VK_EXT_conditional_rendering */
+ dispatch->dispatch_vkCmdBeginConditionalRenderingEXT =
+ vkr_dispatch_vkCmdBeginConditionalRenderingEXT;
+ dispatch->dispatch_vkCmdEndConditionalRenderingEXT =
+ vkr_dispatch_vkCmdEndConditionalRenderingEXT;
+
+ /* VK_EXT_multi_draw */
+ dispatch->dispatch_vkCmdDrawMultiEXT = vkr_dispatch_vkCmdDrawMultiEXT;
+ dispatch->dispatch_vkCmdDrawMultiIndexedEXT = vkr_dispatch_vkCmdDrawMultiIndexedEXT;
+
+ /* VK_KHR_push_descriptor */
+ dispatch->dispatch_vkCmdPushDescriptorSetKHR = vkr_dispatch_vkCmdPushDescriptorSetKHR;
+ dispatch->dispatch_vkCmdPushDescriptorSetWithTemplateKHR = NULL;
}
diff --git a/src/venus/vkr_command_buffer.h b/src/venus/vkr_command_buffer.h
index 9f10a1ee..90b6d384 100644
--- a/src/venus/vkr_command_buffer.h
+++ b/src/venus/vkr_command_buffer.h
@@ -8,6 +8,8 @@
#include "vkr_common.h"
+#include "vkr_context.h"
+
struct vkr_command_pool {
struct vkr_object base;
@@ -28,4 +30,10 @@ vkr_context_init_command_pool_dispatch(struct vkr_context *ctx);
void
vkr_context_init_command_buffer_dispatch(struct vkr_context *ctx);
+static inline void
+vkr_command_pool_release(struct vkr_context *ctx, struct vkr_command_pool *pool)
+{
+ vkr_context_remove_objects(ctx, &pool->command_buffers);
+}
+
#endif /* VKR_COMMAND_BUFFER_H */
diff --git a/src/venus/vkr_common.c b/src/venus/vkr_common.c
index 8a85e135..e3c33627 100644
--- a/src/venus/vkr_common.c
+++ b/src/venus/vkr_common.c
@@ -8,9 +8,121 @@
#include <stdarg.h>
#include <stdio.h>
+#include "venus-protocol/vn_protocol_renderer_info.h"
+
#include "vkr_context.h"
#include "vkr_cs.h"
+static const struct vn_info_extension_table vkr_extension_table = {
+ /* Venus extensions */
+ .EXT_command_serialization = true,
+ .MESA_venus_protocol = true,
+ /* promoted to VK_VERSION_1_1 */
+ .KHR_16bit_storage = true,
+ .KHR_bind_memory2 = true,
+ .KHR_dedicated_allocation = true,
+ .KHR_descriptor_update_template = true,
+ .KHR_device_group = true,
+ .KHR_device_group_creation = true,
+ .KHR_external_fence = true,
+ .KHR_external_fence_capabilities = true,
+ .KHR_external_memory = true,
+ .KHR_external_memory_capabilities = true,
+ .KHR_external_semaphore = true,
+ .KHR_external_semaphore_capabilities = true,
+ .KHR_get_memory_requirements2 = true,
+ .KHR_get_physical_device_properties2 = true,
+ .KHR_maintenance1 = true,
+ .KHR_maintenance2 = true,
+ .KHR_maintenance3 = true,
+ .KHR_multiview = true,
+ .KHR_relaxed_block_layout = true,
+ .KHR_sampler_ycbcr_conversion = true,
+ .KHR_shader_draw_parameters = true,
+ .KHR_storage_buffer_storage_class = true,
+ .KHR_variable_pointers = true,
+ /* promoted to VK_VERSION_1_2 */
+ .KHR_8bit_storage = true,
+ .KHR_buffer_device_address = true,
+ .KHR_create_renderpass2 = true,
+ .KHR_depth_stencil_resolve = true,
+ .KHR_draw_indirect_count = true,
+ .KHR_driver_properties = true,
+ .KHR_image_format_list = true,
+ .KHR_imageless_framebuffer = true,
+ .KHR_sampler_mirror_clamp_to_edge = true,
+ .KHR_separate_depth_stencil_layouts = true,
+ .KHR_shader_atomic_int64 = true,
+ .KHR_shader_float16_int8 = true,
+ .KHR_shader_float_controls = true,
+ .KHR_shader_subgroup_extended_types = true,
+ .KHR_spirv_1_4 = true,
+ .KHR_timeline_semaphore = true,
+ .KHR_uniform_buffer_standard_layout = true,
+ .KHR_vulkan_memory_model = true,
+ .EXT_descriptor_indexing = true,
+ .EXT_host_query_reset = true,
+ .EXT_sampler_filter_minmax = true,
+ .EXT_scalar_block_layout = true,
+ .EXT_separate_stencil_usage = true,
+ .EXT_shader_viewport_index_layer = true,
+ /* promoted to VK_VERSION_1_3 */
+ .KHR_copy_commands2 = true,
+ .KHR_dynamic_rendering = true,
+ .KHR_format_feature_flags2 = false,
+ .KHR_maintenance4 = true,
+ .KHR_shader_integer_dot_product = true,
+ .KHR_shader_non_semantic_info = true,
+ .KHR_shader_terminate_invocation = true,
+ .KHR_synchronization2 = true,
+ .KHR_zero_initialize_workgroup_memory = true,
+ .EXT_4444_formats = true,
+ .EXT_extended_dynamic_state = true,
+ .EXT_extended_dynamic_state2 = true,
+ .EXT_image_robustness = true,
+ .EXT_inline_uniform_block = true,
+ .EXT_pipeline_creation_cache_control = true,
+ .EXT_pipeline_creation_feedback = true,
+ /* TODO(VK_EXT_private_data): Support natively in the guest */
+ .EXT_private_data = true,
+ .EXT_shader_demote_to_helper_invocation = true,
+ .EXT_subgroup_size_control = true,
+ .EXT_texel_buffer_alignment = true,
+ .EXT_texture_compression_astc_hdr = true,
+ .EXT_tooling_info = false, /* implementation in driver */
+ .EXT_ycbcr_2plane_444_formats = true,
+ /* KHR extensions */
+ .KHR_external_fence_fd = true,
+ .KHR_external_memory_fd = true,
+ .KHR_external_semaphore_fd = true,
+ .KHR_push_descriptor = true,
+ /* EXT extensions */
+ .EXT_calibrated_timestamps = true,
+ .EXT_conservative_rasterization = true,
+ .EXT_conditional_rendering = true,
+ .EXT_custom_border_color = true,
+ .EXT_depth_clip_control = true,
+ .EXT_depth_clip_enable = true,
+ .EXT_external_memory_dma_buf = true,
+ .EXT_image_drm_format_modifier = true,
+ .EXT_image_view_min_lod = true,
+ .EXT_index_type_uint8 = true,
+ .EXT_line_rasterization = true,
+ .EXT_multi_draw = true,
+ .EXT_mutable_descriptor_type = true,
+ .EXT_pci_bus_info = true,
+ .EXT_primitive_topology_list_restart = true,
+ .EXT_primitives_generated_query = true,
+ .EXT_provoking_vertex = true,
+ .EXT_queue_family_foreign = true,
+ .EXT_robustness2 = true,
+ .EXT_shader_stencil_export = true,
+ .EXT_transform_feedback = true,
+ .EXT_vertex_attribute_divisor = true,
+ /* vendor extensions */
+ .VALVE_mutable_descriptor_type = true,
+};
+
void
vkr_log(const char *fmt, ...)
{
@@ -44,7 +156,31 @@ vkr_log(const char *fmt, ...)
line[len++] = '\n';
line[len] = '\0';
- virgl_log(line);
+ virgl_log("%s", line);
+}
+
+void
+vkr_extension_table_init(struct vn_info_extension_table *table,
+ const char *const *exts,
+ uint32_t count)
+{
+ memset(table, 0, sizeof(*table));
+ for (uint32_t i = 0; i < count; i++) {
+ const int32_t index = vn_info_extension_index(exts[i]);
+ if (index >= 0)
+ table->enabled[index] = true;
+ }
+}
+
+uint32_t
+vkr_extension_get_spec_version(const char *name)
+{
+ const int32_t index = vn_info_extension_index(name);
+ if (index < 0 || !vkr_extension_table.enabled[index])
+ return 0;
+
+ const struct vn_info_extension *ext = vn_info_extension_get(index);
+ return ext->spec_version;
}
void
@@ -94,5 +230,5 @@ object_array_init(struct vkr_context *ctx,
arr->objects[i] = obj;
}
- return arr;
+ return true;
}
diff --git a/src/venus/vkr_common.h b/src/venus/vkr_common.h
index 2abc8d57..df1ca325 100644
--- a/src/venus/vkr_common.h
+++ b/src/venus/vkr_common.h
@@ -10,6 +10,7 @@
#include <assert.h>
#include <errno.h>
+#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
@@ -18,14 +19,14 @@
#include <string.h>
#include "c11/threads.h"
-#include "os/os_misc.h"
-#include "os/os_thread.h"
#include "pipe/p_compiler.h"
+#include "util/hash_table.h"
+#include "util/os_misc.h"
#include "util/u_double_list.h"
-#include "util/u_hash_table.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_pointer.h"
+#include "util/u_thread.h"
#include "venus-protocol/vulkan.h"
#include "virgl_util.h"
#include "virglrenderer.h"
@@ -33,15 +34,8 @@
#include "vkr_renderer.h"
-/*
- * TODO what extensions do we need from the host driver?
- *
- * We don't check vkGetPhysicalDeviceExternalBufferProperties, etc. yet. Even
- * if we did, silently adding external memory info to vkCreateBuffer or
- * vkCreateImage could change the results of vkGetBufferMemoryRequirements or
- * vkGetImageMemoryRequirements and confuse the guest.
- */
-#define FORCE_ENABLE_DMABUF
+/* cap instance and device api versions to this */
+#define VKR_MAX_API_VERSION VK_API_VERSION_1_3
#define VKR_DEBUG(category) (unlikely(vkr_debug_flags & VKR_DEBUG_##category))
@@ -65,6 +59,7 @@
.begin = (offset), .end = (offset) + (size) \
}
+struct vn_info_extension_table;
struct vkr_context;
struct vkr_instance;
struct vkr_physical_device;
@@ -163,6 +158,23 @@ extern uint32_t vkr_debug_flags;
void
vkr_log(const char *fmt, ...);
+static inline uint32_t
+vkr_api_version_cap_minor(uint32_t version, uint32_t cap)
+{
+ assert(VK_API_VERSION_MAJOR(version) == VK_API_VERSION_MAJOR(cap));
+ if (VK_API_VERSION_MINOR(version) > VK_API_VERSION_MINOR(cap))
+ version = cap - VK_API_VERSION_PATCH(cap) + VK_API_VERSION_PATCH(version);
+ return version;
+}
+
+void
+vkr_extension_table_init(struct vn_info_extension_table *table,
+ const char *const *exts,
+ uint32_t count);
+
+uint32_t
+vkr_extension_get_spec_version(const char *name);
+
bool
object_array_init(struct vkr_context *ctx,
struct object_array *arr,
@@ -176,13 +188,28 @@ void
object_array_fini(struct object_array *arr);
static inline void *
-vkr_find_pnext(const void *chain, VkStructureType type)
+vkr_find_struct(const void *chain, VkStructureType type)
+{
+ VkBaseOutStructure *s = (VkBaseOutStructure *)chain;
+ while (s) {
+ if (s->sType == type)
+ return s;
+ s = s->pNext;
+ }
+ return NULL;
+}
+
+/*
+ * Find struct in the pNext of chain and return its previous struct.
+ */
+static inline void *
+vkr_find_prev_struct(const void *chain, VkStructureType type)
{
- VkBaseOutStructure *pnext = (VkBaseOutStructure *)chain;
- while (pnext) {
- if (pnext->sType == type)
- return pnext;
- pnext = pnext->pNext;
+ VkBaseOutStructure *prev = (VkBaseOutStructure *)chain;
+ while (prev->pNext) {
+ if (prev->pNext->sType == type)
+ return prev;
+ prev = prev->pNext;
}
return NULL;
}
@@ -258,7 +285,7 @@ vkr_region_size(const struct vkr_region *region)
static inline bool
vkr_region_is_aligned(const struct vkr_region *region, size_t align)
{
- assert(align && util_is_power_of_two(align));
+ assert(util_is_power_of_two_nonzero(align));
return !((region->begin | region->end) & (align - 1));
}
diff --git a/src/venus/vkr_context.c b/src/venus/vkr_context.c
index 1e301d53..26dfaa0d 100644
--- a/src/venus/vkr_context.c
+++ b/src/venus/vkr_context.c
@@ -5,10 +5,16 @@
#include "vkr_context.h"
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
#include "pipe/p_state.h"
+#include "util/anon_file.h"
#include "venus-protocol/vn_protocol_renderer_dispatches.h"
-#include "virgl_protocol.h" /* for transfer_mode */
-#include "vrend_iov.h"
+
+#define XXH_INLINE_ALL
+#include "util/xxhash.h"
#include "vkr_buffer.h"
#include "vkr_command_buffer.h"
@@ -114,28 +120,65 @@ vkr_context_init_dispatch(struct vkr_context *ctx)
vkr_context_init_command_buffer_dispatch(ctx);
}
+static struct vkr_cpu_sync *
+vkr_alloc_cpu_sync(uint32_t flags, uint32_t ring_idx, uint64_t fence_id)
+{
+ struct vkr_cpu_sync *sync;
+ sync = malloc(sizeof(*sync));
+ if (!sync)
+ return NULL;
+
+ sync->flags = flags;
+ sync->fence_id = fence_id;
+ sync->ring_idx = ring_idx;
+ list_inithead(&sync->head);
+
+ return sync;
+}
+
static int
vkr_context_submit_fence_locked(struct virgl_context *base,
uint32_t flags,
- uint64_t queue_id,
- void *fence_cookie)
+ uint32_t ring_idx,
+ uint64_t fence_id)
{
struct vkr_context *ctx = (struct vkr_context *)base;
- struct vkr_queue *queue;
VkResult result;
- queue = util_hash_table_get_u64(ctx->object_table, queue_id);
- if (!queue)
+ if (ring_idx >= ARRAY_SIZE(ctx->sync_queues)) {
+ vkr_log("invalid sync ring_idx %u", ring_idx);
return -EINVAL;
+ }
+
+ if (ring_idx == 0) {
+ if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
+ ctx->base.fence_retire(&ctx->base, ring_idx, fence_id);
+ } else {
+ struct vkr_cpu_sync *sync = vkr_alloc_cpu_sync(flags, ring_idx, fence_id);
+ if (!sync)
+ return -ENOMEM;
+
+ list_addtail(&sync->head, &ctx->signaled_cpu_syncs);
+ }
+ return 0;
+ } else if (!ctx->sync_queues[ring_idx]) {
+ vkr_log("invalid ring_idx %u", ring_idx);
+ return -EINVAL;
+ }
+
+ struct vkr_queue *queue = ctx->sync_queues[ring_idx];
struct vkr_device *dev = queue->device;
+ struct vn_device_proc_table *vk = &dev->proc_table;
struct vkr_queue_sync *sync =
- vkr_device_alloc_queue_sync(dev, flags, queue->base.id, fence_cookie);
+ vkr_device_alloc_queue_sync(dev, flags, ring_idx, fence_id);
if (!sync)
return -ENOMEM;
- result = vkQueueSubmit(queue->base.handle.queue, 0, NULL, sync->fence);
- if (result != VK_SUCCESS) {
+ result = vk->QueueSubmit(queue->base.handle.queue, 0, NULL, sync->fence);
+ if (result == VK_ERROR_DEVICE_LOST) {
+ sync->device_lost = true;
+ } else if (result != VK_SUCCESS) {
vkr_device_free_queue_sync(dev, sync);
return -1;
}
@@ -158,14 +201,18 @@ vkr_context_submit_fence_locked(struct virgl_context *base,
static int
vkr_context_submit_fence(struct virgl_context *base,
uint32_t flags,
- uint64_t queue_id,
- void *fence_cookie)
+ uint32_t ring_idx,
+ uint64_t fence_id)
{
struct vkr_context *ctx = (struct vkr_context *)base;
int ret;
+ /* always merge fences */
+ assert(!(flags & ~VIRGL_RENDERER_FENCE_FLAG_MERGEABLE));
+ flags = VIRGL_RENDERER_FENCE_FLAG_MERGEABLE;
+
mtx_lock(&ctx->mutex);
- ret = vkr_context_submit_fence_locked(base, flags, queue_id, fence_cookie);
+ ret = vkr_context_submit_fence_locked(base, flags, ring_idx, fence_id);
mtx_unlock(&ctx->mutex);
return ret;
}
@@ -181,12 +228,20 @@ vkr_context_retire_fences_locked(struct virgl_context *base)
/* retire syncs from destroyed devices */
LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &ctx->signaled_syncs, head) {
- /* queue_id might have already get reused but is opaque to the clients */
- ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
+ /* ring_idx might have already get reused but is opaque to the clients */
+ ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
free(sync);
}
list_inithead(&ctx->signaled_syncs);
+ /* retire syncs from CPU timeline */
+ struct vkr_cpu_sync *cpu_sync, *cpu_sync_tmp;
+ LIST_FOR_EACH_ENTRY_SAFE (cpu_sync, cpu_sync_tmp, &ctx->signaled_cpu_syncs, head) {
+ ctx->base.fence_retire(&ctx->base, cpu_sync->ring_idx, cpu_sync->fence_id);
+ free(cpu_sync);
+ }
+ list_inithead(&ctx->signaled_cpu_syncs);
+
/* flush first and once because the per-queue sync threads might write to
* it any time
*/
@@ -201,7 +256,7 @@ vkr_context_retire_fences_locked(struct virgl_context *base)
vkr_queue_get_signaled_syncs(queue, &retired_syncs, &queue_empty);
LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &retired_syncs, head) {
- ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
+ ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
vkr_device_free_queue_sync(dev, sync);
}
@@ -241,7 +296,7 @@ vkr_context_submit_cmd(struct virgl_context *base, const void *buffer, size_t si
/* CS error is considered fatal (destroy the context?) */
if (vkr_cs_decoder_get_fatal(&ctx->decoder)) {
mtx_unlock(&ctx->mutex);
- return EINVAL;
+ return -EINVAL;
}
vkr_cs_decoder_set_stream(&ctx->decoder, buffer, size);
@@ -249,7 +304,7 @@ vkr_context_submit_cmd(struct virgl_context *base, const void *buffer, size_t si
while (vkr_cs_decoder_has_command(&ctx->decoder)) {
vn_dispatch_command(&ctx->dispatch);
if (vkr_cs_decoder_get_fatal(&ctx->decoder)) {
- ret = EINVAL;
+ ret = -EINVAL;
break;
}
}
@@ -264,35 +319,52 @@ vkr_context_submit_cmd(struct virgl_context *base, const void *buffer, size_t si
static int
vkr_context_get_blob_locked(struct virgl_context *base,
uint64_t blob_id,
+ uint64_t blob_size,
uint32_t flags,
struct virgl_context_blob *blob)
{
struct vkr_context *ctx = (struct vkr_context *)base;
struct vkr_device_memory *mem;
enum virgl_resource_fd_type fd_type = VIRGL_RESOURCE_FD_INVALID;
+ int fd = -1;
- mem = util_hash_table_get_u64(ctx->object_table, blob_id);
+ /* blob_id == 0 does not refer to an existing VkDeviceMemory, but implies a
+ * shm allocation. It serves a similar purpose as iov does, but it is
+ * logically contiguous and it can be exported.
+ */
+ if (!blob_id && flags == VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
+ fd = os_create_anonymous_file(blob_size, "vkr-shmem");
+ if (fd < 0)
+ return -ENOMEM;
+
+ blob->type = VIRGL_RESOURCE_FD_SHM;
+ blob->u.fd = fd;
+ blob->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
+ return 0;
+ }
+
+ mem = vkr_context_get_object(ctx, blob_id);
if (!mem || mem->base.type != VK_OBJECT_TYPE_DEVICE_MEMORY)
- return EINVAL;
+ return -EINVAL;
/* a memory can only be exported once; we don't want two resources to point
* to the same storage.
*/
if (mem->exported)
- return EINVAL;
+ return -EINVAL;
if (!mem->valid_fd_types)
- return EINVAL;
+ return -EINVAL;
if (flags & VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
const bool host_visible = mem->property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
if (!host_visible)
- return EINVAL;
+ return -EINVAL;
}
if (flags & VIRGL_RENDERER_BLOB_FLAG_USE_CROSS_DEVICE) {
if (!(mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_DMABUF)))
- return EINVAL;
+ return -EINVAL;
fd_type = VIRGL_RESOURCE_FD_DMABUF;
}
@@ -305,30 +377,39 @@ vkr_context_get_blob_locked(struct virgl_context *base,
fd_type = VIRGL_RESOURCE_FD_OPAQUE;
}
- int fd = -1;
if (fd_type != VIRGL_RESOURCE_FD_INVALID) {
VkExternalMemoryHandleTypeFlagBits handle_type;
+ int ret;
+
switch (fd_type) {
case VIRGL_RESOURCE_FD_DMABUF:
handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
break;
case VIRGL_RESOURCE_FD_OPAQUE:
handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ assert(sizeof(blob->opaque_fd_metadata.driver_uuid) == VK_UUID_SIZE);
+ memcpy(blob->opaque_fd_metadata.driver_uuid,
+ mem->device->physical_device->id_properties.driverUUID, VK_UUID_SIZE);
+ memcpy(blob->opaque_fd_metadata.device_uuid,
+ mem->device->physical_device->id_properties.deviceUUID, VK_UUID_SIZE);
+ blob->opaque_fd_metadata.allocation_size = mem->allocation_size;
+ blob->opaque_fd_metadata.memory_type_index = mem->memory_type_index;
break;
default:
- return EINVAL;
+ return -EINVAL;
+ }
+
+ ret = vkr_device_memory_export_fd(mem, handle_type, &fd);
+ if (ret)
+ return ret;
+
+ if (fd_type == VIRGL_RESOURCE_FD_DMABUF &&
+ (uint64_t)lseek(fd, 0, SEEK_END) < blob_size) {
+ close(fd);
+ return -EINVAL;
}
- VkResult result = ctx->instance->get_memory_fd(
- mem->device,
- &(VkMemoryGetFdInfoKHR){
- .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
- .memory = mem->base.handle.device_memory,
- .handleType = handle_type,
- },
- &fd);
- if (result != VK_SUCCESS)
- return EINVAL;
+ mem->exported = true;
}
blob->type = fd_type;
@@ -350,14 +431,14 @@ vkr_context_get_blob_locked(struct virgl_context *base,
blob->map_info = VIRGL_RENDERER_MAP_CACHE_NONE;
}
- blob->renderer_data = mem;
-
return 0;
}
static int
vkr_context_get_blob(struct virgl_context *base,
+ UNUSED uint32_t res_id,
uint64_t blob_id,
+ uint64_t blob_size,
uint32_t flags,
struct virgl_context_blob *blob)
{
@@ -365,109 +446,22 @@ vkr_context_get_blob(struct virgl_context *base,
int ret;
mtx_lock(&ctx->mutex);
- ret = vkr_context_get_blob_locked(base, blob_id, flags, blob);
- /* XXX unlock in vkr_context_get_blob_done on success */
- if (ret)
- mtx_unlock(&ctx->mutex);
-
- return ret;
-}
-
-static void
-vkr_context_get_blob_done(struct virgl_context *base,
- uint32_t res_id,
- struct virgl_context_blob *blob)
-{
- struct vkr_context *ctx = (struct vkr_context *)base;
- struct vkr_device_memory *mem = blob->renderer_data;
-
- mem->exported = true;
- mem->exported_res_id = res_id;
- list_add(&mem->exported_head, &ctx->newly_exported_memories);
-
- /* XXX locked in vkr_context_get_blob */
+ ret = vkr_context_get_blob_locked(base, blob_id, blob_size, flags, blob);
mtx_unlock(&ctx->mutex);
-}
-
-static int
-vkr_context_transfer_3d_locked(struct virgl_context *base,
- struct virgl_resource *res,
- const struct vrend_transfer_info *info,
- int transfer_mode)
-{
- struct vkr_context *ctx = (struct vkr_context *)base;
- struct vkr_resource_attachment *att;
- const struct iovec *iov;
- int iov_count;
-
- if (info->level || info->stride || info->layer_stride)
- return EINVAL;
-
- if (info->iovec) {
- iov = info->iovec;
- iov_count = info->iovec_cnt;
- } else {
- iov = res->iov;
- iov_count = res->iov_count;
- }
-
- if (!iov || !iov_count)
- return 0;
- att = util_hash_table_get(ctx->resource_table, uintptr_to_pointer(res->res_id));
- if (!att)
- return EINVAL;
-
- assert(att->resource == res);
-
- /* TODO transfer via dmabuf (and find a solution to coherency issues) */
- if (LIST_IS_EMPTY(&att->memories)) {
- vkr_log("unable to transfer without VkDeviceMemory (TODO)");
- return EINVAL;
- }
-
- struct vkr_device_memory *mem =
- LIST_ENTRY(struct vkr_device_memory, att->memories.next, exported_head);
- const VkMappedMemoryRange range = {
- .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
- .memory = mem->base.handle.device_memory,
- .offset = info->box->x,
- .size = info->box->width,
- };
-
- void *ptr;
- VkResult result =
- vkMapMemory(mem->device, range.memory, range.offset, range.size, 0, &ptr);
- if (result != VK_SUCCESS)
- return EINVAL;
-
- if (transfer_mode == VIRGL_TRANSFER_TO_HOST) {
- vrend_read_from_iovec(iov, iov_count, range.offset, ptr, range.size);
- vkFlushMappedMemoryRanges(mem->device, 1, &range);
- } else {
- vkInvalidateMappedMemoryRanges(mem->device, 1, &range);
- vrend_write_to_iovec(iov, iov_count, range.offset, ptr, range.size);
- }
-
- vkUnmapMemory(mem->device, range.memory);
-
- return 0;
+ return ret;
}
static int
vkr_context_transfer_3d(struct virgl_context *base,
struct virgl_resource *res,
- const struct vrend_transfer_info *info,
- int transfer_mode)
+ UNUSED const struct vrend_transfer_info *info,
+ UNUSED int transfer_mode)
{
struct vkr_context *ctx = (struct vkr_context *)base;
- int ret;
-
- mtx_lock(&ctx->mutex);
- ret = vkr_context_transfer_3d_locked(base, res, info, transfer_mode);
- mtx_unlock(&ctx->mutex);
- return ret;
+ vkr_log("no transfer support for ctx %d and res %d", ctx->base.ctx_id, res->res_id);
+ return -1;
}
static void
@@ -476,7 +470,7 @@ vkr_context_attach_resource_locked(struct virgl_context *base, struct virgl_reso
struct vkr_context *ctx = (struct vkr_context *)base;
struct vkr_resource_attachment *att;
- att = util_hash_table_get(ctx->resource_table, uintptr_to_pointer(res->res_id));
+ att = vkr_context_get_resource(ctx, res->res_id);
if (att) {
assert(att->resource == res);
return;
@@ -486,27 +480,29 @@ vkr_context_attach_resource_locked(struct virgl_context *base, struct virgl_reso
if (!att)
return;
- /* TODO When in multi-process mode, we cannot share a virgl_resource as-is
- * to another process. The resource must have a valid fd, and only the fd
- * and the iov can be sent the other process.
- *
- * For vrend-to-vkr sharing, we can get the fd from pipe_resource.
- */
+ void *mmap_ptr = NULL;
+ if (res->fd_type == VIRGL_RESOURCE_FD_SHM) {
+ mmap_ptr =
+ mmap(NULL, res->map_size, PROT_WRITE | PROT_READ, MAP_SHARED, res->fd, 0);
+ if (mmap_ptr == MAP_FAILED) {
+ free(att);
+ return;
+ }
+ }
att->resource = res;
- list_inithead(&att->memories);
- /* associate a memory with the resource, if any */
- struct vkr_device_memory *mem;
- LIST_FOR_EACH_ENTRY (mem, &ctx->newly_exported_memories, exported_head) {
- if (mem->exported_res_id == res->res_id) {
- list_del(&mem->exported_head);
- list_addtail(&mem->exported_head, &att->memories);
- break;
- }
+ if (mmap_ptr) {
+ att->shm_iov.iov_base = mmap_ptr;
+ att->shm_iov.iov_len = res->map_size;
+ att->iov = &att->shm_iov;
+ att->iov_count = 1;
+ } else {
+ att->iov = res->iov;
+ att->iov_count = res->iov_count;
}
- util_hash_table_set(ctx->resource_table, uintptr_to_pointer(res->res_id), att);
+ vkr_context_add_resource(ctx, att);
}
static void
@@ -524,7 +520,38 @@ vkr_context_detach_resource(struct virgl_context *base, struct virgl_resource *r
struct vkr_context *ctx = (struct vkr_context *)base;
mtx_lock(&ctx->mutex);
- util_hash_table_remove(ctx->resource_table, uintptr_to_pointer(res->res_id));
+
+ const struct vkr_resource_attachment *att = ctx->encoder.stream.attachment;
+ if (att && att->resource == res) {
+ /* TODO vkSetReplyCommandStreamMESA should support res_id 0 to unset.
+ * Until then, and until we can ignore older guests, treat this as
+ * non-fatal
+ */
+ vkr_cs_encoder_set_stream(&ctx->encoder, NULL, 0, 0);
+ }
+
+ struct vkr_ring *ring, *ring_tmp;
+ LIST_FOR_EACH_ENTRY_SAFE (ring, ring_tmp, &ctx->rings, head) {
+ if (ring->attachment->resource != res)
+ continue;
+
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+ mtx_unlock(&ctx->mutex);
+
+ vkr_ring_stop(ring);
+
+ mtx_lock(&ctx->mutex);
+ vkr_ring_destroy(ring);
+ }
+
+ if (res->fd_type == VIRGL_RESOURCE_FD_SHM) {
+ struct vkr_resource_attachment *att = vkr_context_get_resource(ctx, res->res_id);
+ if (att)
+ munmap(att->shm_iov.iov_base, att->shm_iov.iov_len);
+ }
+
+ vkr_context_remove_resource(ctx, res->res_id);
+
mtx_unlock(&ctx->mutex);
}
@@ -549,13 +576,17 @@ vkr_context_destroy(struct virgl_context *base)
vkr_instance_destroy(ctx, ctx->instance);
}
- util_hash_table_destroy(ctx->resource_table);
- util_hash_table_destroy_u64(ctx->object_table);
+ _mesa_hash_table_destroy(ctx->resource_table, vkr_context_free_resource);
+ _mesa_hash_table_destroy(ctx->object_table, vkr_context_free_object);
struct vkr_queue_sync *sync, *tmp;
LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &ctx->signaled_syncs, head)
free(sync);
+ struct vkr_queue_sync *cpu_sync, *cpu_sync_tmp;
+ LIST_FOR_EACH_ENTRY_SAFE (cpu_sync, cpu_sync_tmp, &ctx->signaled_cpu_syncs, head)
+ free(cpu_sync);
+
if (ctx->fence_eventfd >= 0)
close(ctx->fence_eventfd);
@@ -574,7 +605,6 @@ vkr_context_init_base(struct vkr_context *ctx)
ctx->base.detach_resource = vkr_context_detach_resource;
ctx->base.transfer_3d = vkr_context_transfer_3d;
ctx->base.get_blob = vkr_context_get_blob;
- ctx->base.get_blob_done = vkr_context_get_blob_done;
ctx->base.submit_cmd = vkr_context_submit_cmd;
ctx->base.get_fencing_fd = vkr_context_get_fencing_fd;
@@ -582,22 +612,29 @@ vkr_context_init_base(struct vkr_context *ctx)
ctx->base.submit_fence = vkr_context_submit_fence;
}
-static void
-destroy_func_object(void *val)
+static uint32_t
+vkr_hash_u64(const void *key)
{
- struct vkr_object *obj = val;
- free(obj);
+ return XXH32(key, sizeof(uint64_t), 0);
}
-static void
-destroy_func_resource(void *val)
+static bool
+vkr_key_u64_equal(const void *key1, const void *key2)
{
- struct vkr_resource_attachment *att = val;
- struct vkr_device_memory *mem, *tmp;
+ return *(const uint64_t *)key1 == *(const uint64_t *)key2;
+}
- LIST_FOR_EACH_ENTRY_SAFE (mem, tmp, &att->memories, exported_head)
- list_delinit(&mem->exported_head);
+void
+vkr_context_free_object(struct hash_entry *entry)
+{
+ struct vkr_object *obj = entry->data;
+ free(obj);
+}
+void
+vkr_context_free_resource(struct hash_entry *entry)
+{
+ struct vkr_resource_attachment *att = entry->data;
free(att);
}
@@ -606,17 +643,13 @@ vkr_context_create(size_t debug_len, const char *debug_name)
{
struct vkr_context *ctx;
- /* TODO inject a proxy context when multi-process */
-
ctx = calloc(1, sizeof(*ctx));
if (!ctx)
return NULL;
ctx->debug_name = malloc(debug_len + 1);
- if (!ctx->debug_name) {
- free(ctx);
- return NULL;
- }
+ if (!ctx->debug_name)
+ goto err_debug_name;
memcpy(ctx->debug_name, debug_name, debug_len);
ctx->debug_name[debug_len] = '\0';
@@ -631,21 +664,17 @@ vkr_context_create(size_t debug_len, const char *debug_name)
if (VKR_DEBUG(VALIDATE))
ctx->validate_level = VKR_CONTEXT_VALIDATE_FULL;
- if (mtx_init(&ctx->mutex, mtx_plain) != thrd_success) {
- free(ctx->debug_name);
- free(ctx);
- return NULL;
- }
+ if (mtx_init(&ctx->mutex, mtx_plain) != thrd_success)
+ goto err_mtx_init;
- list_inithead(&ctx->rings);
+ ctx->object_table = _mesa_hash_table_create(NULL, vkr_hash_u64, vkr_key_u64_equal);
+ if (!ctx->object_table)
+ goto err_ctx_object_table;
- ctx->object_table = util_hash_table_create_u64(destroy_func_object);
ctx->resource_table =
- util_hash_table_create(hash_func_u32, compare_func, destroy_func_resource);
- if (!ctx->object_table || !ctx->resource_table)
- goto fail;
-
- list_inithead(&ctx->newly_exported_memories);
+ _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
+ if (!ctx->resource_table)
+ goto err_ctx_resource_table;
vkr_cs_decoder_init(&ctx->decoder, ctx->object_table);
vkr_cs_encoder_init(&ctx->encoder, &ctx->decoder.fatal_error);
@@ -657,23 +686,27 @@ vkr_context_create(size_t debug_len, const char *debug_name)
!(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)) {
ctx->fence_eventfd = create_eventfd(0);
if (ctx->fence_eventfd < 0)
- goto fail;
+ goto err_eventfd;
} else {
ctx->fence_eventfd = -1;
}
+ list_inithead(&ctx->rings);
list_inithead(&ctx->busy_queues);
list_inithead(&ctx->signaled_syncs);
+ list_inithead(&ctx->signaled_cpu_syncs);
return &ctx->base;
-fail:
- if (ctx->object_table)
- util_hash_table_destroy_u64(ctx->object_table);
- if (ctx->resource_table)
- util_hash_table_destroy(ctx->resource_table);
+err_eventfd:
+ _mesa_hash_table_destroy(ctx->resource_table, vkr_context_free_resource);
+err_ctx_resource_table:
+ _mesa_hash_table_destroy(ctx->object_table, vkr_context_free_object);
+err_ctx_object_table:
mtx_destroy(&ctx->mutex);
+err_mtx_init:
free(ctx->debug_name);
+err_debug_name:
free(ctx);
return NULL;
}
diff --git a/src/venus/vkr_context.h b/src/venus/vkr_context.h
index 233205b0..c6f7e11c 100644
--- a/src/venus/vkr_context.h
+++ b/src/venus/vkr_context.h
@@ -10,6 +10,7 @@
#include "venus-protocol/vn_protocol_renderer_defines.h"
#include "virgl_context.h"
+#include "vrend_iov.h"
#include "vkr_cs.h"
@@ -19,15 +20,17 @@ struct virgl_resource;
* When a virgl_resource is attached in vkr_context_attach_resource, a
* vkr_resource_attachment is created. A vkr_resource_attachment is valid
* until the resource it tracks is detached.
- *
- * To support transfers to resources not backed by coherent dma-bufs, we
- * associate a vkr_resource_attachment with a (list of) vkr_device_memory.
- * This way, we can find a vkr_device_memory from a vkr_resource_attachment
- * and do transfers using VkDeviceMemory.
*/
struct vkr_resource_attachment {
struct virgl_resource *resource;
- struct list_head memories;
+
+ /* if VIRGL_RESOURCE_FD_SHM, this is the mapping of the shm and iov below
+ * points to this
+ */
+ struct iovec shm_iov;
+
+ const struct iovec *iov;
+ int iov_count;
};
enum vkr_context_validate_level {
@@ -39,6 +42,14 @@ enum vkr_context_validate_level {
VKR_CONTEXT_VALIDATE_FULL,
};
+struct vkr_cpu_sync {
+ uint32_t flags;
+ uint32_t ring_idx;
+ uint64_t fence_id;
+
+ struct list_head head;
+};
+
struct vkr_context {
struct virgl_context base;
@@ -49,9 +60,8 @@ struct vkr_context {
mtx_t mutex;
struct list_head rings;
- struct util_hash_table_u64 *object_table;
- struct util_hash_table *resource_table;
- struct list_head newly_exported_memories;
+ struct hash_table *object_table;
+ struct hash_table *resource_table;
struct vkr_cs_encoder encoder;
struct vkr_cs_decoder decoder;
@@ -60,15 +70,46 @@ struct vkr_context {
int fence_eventfd;
struct list_head busy_queues;
struct list_head signaled_syncs;
+ struct list_head signaled_cpu_syncs;
+
+ struct vkr_queue *sync_queues[64];
struct vkr_instance *instance;
char *instance_name;
};
+void
+vkr_context_free_resource(struct hash_entry *entry);
+
+static inline void
+vkr_context_add_resource(struct vkr_context *ctx, struct vkr_resource_attachment *att)
+{
+ assert(!_mesa_hash_table_search(ctx->resource_table, &att->resource->res_id));
+ _mesa_hash_table_insert(ctx->resource_table, &att->resource->res_id, att);
+}
+
+static inline void
+vkr_context_remove_resource(struct vkr_context *ctx, uint32_t res_id)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->resource_table, &res_id);
+ if (likely(entry)) {
+ vkr_context_free_resource(entry);
+ _mesa_hash_table_remove(ctx->resource_table, entry);
+ }
+}
+
+static inline struct vkr_resource_attachment *
+vkr_context_get_resource(struct vkr_context *ctx, uint32_t res_id)
+{
+ const struct hash_entry *entry = _mesa_hash_table_search(ctx->resource_table, &res_id);
+ return likely(entry) ? entry->data : NULL;
+}
+
static inline bool
vkr_context_validate_object_id(struct vkr_context *ctx, vkr_object_id id)
{
- if (unlikely(!id || util_hash_table_get_u64(ctx->object_table, id))) {
+ if (unlikely(!id || _mesa_hash_table_search(ctx->object_table, &id))) {
+ vkr_log("invalid object id %" PRIu64, id);
vkr_cs_decoder_set_fatal(&ctx->decoder);
return false;
}
@@ -89,23 +130,29 @@ vkr_context_alloc_object(UNUSED struct vkr_context *ctx,
return vkr_object_alloc(size, type, id);
}
+void
+vkr_context_free_object(struct hash_entry *entry);
+
static inline void
vkr_context_add_object(struct vkr_context *ctx, struct vkr_object *obj)
{
assert(vkr_is_recognized_object_type(obj->type));
assert(obj->id);
- assert(!util_hash_table_get_u64(ctx->object_table, obj->id));
+ assert(!_mesa_hash_table_search(ctx->object_table, &obj->id));
- util_hash_table_set_u64(ctx->object_table, obj->id, obj);
+ _mesa_hash_table_insert(ctx->object_table, &obj->id, obj);
}
static inline void
vkr_context_remove_object(struct vkr_context *ctx, struct vkr_object *obj)
{
- assert(util_hash_table_get_u64(ctx->object_table, obj->id));
+ assert(_mesa_hash_table_search(ctx->object_table, &obj->id));
- /* this frees obj */
- util_hash_table_remove_u64(ctx->object_table, obj->id);
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->object_table, &obj->id);
+ if (likely(entry)) {
+ vkr_context_free_object(entry);
+ _mesa_hash_table_remove(ctx->object_table, entry);
+ }
}
static inline void
@@ -117,6 +164,13 @@ vkr_context_remove_objects(struct vkr_context *ctx, struct list_head *objects)
/* objects should be reinitialized if to be reused */
}
+static inline void *
+vkr_context_get_object(struct vkr_context *ctx, vkr_object_id obj_id)
+{
+ const struct hash_entry *entry = _mesa_hash_table_search(ctx->object_table, &obj_id);
+ return likely(entry) ? entry->data : NULL;
+}
+
static inline const char *
vkr_context_get_name(const struct vkr_context *ctx)
{
diff --git a/src/venus/vkr_cs.c b/src/venus/vkr_cs.c
index 6748e9e1..1eeb2688 100644
--- a/src/venus/vkr_cs.c
+++ b/src/venus/vkr_cs.c
@@ -7,15 +7,26 @@
#include "vrend_iov.h"
+#include "vkr_context.h"
+
void
vkr_cs_encoder_set_stream(struct vkr_cs_encoder *enc,
- const struct iovec *iov,
- int iov_count,
+ const struct vkr_resource_attachment *att,
size_t offset,
size_t size)
{
- enc->stream.iov = iov;
- enc->stream.iov_count = iov_count;
+ if (!att) {
+ memset(&enc->stream, 0, sizeof(enc->stream));
+ enc->remaining_size = 0;
+ enc->next_iov = 0;
+ enc->cur = NULL;
+ enc->end = NULL;
+ return;
+ }
+
+ enc->stream.attachment = att;
+ enc->stream.iov = att->iov;
+ enc->stream.iov_count = att->iov_count;
enc->stream.offset = offset;
enc->stream.size = size;
/* clear cache */
@@ -88,6 +99,7 @@ vkr_cs_encoder_seek_stream(struct vkr_cs_encoder *enc, size_t pos)
size_t iov_offset;
if (pos > enc->stream.size ||
!vkr_cs_encoder_translate_stream_offset(enc, offset, &iov_index, &iov_offset)) {
+ vkr_log("failed to seek the reply stream to %zu", pos);
vkr_cs_encoder_set_fatal(enc);
return;
}
@@ -147,6 +159,7 @@ vkr_cs_encoder_write_internal(struct vkr_cs_encoder *enc,
size_t ptr_size;
uint8_t *ptr = vkr_cs_encoder_get_ptr(enc, val_size, &ptr_size);
if (unlikely(!ptr)) {
+ vkr_log("failed to write value to the reply stream");
vkr_cs_encoder_set_fatal(enc);
return;
}
@@ -160,6 +173,7 @@ vkr_cs_encoder_write_internal(struct vkr_cs_encoder *enc,
size_t ptr_size;
const void *ptr = vkr_cs_encoder_get_ptr(enc, pad_size, &ptr_size);
if (unlikely(!ptr)) {
+ vkr_log("failed to write padding to the reply stream");
vkr_cs_encoder_set_fatal(enc);
return;
}
@@ -168,8 +182,7 @@ vkr_cs_encoder_write_internal(struct vkr_cs_encoder *enc,
}
void
-vkr_cs_decoder_init(struct vkr_cs_decoder *dec,
- const struct util_hash_table_u64 *object_table)
+vkr_cs_decoder_init(struct vkr_cs_decoder *dec, const struct hash_table *object_table)
{
memset(dec, 0, sizeof(*dec));
dec->object_table = object_table;
diff --git a/src/venus/vkr_cs.h b/src/venus/vkr_cs.h
index d39474ca..4e2daa6b 100644
--- a/src/venus/vkr_cs.h
+++ b/src/venus/vkr_cs.h
@@ -8,7 +8,12 @@
#include "vkr_common.h"
-#define VKR_CS_DECODER_TEMP_POOL_MAX_SIZE (64u * 1024 * 1024)
+/* This is to avoid integer overflows and to catch bogus allocations (e.g.,
+ * the guest driver encodes an uninitialized value). In practice, the largest
+ * allocations we've seen are from vkGetPipelineCacheData and are dozens of
+ * MBs.
+ */
+#define VKR_CS_DECODER_TEMP_POOL_MAX_SIZE (1u * 1024 * 1024 * 1024)
struct iovec;
@@ -16,6 +21,7 @@ struct vkr_cs_encoder {
bool *fatal_error;
struct {
+ const struct vkr_resource_attachment *attachment;
const struct iovec *iov;
int iov_count;
size_t offset;
@@ -39,6 +45,14 @@ struct vkr_cs_decoder_saved_state {
uint8_t *pool_reset_to;
};
+/*
+ * We usually need many small allocations during decoding. Those allocations
+ * are suballocated from the temp pool.
+ *
+ * After a command is decoded, vkr_cs_decoder_reset_temp_pool is called to
+ * reset pool->cur. After an entire command stream is decoded,
+ * vkr_cs_decoder_gc_temp_pool is called to garbage collect pool->buffers.
+ */
struct vkr_cs_decoder_temp_pool {
uint8_t **buffers;
uint32_t buffer_count;
@@ -52,7 +66,7 @@ struct vkr_cs_decoder_temp_pool {
};
struct vkr_cs_decoder {
- const struct util_hash_table_u64 *object_table;
+ const struct hash_table *object_table;
bool fatal_error;
struct vkr_cs_decoder_temp_pool temp_pool;
@@ -79,8 +93,7 @@ vkr_cs_encoder_set_fatal(const struct vkr_cs_encoder *enc)
void
vkr_cs_encoder_set_stream(struct vkr_cs_encoder *enc,
- const struct iovec *iov,
- int iov_count,
+ const struct vkr_resource_attachment *att,
size_t offset,
size_t size);
@@ -112,8 +125,7 @@ vkr_cs_encoder_write(struct vkr_cs_encoder *enc,
}
void
-vkr_cs_decoder_init(struct vkr_cs_decoder *dec,
- const struct util_hash_table_u64 *object_table);
+vkr_cs_decoder_init(struct vkr_cs_decoder *dec, const struct hash_table *object_table);
void
vkr_cs_decoder_fini(struct vkr_cs_decoder *dec);
@@ -161,6 +173,7 @@ vkr_cs_decoder_peek_internal(const struct vkr_cs_decoder *dec,
assert(val_size <= size);
if (unlikely(size > (size_t)(dec->end - dec->cur))) {
+ vkr_log("failed to peek %zu bytes", size);
vkr_cs_decoder_set_fatal(dec);
memset(val, 0, val_size);
return false;
@@ -197,9 +210,16 @@ vkr_cs_decoder_lookup_object(const struct vkr_cs_decoder *dec,
if (!id)
return NULL;
- obj = util_hash_table_get_u64((struct util_hash_table_u64 *)dec->object_table, id);
- if (!obj || obj->type != type)
+ const struct hash_entry *entry =
+ _mesa_hash_table_search((struct hash_table *)dec->object_table, &id);
+ obj = likely(entry) ? entry->data : NULL;
+ if (unlikely(!obj || obj->type != type)) {
+ if (obj)
+ vkr_log("object %" PRIu64 " has type %d, not %d", id, obj->type, type);
+ else
+ vkr_log("failed to look up object %" PRIu64, id);
vkr_cs_decoder_set_fatal(dec);
+ }
return obj;
}
@@ -221,6 +241,7 @@ vkr_cs_decoder_alloc_temp(struct vkr_cs_decoder *dec, size_t size)
if (unlikely(size > (size_t)(pool->end - pool->cur))) {
if (!vkr_cs_decoder_alloc_temp_internal(dec, size)) {
+ vkr_log("failed to suballocate %zu bytes from the temp pool", size);
vkr_cs_decoder_set_fatal(dec);
return NULL;
}
@@ -229,7 +250,7 @@ vkr_cs_decoder_alloc_temp(struct vkr_cs_decoder *dec, size_t size)
/* align to 64-bit after we know size is at most
* VKR_CS_DECODER_TEMP_POOL_MAX_SIZE and cannot overflow
*/
- size = (size + 7) & ~7;
+ size = align64(size, 8);
assert(size <= (size_t)(pool->end - pool->cur));
void *ptr = pool->cur;
diff --git a/src/venus/vkr_descriptor_set.c b/src/venus/vkr_descriptor_set.c
index a30a0653..815764fa 100644
--- a/src/venus/vkr_descriptor_set.c
+++ b/src/venus/vkr_descriptor_set.c
@@ -12,8 +12,11 @@ vkr_dispatch_vkGetDescriptorSetLayoutSupport(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetDescriptorSetLayoutSupport *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetDescriptorSetLayoutSupport_args_handle(args);
- vkGetDescriptorSetLayoutSupport(args->device, args->pCreateInfo, args->pSupport);
+ vk->GetDescriptorSetLayoutSupport(args->device, args->pCreateInfo, args->pSupport);
}
static void
@@ -57,8 +60,7 @@ vkr_dispatch_vkDestroyDescriptorPool(struct vn_dispatch_context *dispatch,
if (!pool)
return;
- vkr_context_remove_objects(ctx, &pool->descriptor_sets);
-
+ vkr_descriptor_pool_release(ctx, pool);
vkr_descriptor_pool_destroy_and_remove(ctx, args);
}
@@ -66,6 +68,9 @@ static void
vkr_dispatch_vkResetDescriptorPool(struct vn_dispatch_context *dispatch,
struct vn_command_vkResetDescriptorPool *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
struct vkr_context *ctx = dispatch->data;
struct vkr_descriptor_pool *pool =
@@ -76,9 +81,9 @@ vkr_dispatch_vkResetDescriptorPool(struct vn_dispatch_context *dispatch,
}
vn_replace_vkResetDescriptorPool_args_handle(args);
- args->ret = vkResetDescriptorPool(args->device, args->descriptorPool, args->flags);
+ args->ret = vk->ResetDescriptorPool(args->device, args->descriptorPool, args->flags);
- vkr_context_remove_objects(ctx, &pool->descriptor_sets);
+ vkr_descriptor_pool_release(ctx, pool);
list_inithead(&pool->descriptor_sets);
}
@@ -131,10 +136,13 @@ static void
vkr_dispatch_vkUpdateDescriptorSets(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkUpdateDescriptorSets *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkUpdateDescriptorSets_args_handle(args);
- vkUpdateDescriptorSets(args->device, args->descriptorWriteCount,
- args->pDescriptorWrites, args->descriptorCopyCount,
- args->pDescriptorCopies);
+ vk->UpdateDescriptorSets(args->device, args->descriptorWriteCount,
+ args->pDescriptorWrites, args->descriptorCopyCount,
+ args->pDescriptorCopies);
}
static void
diff --git a/src/venus/vkr_descriptor_set.h b/src/venus/vkr_descriptor_set.h
index 1a7f15d7..1a31cfa9 100644
--- a/src/venus/vkr_descriptor_set.h
+++ b/src/venus/vkr_descriptor_set.h
@@ -8,6 +8,8 @@
#include "vkr_common.h"
+#include "vkr_context.h"
+
struct vkr_descriptor_set_layout {
struct vkr_object base;
};
@@ -50,4 +52,10 @@ vkr_context_init_descriptor_set_dispatch(struct vkr_context *ctx);
void
vkr_context_init_descriptor_update_template_dispatch(struct vkr_context *ctx);
+static inline void
+vkr_descriptor_pool_release(struct vkr_context *ctx, struct vkr_descriptor_pool *pool)
+{
+ vkr_context_remove_objects(ctx, &pool->descriptor_sets);
+}
+
#endif /* VKR_DESCRIPTOR_SET_H */
diff --git a/src/venus/vkr_device.c b/src/venus/vkr_device.c
index a15d431a..4d7aea77 100644
--- a/src/venus/vkr_device.c
+++ b/src/venus/vkr_device.c
@@ -20,6 +20,7 @@ vkr_device_create_queues(struct vkr_context *ctx,
uint32_t create_info_count,
const VkDeviceQueueCreateInfo *create_infos)
{
+ struct vn_device_proc_table *vk = &dev->proc_table;
list_inithead(&dev->queues);
for (uint32_t i = 0; i < create_info_count; i++) {
@@ -32,7 +33,16 @@ vkr_device_create_queues(struct vkr_context *ctx,
.queueIndex = j,
};
VkQueue handle = VK_NULL_HANDLE;
- vkGetDeviceQueue2(dev->base.handle.device, &info, &handle);
+ /* There was a bug in spec which forbids usage of vkGetDeviceQueue2
+ * with flags set to zero. It was fixed in spec version 1.1.130.
+ * Work around drivers that are implementing this buggy behavior
+ */
+ if (info.flags) {
+ vk->GetDeviceQueue2(dev->base.handle.device, &info, &handle);
+ } else {
+ vk->GetDeviceQueue(dev->base.handle.device, info.queueFamilyIndex,
+ info.queueIndex, &handle);
+ }
struct vkr_queue *queue = vkr_queue_create(
ctx, dev, info.flags, info.queueFamilyIndex, info.queueIndex, handle);
@@ -53,94 +63,16 @@ vkr_device_create_queues(struct vkr_context *ctx,
}
static void
-vkr_device_init_entry_points(struct vkr_device *dev, uint32_t api_version)
+vkr_device_init_proc_table(struct vkr_device *dev,
+ uint32_t api_version,
+ const char *const *exts,
+ uint32_t count)
{
- VkDevice handle = dev->base.handle.device;
- if (api_version >= VK_API_VERSION_1_2) {
- dev->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)vkGetDeviceProcAddr(
- handle, "vkGetSemaphoreCounterValue");
- dev->WaitSemaphores =
- (PFN_vkWaitSemaphores)vkGetDeviceProcAddr(handle, "vkWaitSemaphores");
- dev->SignalSemaphore =
- (PFN_vkSignalSemaphore)vkGetDeviceProcAddr(handle, "vkSignalSemaphore");
- dev->GetDeviceMemoryOpaqueCaptureAddress =
- (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)vkGetDeviceProcAddr(
- handle, "vkGetDeviceMemoryOpaqueCaptureAddress");
- dev->GetBufferOpaqueCaptureAddress =
- (PFN_vkGetBufferOpaqueCaptureAddress)vkGetDeviceProcAddr(
- handle, "vkGetBufferOpaqueCaptureAddress");
- dev->GetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)vkGetDeviceProcAddr(
- handle, "vkGetBufferDeviceAddress");
- dev->ResetQueryPool =
- (PFN_vkResetQueryPool)vkGetDeviceProcAddr(handle, "vkResetQueryPool");
- dev->CreateRenderPass2 =
- (PFN_vkCreateRenderPass2)vkGetDeviceProcAddr(handle, "vkCreateRenderPass2");
- dev->CmdBeginRenderPass2 =
- (PFN_vkCmdBeginRenderPass2)vkGetDeviceProcAddr(handle, "vkCmdBeginRenderPass2");
- dev->CmdNextSubpass2 =
- (PFN_vkCmdNextSubpass2)vkGetDeviceProcAddr(handle, "vkCmdNextSubpass2");
- dev->CmdEndRenderPass2 =
- (PFN_vkCmdEndRenderPass2)vkGetDeviceProcAddr(handle, "vkCmdEndRenderPass2");
- dev->CmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)vkGetDeviceProcAddr(
- handle, "vkCmdDrawIndirectCount");
- dev->CmdDrawIndexedIndirectCount =
- (PFN_vkCmdDrawIndexedIndirectCount)vkGetDeviceProcAddr(
- handle, "vkCmdDrawIndexedIndirectCount");
- } else {
- dev->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)vkGetDeviceProcAddr(
- handle, "vkGetSemaphoreCounterValueKHR");
- dev->WaitSemaphores =
- (PFN_vkWaitSemaphores)vkGetDeviceProcAddr(handle, "vkWaitSemaphoresKHR");
- dev->SignalSemaphore =
- (PFN_vkSignalSemaphore)vkGetDeviceProcAddr(handle, "vkSignalSemaphoreKHR");
- dev->GetDeviceMemoryOpaqueCaptureAddress =
- (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)vkGetDeviceProcAddr(
- handle, "vkGetDeviceMemoryOpaqueCaptureAddressKHR");
- dev->GetBufferOpaqueCaptureAddress =
- (PFN_vkGetBufferOpaqueCaptureAddress)vkGetDeviceProcAddr(
- handle, "vkGetBufferOpaqueCaptureAddressKHR");
- dev->GetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)vkGetDeviceProcAddr(
- handle, "vkGetBufferDeviceAddressKHR");
- dev->ResetQueryPool =
- (PFN_vkResetQueryPool)vkGetDeviceProcAddr(handle, "vkResetQueryPoolEXT");
- dev->CreateRenderPass2 =
- (PFN_vkCreateRenderPass2)vkGetDeviceProcAddr(handle, "vkCreateRenderPass2KHR");
- dev->CmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)vkGetDeviceProcAddr(
- handle, "vkCmdBeginRenderPass2KHR");
- dev->CmdNextSubpass2 =
- (PFN_vkCmdNextSubpass2)vkGetDeviceProcAddr(handle, "vkCmdNextSubpass2KHR");
- dev->CmdEndRenderPass2 =
- (PFN_vkCmdEndRenderPass2)vkGetDeviceProcAddr(handle, "vkCmdEndRenderPass2KHR");
- dev->CmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)vkGetDeviceProcAddr(
- handle, "vkCmdDrawIndirectCountKHR");
- dev->CmdDrawIndexedIndirectCount =
- (PFN_vkCmdDrawIndexedIndirectCount)vkGetDeviceProcAddr(
- handle, "vkCmdDrawIndexedIndirectCountKHR");
- }
+ struct vn_info_extension_table ext_table;
+ vkr_extension_table_init(&ext_table, exts, count);
- dev->cmd_bind_transform_feedback_buffers =
- (PFN_vkCmdBindTransformFeedbackBuffersEXT)vkGetDeviceProcAddr(
- handle, "vkCmdBindTransformFeedbackBuffersEXT");
- dev->cmd_begin_transform_feedback =
- (PFN_vkCmdBeginTransformFeedbackEXT)vkGetDeviceProcAddr(
- handle, "vkCmdBeginTransformFeedbackEXT");
- dev->cmd_end_transform_feedback =
- (PFN_vkCmdEndTransformFeedbackEXT)vkGetDeviceProcAddr(
- handle, "vkCmdEndTransformFeedbackEXT");
- dev->cmd_begin_query_indexed = (PFN_vkCmdBeginQueryIndexedEXT)vkGetDeviceProcAddr(
- handle, "vkCmdBeginQueryIndexedEXT");
- dev->cmd_end_query_indexed =
- (PFN_vkCmdEndQueryIndexedEXT)vkGetDeviceProcAddr(handle, "vkCmdEndQueryIndexedEXT");
- dev->cmd_draw_indirect_byte_count =
- (PFN_vkCmdDrawIndirectByteCountEXT)vkGetDeviceProcAddr(
- handle, "vkCmdDrawIndirectByteCountEXT");
-
- dev->get_image_drm_format_modifier_properties =
- (PFN_vkGetImageDrmFormatModifierPropertiesEXT)vkGetDeviceProcAddr(
- handle, "vkGetImageDrmFormatModifierPropertiesEXT");
-
- dev->get_memory_fd_properties = (PFN_vkGetMemoryFdPropertiesKHR)vkGetDeviceProcAddr(
- handle, "vkGetMemoryFdPropertiesKHR");
+ vn_util_init_device_proc_table(dev->base.handle.device, api_version, &ext_table,
+ &dev->proc_table);
}
static void
@@ -196,10 +128,14 @@ vkr_dispatch_vkCreateDevice(struct vn_dispatch_context *dispatch,
return;
}
- free(exts);
-
dev->physical_device = physical_dev;
+ vkr_device_init_proc_table(dev, physical_dev->api_version,
+ args->pCreateInfo->ppEnabledExtensionNames,
+ args->pCreateInfo->enabledExtensionCount);
+
+ free(exts);
+
args->ret = vkr_device_create_queues(ctx, dev, args->pCreateInfo->queueCreateInfoCount,
args->pCreateInfo->pQueueCreateInfos);
if (args->ret != VK_SUCCESS) {
@@ -208,8 +144,6 @@ vkr_dispatch_vkCreateDevice(struct vn_dispatch_context *dispatch,
return;
}
- vkr_device_init_entry_points(dev, physical_dev->api_version);
-
mtx_init(&dev->free_sync_mutex, mtx_plain);
list_inithead(&dev->free_syncs);
@@ -225,91 +159,83 @@ vkr_device_object_destroy(struct vkr_context *ctx,
struct vkr_device *dev,
struct vkr_object *obj)
{
+ struct vn_device_proc_table *vk = &dev->proc_table;
VkDevice device = dev->base.handle.device;
assert(vkr_device_should_track_object(obj));
switch (obj->type) {
case VK_OBJECT_TYPE_SEMAPHORE:
- vkDestroySemaphore(device, obj->handle.semaphore, NULL);
+ vk->DestroySemaphore(device, obj->handle.semaphore, NULL);
break;
case VK_OBJECT_TYPE_FENCE:
- vkDestroyFence(device, obj->handle.fence, NULL);
+ vk->DestroyFence(device, obj->handle.fence, NULL);
break;
case VK_OBJECT_TYPE_DEVICE_MEMORY:
- vkFreeMemory(device, obj->handle.device_memory, NULL);
-
- /* remove device memory from exported or attachment list */
- list_del(&((struct vkr_device_memory *)obj)->exported_head);
+ vk->FreeMemory(device, obj->handle.device_memory, NULL);
+ vkr_device_memory_release((struct vkr_device_memory *)obj);
break;
case VK_OBJECT_TYPE_BUFFER:
- vkDestroyBuffer(device, obj->handle.buffer, NULL);
+ vk->DestroyBuffer(device, obj->handle.buffer, NULL);
break;
case VK_OBJECT_TYPE_IMAGE:
- vkDestroyImage(device, obj->handle.image, NULL);
+ vk->DestroyImage(device, obj->handle.image, NULL);
break;
case VK_OBJECT_TYPE_EVENT:
- vkDestroyEvent(device, obj->handle.event, NULL);
+ vk->DestroyEvent(device, obj->handle.event, NULL);
break;
case VK_OBJECT_TYPE_QUERY_POOL:
- vkDestroyQueryPool(device, obj->handle.query_pool, NULL);
+ vk->DestroyQueryPool(device, obj->handle.query_pool, NULL);
break;
case VK_OBJECT_TYPE_BUFFER_VIEW:
- vkDestroyBufferView(device, obj->handle.buffer_view, NULL);
+ vk->DestroyBufferView(device, obj->handle.buffer_view, NULL);
break;
case VK_OBJECT_TYPE_IMAGE_VIEW:
- vkDestroyImageView(device, obj->handle.image_view, NULL);
+ vk->DestroyImageView(device, obj->handle.image_view, NULL);
break;
case VK_OBJECT_TYPE_SHADER_MODULE:
- vkDestroyShaderModule(device, obj->handle.shader_module, NULL);
+ vk->DestroyShaderModule(device, obj->handle.shader_module, NULL);
break;
case VK_OBJECT_TYPE_PIPELINE_CACHE:
- vkDestroyPipelineCache(device, obj->handle.pipeline_cache, NULL);
+ vk->DestroyPipelineCache(device, obj->handle.pipeline_cache, NULL);
break;
case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
- vkDestroyPipelineLayout(device, obj->handle.pipeline_layout, NULL);
+ vk->DestroyPipelineLayout(device, obj->handle.pipeline_layout, NULL);
break;
case VK_OBJECT_TYPE_RENDER_PASS:
- vkDestroyRenderPass(device, obj->handle.render_pass, NULL);
+ vk->DestroyRenderPass(device, obj->handle.render_pass, NULL);
break;
case VK_OBJECT_TYPE_PIPELINE:
- vkDestroyPipeline(device, obj->handle.pipeline, NULL);
+ vk->DestroyPipeline(device, obj->handle.pipeline, NULL);
break;
case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
- vkDestroyDescriptorSetLayout(device, obj->handle.descriptor_set_layout, NULL);
+ vk->DestroyDescriptorSetLayout(device, obj->handle.descriptor_set_layout, NULL);
break;
case VK_OBJECT_TYPE_SAMPLER:
- vkDestroySampler(device, obj->handle.sampler, NULL);
+ vk->DestroySampler(device, obj->handle.sampler, NULL);
break;
case VK_OBJECT_TYPE_DESCRIPTOR_POOL: {
- /* Destroying VkDescriptorPool frees all VkDescriptorSet objects that were allocated
- * from it.
- */
- vkDestroyDescriptorPool(device, obj->handle.descriptor_pool, NULL);
-
- struct vkr_descriptor_pool *pool = (struct vkr_descriptor_pool *)obj;
- vkr_context_remove_objects(ctx, &pool->descriptor_sets);
+ /* Destroying VkDescriptorPool frees all VkDescriptorSet allocated inside. */
+ vk->DestroyDescriptorPool(device, obj->handle.descriptor_pool, NULL);
+ vkr_descriptor_pool_release(ctx, (struct vkr_descriptor_pool *)obj);
break;
}
case VK_OBJECT_TYPE_FRAMEBUFFER:
- vkDestroyFramebuffer(device, obj->handle.framebuffer, NULL);
+ vk->DestroyFramebuffer(device, obj->handle.framebuffer, NULL);
break;
case VK_OBJECT_TYPE_COMMAND_POOL: {
- /* Destroying VkCommandPool frees all VkCommandBuffer objects that were allocated
- * from it.
- */
- vkDestroyCommandPool(device, obj->handle.command_pool, NULL);
-
- struct vkr_command_pool *pool = (struct vkr_command_pool *)obj;
- vkr_context_remove_objects(ctx, &pool->command_buffers);
+ /* Destroying VkCommandPool frees all VkCommandBuffer allocated inside. */
+ vk->DestroyCommandPool(device, obj->handle.command_pool, NULL);
+ vkr_command_pool_release(ctx, (struct vkr_command_pool *)obj);
break;
}
case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION:
- vkDestroySamplerYcbcrConversion(device, obj->handle.sampler_ycbcr_conversion, NULL);
+ vk->DestroySamplerYcbcrConversion(device, obj->handle.sampler_ycbcr_conversion,
+ NULL);
break;
case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE:
- vkDestroyDescriptorUpdateTemplate(device, obj->handle.descriptor_update_template,
- NULL);
+ vk->DestroyDescriptorUpdateTemplate(device, obj->handle.descriptor_update_template,
+ NULL);
break;
default:
vkr_log("Unhandled vkr_object(%p) with VkObjectType(%u)", obj, (uint32_t)obj->type);
@@ -323,12 +249,13 @@ vkr_device_object_destroy(struct vkr_context *ctx,
void
vkr_device_destroy(struct vkr_context *ctx, struct vkr_device *dev)
{
+ struct vn_device_proc_table *vk = &dev->proc_table;
VkDevice device = dev->base.handle.device;
if (!LIST_IS_EMPTY(&dev->objects))
vkr_log("destroying device with valid objects");
- VkResult result = vkDeviceWaitIdle(device);
+ VkResult result = vk->DeviceWaitIdle(device);
if (result != VK_SUCCESS)
vkr_log("vkDeviceWaitIdle(%p) failed(%d)", dev, (int32_t)result);
@@ -344,13 +271,13 @@ vkr_device_destroy(struct vkr_context *ctx, struct vkr_device *dev)
struct vkr_queue_sync *sync, *sync_tmp;
LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &dev->free_syncs, head) {
- vkDestroyFence(dev->base.handle.device, sync->fence, NULL);
+ vk->DestroyFence(dev->base.handle.device, sync->fence, NULL);
free(sync);
}
mtx_destroy(&dev->free_sync_mutex);
- vkDestroyDevice(device, NULL);
+ vk->DestroyDevice(device, NULL);
list_del(&dev->base.track_head);
@@ -376,10 +303,13 @@ vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetDeviceGroupPeerMemoryFeatures *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetDeviceGroupPeerMemoryFeatures_args_handle(args);
- vkGetDeviceGroupPeerMemoryFeatures(args->device, args->heapIndex,
- args->localDeviceIndex, args->remoteDeviceIndex,
- args->pPeerMemoryFeatures);
+ vk->GetDeviceGroupPeerMemoryFeatures(args->device, args->heapIndex,
+ args->localDeviceIndex, args->remoteDeviceIndex,
+ args->pPeerMemoryFeatures);
}
static void
@@ -391,6 +321,20 @@ vkr_dispatch_vkDeviceWaitIdle(struct vn_dispatch_context *dispatch,
vkr_cs_decoder_set_fatal(&ctx->decoder);
}
+static void
+vkr_dispatch_vkGetCalibratedTimestampsEXT(
+ UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkGetCalibratedTimestampsEXT *args)
+{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
+ vn_replace_vkGetCalibratedTimestampsEXT_args_handle(args);
+ args->ret = vk->GetCalibratedTimestampsEXT(args->device, args->timestampCount,
+ args->pTimestampInfos, args->pTimestamps,
+ args->pMaxDeviation);
+}
+
void
vkr_context_init_device_dispatch(struct vkr_context *ctx)
{
@@ -402,4 +346,6 @@ vkr_context_init_device_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkGetDeviceGroupPeerMemoryFeatures =
vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures;
dispatch->dispatch_vkDeviceWaitIdle = vkr_dispatch_vkDeviceWaitIdle;
+ dispatch->dispatch_vkGetCalibratedTimestampsEXT =
+ vkr_dispatch_vkGetCalibratedTimestampsEXT;
}
diff --git a/src/venus/vkr_device.h b/src/venus/vkr_device.h
index 0b4eb3f7..c7a2a5ce 100644
--- a/src/venus/vkr_device.h
+++ b/src/venus/vkr_device.h
@@ -8,6 +8,8 @@
#include "vkr_common.h"
+#include "venus-protocol/vn_protocol_renderer_util.h"
+
#include "vkr_context.h"
struct vkr_device {
@@ -15,31 +17,7 @@ struct vkr_device {
struct vkr_physical_device *physical_device;
- /* Vulkan 1.2 */
- PFN_vkGetSemaphoreCounterValue GetSemaphoreCounterValue;
- PFN_vkWaitSemaphores WaitSemaphores;
- PFN_vkSignalSemaphore SignalSemaphore;
- PFN_vkGetDeviceMemoryOpaqueCaptureAddress GetDeviceMemoryOpaqueCaptureAddress;
- PFN_vkGetBufferOpaqueCaptureAddress GetBufferOpaqueCaptureAddress;
- PFN_vkGetBufferDeviceAddress GetBufferDeviceAddress;
- PFN_vkResetQueryPool ResetQueryPool;
- PFN_vkCreateRenderPass2 CreateRenderPass2;
- PFN_vkCmdBeginRenderPass2 CmdBeginRenderPass2;
- PFN_vkCmdNextSubpass2 CmdNextSubpass2;
- PFN_vkCmdEndRenderPass2 CmdEndRenderPass2;
- PFN_vkCmdDrawIndirectCount CmdDrawIndirectCount;
- PFN_vkCmdDrawIndexedIndirectCount CmdDrawIndexedIndirectCount;
-
- PFN_vkCmdBindTransformFeedbackBuffersEXT cmd_bind_transform_feedback_buffers;
- PFN_vkCmdBeginTransformFeedbackEXT cmd_begin_transform_feedback;
- PFN_vkCmdEndTransformFeedbackEXT cmd_end_transform_feedback;
- PFN_vkCmdBeginQueryIndexedEXT cmd_begin_query_indexed;
- PFN_vkCmdEndQueryIndexedEXT cmd_end_query_indexed;
- PFN_vkCmdDrawIndirectByteCountEXT cmd_draw_indirect_byte_count;
-
- PFN_vkGetImageDrmFormatModifierPropertiesEXT get_image_drm_format_modifier_properties;
-
- PFN_vkGetMemoryFdPropertiesKHR get_memory_fd_properties;
+ struct vn_device_proc_table proc_table;
struct list_head queues;
diff --git a/src/venus/vkr_device_memory.c b/src/venus/vkr_device_memory.c
index 516c55dc..a014b742 100644
--- a/src/venus/vkr_device_memory.c
+++ b/src/venus/vkr_device_memory.c
@@ -5,6 +5,8 @@
#include "vkr_device_memory.h"
+#include <gbm.h>
+
#include "venus-protocol/vn_protocol_renderer_transport.h"
#include "vkr_device_memory_gen.h"
@@ -37,100 +39,210 @@ vkr_get_fd_handle_type_from_virgl_fd_type(
return true;
}
-static void
-vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch,
- struct vn_command_vkAllocateMemory *args)
+static bool
+vkr_get_fd_info_from_resource_info(struct vkr_context *ctx,
+ struct vkr_physical_device *physical_dev,
+ const VkImportMemoryResourceInfoMESA *res_info,
+ VkImportMemoryFdInfoKHR *out)
{
- struct vkr_context *ctx = dispatch->data;
+ struct vkr_resource_attachment *att = NULL;
+ enum virgl_resource_fd_type fd_type;
+ int fd = -1;
+ VkExternalMemoryHandleTypeFlagBits handle_type;
- struct vkr_device *dev = vkr_device_from_handle(args->device);
+ att = vkr_context_get_resource(ctx, res_info->resourceId);
+ if (!att) {
+ vkr_log("failed to import resource: invalid res_id %u", res_info->resourceId);
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+ return false;
+ }
-#ifdef FORCE_ENABLE_DMABUF
- VkExportMemoryAllocateInfo local_export_info;
- if (dev->physical_device->EXT_external_memory_dma_buf) {
- VkExportMemoryAllocateInfo *export_info = vkr_find_pnext(
- args->pAllocateInfo->pNext, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
- if (export_info) {
- export_info->handleTypes |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- } else {
- local_export_info = (const VkExportMemoryAllocateInfo){
- .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
- .pNext = args->pAllocateInfo->pNext,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_export_info;
- }
+ fd_type = virgl_resource_export_fd(att->resource, &fd);
+ if (fd_type == VIRGL_RESOURCE_FD_INVALID)
+ return false;
+
+ if (!vkr_get_fd_handle_type_from_virgl_fd_type(physical_dev, fd_type, &handle_type)) {
+ close(fd);
+ return false;
}
-#endif
- /* translate VkImportMemoryResourceInfoMESA into VkImportMemoryFdInfoKHR */
- VkImportMemoryResourceInfoMESA *import_resource_info = NULL;
- VkImportMemoryFdInfoKHR import_fd_info = {
+ *out = (VkImportMemoryFdInfoKHR){
.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
- .fd = -1,
+ .pNext = res_info->pNext,
+ .fd = fd,
+ .handleType = handle_type,
};
- VkBaseInStructure *pprev = (VkBaseInStructure *)args->pAllocateInfo;
- while (pprev->pNext) {
- if (pprev->pNext->sType == VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA) {
- import_resource_info = (VkImportMemoryResourceInfoMESA *)pprev->pNext;
- import_fd_info.pNext = pprev->pNext->pNext;
- pprev->pNext = (const struct VkBaseInStructure *)&import_fd_info;
- break;
- }
- pprev = (VkBaseInStructure *)pprev->pNext;
- }
- if (import_resource_info) {
- uint32_t res_id = import_resource_info->resourceId;
- struct vkr_resource_attachment *att =
- util_hash_table_get(ctx->resource_table, uintptr_to_pointer(res_id));
- if (!att) {
- vkr_cs_decoder_set_fatal(&ctx->decoder);
- return;
- }
+ return true;
+}
- enum virgl_resource_fd_type fd_type =
- virgl_resource_export_fd(att->resource, &import_fd_info.fd);
- if (!vkr_get_fd_handle_type_from_virgl_fd_type(dev->physical_device, fd_type,
- &import_fd_info.handleType)) {
- close(import_fd_info.fd);
- args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE;
- return;
- }
+static VkResult
+vkr_get_fd_info_from_allocation_info(struct vkr_physical_device *physical_dev,
+ const VkMemoryAllocateInfo *alloc_info,
+ struct gbm_bo **out_gbm_bo,
+ VkImportMemoryFdInfoKHR *out_fd_info)
+{
+#ifdef MINIGBM
+ const uint32_t gbm_bo_use_flags =
+ GBM_BO_USE_LINEAR | GBM_BO_USE_SW_READ_RARELY | GBM_BO_USE_SW_WRITE_RARELY;
+#else
+ const uint32_t gbm_bo_use_flags = GBM_BO_USE_LINEAR;
+#endif
+
+ struct gbm_bo *gbm_bo;
+ int fd = -1;
+
+ assert(physical_dev->gbm_device);
+
+ /*
+ * Reject here for simplicity. Letting VkPhysicalDeviceVulkan11Properties return
+ * min(maxMemoryAllocationSize, UINT32_MAX) will affect unmappable scenarios.
+ */
+ if (alloc_info->allocationSize > UINT32_MAX)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ /* 4K alignment is used on all implementations we support. */
+ gbm_bo =
+ gbm_bo_create(physical_dev->gbm_device, align(alloc_info->allocationSize, 4096), 1,
+ GBM_FORMAT_R8, gbm_bo_use_flags);
+ if (!gbm_bo)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ /* gbm_bo_get_fd returns negative error code on failure */
+ fd = gbm_bo_get_fd(gbm_bo);
+ if (fd < 0) {
+ gbm_bo_destroy(gbm_bo);
+ return fd == -EMFILE ? VK_ERROR_TOO_MANY_OBJECTS : VK_ERROR_OUT_OF_HOST_MEMORY;
}
- const VkPhysicalDeviceMemoryProperties *mem_props =
- &dev->physical_device->memory_properties;
- const uint32_t mt_index = args->pAllocateInfo->memoryTypeIndex;
- const uint32_t property_flags = mem_props->memoryTypes[mt_index].propertyFlags;
+ *out_gbm_bo = gbm_bo;
+ *out_fd_info = (VkImportMemoryFdInfoKHR){
+ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ .pNext = alloc_info->pNext,
+ .fd = fd,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ };
+ return VK_SUCCESS;
+}
- /* get valid fd types */
+static void
+vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch,
+ struct vn_command_vkAllocateMemory *args)
+{
+ struct vkr_context *ctx = dispatch->data;
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vkr_physical_device *physical_dev = dev->physical_device;
+ VkBaseInStructure *prev_of_res_info = NULL;
+ VkImportMemoryResourceInfoMESA *res_info = NULL;
+ VkImportMemoryFdInfoKHR local_import_info = { .fd = -1 };
+ VkExportMemoryAllocateInfo *export_info = vkr_find_struct(
+ args->pAllocateInfo->pNext, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
+ const bool no_dma_buf_export =
+ !export_info ||
+ !(export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ struct vkr_device_memory *mem = NULL;
+ const uint32_t mem_type_index = args->pAllocateInfo->memoryTypeIndex;
+ const uint32_t property_flags =
+ physical_dev->memory_properties.memoryTypes[mem_type_index].propertyFlags;
uint32_t valid_fd_types = 0;
- const VkBaseInStructure *pnext = args->pAllocateInfo->pNext;
- while (pnext) {
- if (pnext->sType == VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO) {
- const VkExportMemoryAllocateInfo *export = (const void *)pnext;
+ struct gbm_bo *gbm_bo = NULL;
+
+ /* translate VkImportMemoryResourceInfoMESA into VkImportMemoryFdInfoKHR in place */
+ prev_of_res_info = vkr_find_prev_struct(
+ args->pAllocateInfo, VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA);
+ if (prev_of_res_info) {
+ res_info = (VkImportMemoryResourceInfoMESA *)prev_of_res_info->pNext;
+ if (!vkr_get_fd_info_from_resource_info(ctx, physical_dev, res_info,
+ &local_import_info)) {
+ args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE;
+ return;
+ }
- if (export->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
- valid_fd_types |= 1 << VIRGL_RESOURCE_FD_OPAQUE;
- if (export->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
- valid_fd_types |= 1 << VIRGL_RESOURCE_FD_DMABUF;
+ prev_of_res_info->pNext = (const struct VkBaseInStructure *)&local_import_info;
+ }
- break;
+ /* XXX Force dma_buf/opaque fd export or gbm bo import until a new extension that
+ * supports direct export from host visible memory
+ *
+ * Most VkImage and VkBuffer are non-external while most VkDeviceMemory are external
+ * if allocated with a host visible memory type. We still violate the spec by binding
+ * external memory to non-external image or buffer, which needs spec changes with a
+ * new extension.
+ *
+ * Skip forcing external if a valid VkImportMemoryResourceInfoMESA is provided, since
+ * the mapping will be directly set up from the existing virgl resource.
+ */
+ VkExportMemoryAllocateInfo local_export_info;
+ if ((property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) && !res_info) {
+ /* An implementation can support dma_buf import along with opaque fd export/import.
+ * If the client driver is using external memory and requesting dma_buf, without
+ * dma_buf fd export support, we must use gbm bo import path instead of forcing
+ * opaque fd export. e.g. the client driver uses external memory for wsi image.
+ */
+ if (dev->physical_device->is_dma_buf_fd_export_supported ||
+ (dev->physical_device->is_opaque_fd_export_supported && no_dma_buf_export)) {
+ VkExternalMemoryHandleTypeFlagBits handle_type =
+ dev->physical_device->is_dma_buf_fd_export_supported
+ ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
+ : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ if (export_info) {
+ export_info->handleTypes |= handle_type;
+ } else {
+ local_export_info = (const VkExportMemoryAllocateInfo){
+ .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
+ .pNext = args->pAllocateInfo->pNext,
+ .handleTypes = handle_type,
+ };
+ export_info = &local_export_info;
+ ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_export_info;
+ }
+ } else if (dev->physical_device->EXT_external_memory_dma_buf) {
+ /* Allocate gbm bo to force dma_buf fd import. */
+ VkResult result;
+
+ if (export_info) {
+ /* Strip export info since valid_fd_types can only be dma_buf here. */
+ VkBaseInStructure *prev_of_export_info = vkr_find_prev_struct(
+ args->pAllocateInfo, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
+
+ prev_of_export_info->pNext = export_info->pNext;
+ export_info = NULL;
+ }
+
+ result = vkr_get_fd_info_from_allocation_info(physical_dev, args->pAllocateInfo,
+ &gbm_bo, &local_import_info);
+ if (result != VK_SUCCESS) {
+ args->ret = result;
+ return;
+ }
+
+ ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_import_info;
+
+ valid_fd_types = 1 << VIRGL_RESOURCE_FD_DMABUF;
}
- pnext = pnext->pNext;
}
- struct vkr_device_memory *mem = vkr_device_memory_create_and_add(ctx, args);
+ if (export_info) {
+ if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+ valid_fd_types |= 1 << VIRGL_RESOURCE_FD_OPAQUE;
+ if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
+ valid_fd_types |= 1 << VIRGL_RESOURCE_FD_DMABUF;
+ }
+
+ mem = vkr_device_memory_create_and_add(ctx, args);
if (!mem) {
- if (import_resource_info)
- close(import_fd_info.fd);
+ if (local_import_info.fd >= 0)
+ close(local_import_info.fd);
+ if (gbm_bo)
+ gbm_bo_destroy(gbm_bo);
return;
}
- mem->device = args->device;
+ mem->device = dev;
mem->property_flags = property_flags;
mem->valid_fd_types = valid_fd_types;
- list_inithead(&mem->exported_head);
+ mem->gbm_bo = gbm_bo;
+ mem->allocation_size = args->pAllocateInfo->allocationSize;
+ mem->memory_type_index = mem_type_index;
}
static void
@@ -141,8 +253,7 @@ vkr_dispatch_vkFreeMemory(struct vn_dispatch_context *dispatch,
if (!mem)
return;
- list_del(&mem->exported_head);
-
+ vkr_device_memory_release(mem);
vkr_device_memory_destroy_and_remove(dispatch->data, args);
}
@@ -151,8 +262,12 @@ vkr_dispatch_vkGetDeviceMemoryCommitment(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetDeviceMemoryCommitment *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetDeviceMemoryCommitment_args_handle(args);
- vkGetDeviceMemoryCommitment(args->device, args->memory, args->pCommittedMemoryInBytes);
+ vk->GetDeviceMemoryCommitment(args->device, args->memory,
+ args->pCommittedMemoryInBytes);
}
static void
@@ -161,9 +276,10 @@ vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress(
struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkGetDeviceMemoryOpaqueCaptureAddress_args_handle(args);
- args->ret = dev->GetDeviceMemoryOpaqueCaptureAddress(args->device, args->pInfo);
+ args->ret = vk->GetDeviceMemoryOpaqueCaptureAddress(args->device, args->pInfo);
}
static void
@@ -173,10 +289,11 @@ vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
{
struct vkr_context *ctx = dispatch->data;
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
- struct vkr_resource_attachment *att =
- util_hash_table_get(ctx->resource_table, uintptr_to_pointer(args->resourceId));
+ struct vkr_resource_attachment *att = vkr_context_get_resource(ctx, args->resourceId);
if (!att) {
+ vkr_log("failed to query resource props: invalid res_id %u", args->resourceId);
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
@@ -198,8 +315,7 @@ vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
.memoryTypeBits = 0,
};
vn_replace_vkGetMemoryResourcePropertiesMESA_args_handle(args);
- args->ret =
- dev->get_memory_fd_properties(args->device, handle_type, fd, &mem_fd_props);
+ args->ret = vk->GetMemoryFdPropertiesKHR(args->device, handle_type, fd, &mem_fd_props);
if (args->ret != VK_SUCCESS) {
close(fd);
return;
@@ -207,7 +323,7 @@ vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
args->pMemoryResourceProperties->memoryTypeBits = mem_fd_props.memoryTypeBits;
- VkMemoryResourceAllocationSizeProperties100000MESA *alloc_size_props = vkr_find_pnext(
+ VkMemoryResourceAllocationSizeProperties100000MESA *alloc_size_props = vkr_find_struct(
args->pMemoryResourceProperties->pNext,
VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA);
if (alloc_size_props)
@@ -235,3 +351,44 @@ vkr_context_init_device_memory_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkGetMemoryResourcePropertiesMESA =
vkr_dispatch_vkGetMemoryResourcePropertiesMESA;
}
+
+void
+vkr_device_memory_release(struct vkr_device_memory *mem)
+{
+ if (mem->gbm_bo)
+ gbm_bo_destroy(mem->gbm_bo);
+}
+
+int
+vkr_device_memory_export_fd(struct vkr_device_memory *mem,
+ VkExternalMemoryHandleTypeFlagBits handle_type,
+ int *out_fd)
+{
+ struct vn_device_proc_table *vk = &mem->device->proc_table;
+ int fd = -1;
+
+ if (mem->gbm_bo) {
+ /* mem->gbm_bo is a gbm bo backing non-external mappable memory */
+ assert((handle_type == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) &&
+ (mem->valid_fd_types == 1 << VIRGL_RESOURCE_FD_DMABUF));
+
+ /* gbm_bo_get_fd returns negative error code on failure */
+ fd = gbm_bo_get_fd(mem->gbm_bo);
+ if (fd < 0)
+ return fd;
+ } else {
+ VkDevice dev_handle = mem->device->base.handle.device;
+ VkDeviceMemory mem_handle = mem->base.handle.device_memory;
+ const VkMemoryGetFdInfoKHR fd_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
+ .memory = mem_handle,
+ .handleType = handle_type,
+ };
+ VkResult result = vk->GetMemoryFdKHR(dev_handle, &fd_info, &fd);
+ if (result != VK_SUCCESS)
+ return result == VK_ERROR_TOO_MANY_OBJECTS ? -EMFILE : -ENOMEM;
+ }
+
+ *out_fd = fd;
+ return 0;
+}
diff --git a/src/venus/vkr_device_memory.h b/src/venus/vkr_device_memory.h
index 7ae33de1..96ea294d 100644
--- a/src/venus/vkr_device_memory.h
+++ b/src/venus/vkr_device_memory.h
@@ -8,20 +8,34 @@
#include "vkr_common.h"
+struct gbm_bo;
+
struct vkr_device_memory {
struct vkr_object base;
- VkDevice device;
+ struct vkr_device *device;
uint32_t property_flags;
uint32_t valid_fd_types;
+ /* gbm bo backing non-external mappable memory */
+ struct gbm_bo *gbm_bo;
+
+ uint64_t allocation_size;
+ uint32_t memory_type_index;
+
bool exported;
- uint32_t exported_res_id;
- struct list_head exported_head;
};
VKR_DEFINE_OBJECT_CAST(device_memory, VK_OBJECT_TYPE_DEVICE_MEMORY, VkDeviceMemory)
void
vkr_context_init_device_memory_dispatch(struct vkr_context *ctx);
+void
+vkr_device_memory_release(struct vkr_device_memory *mem);
+
+int
+vkr_device_memory_export_fd(struct vkr_device_memory *mem,
+ VkExternalMemoryHandleTypeFlagBits handle_type,
+ int *out_fd);
+
#endif /* VKR_DEVICE_MEMORY_H */
diff --git a/src/venus/vkr_device_object.py b/src/venus/vkr_device_object.py
index c83de945..282c39e6 100644
--- a/src/venus/vkr_device_object.py
+++ b/src/venus/vkr_device_object.py
@@ -13,9 +13,12 @@ vkr_{create_func_name}_create_driver_handle(
struct vn_command_{create_cmd} *args,
struct vkr_{vkr_type} *obj)
{{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
/* handles in args are replaced */
vn_replace_{create_cmd}_args_handle(args);
- args->ret = {create_cmd}(args->device, args->{create_info}, NULL,
+ args->ret = vk->{proc_create}(args->device, args->{create_info}, NULL,
&obj->base.handle.{vkr_type});
return args->ret;
}}
@@ -31,9 +34,12 @@ VkResult vkr_{create_func_name}_create_driver_handles(
struct vn_command_{create_cmd} *args,
struct object_array *arr)
{{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
/* handles in args are replaced */
vn_replace_{create_cmd}_args_handle(args);
- args->ret = {create_cmd}(args->device, args->{create_info},
+ args->ret = vk->{proc_create}(args->device, args->{create_info},
arr->handle_storage);
return args->ret;
}}
@@ -47,9 +53,12 @@ vkr_{create_func_name}_create_driver_handles(
struct vn_command_{create_cmd} *args,
struct object_array *arr)
{{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
/* handles in args are replaced */
vn_replace_{create_cmd}_args_handle(args);
- args->ret = {create_cmd}(args->device, args->{create_cache},
+ args->ret = vk->{proc_create}(args->device, args->{create_cache},
args->{create_count}, args->{create_info}, NULL,
arr->handle_storage);
return args->ret;
@@ -63,9 +72,12 @@ vkr_{destroy_func_name}_destroy_driver_handle(
UNUSED struct vkr_context *ctx,
struct vn_command_{destroy_cmd} *args)
{{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
/* handles in args are replaced */
vn_replace_{destroy_cmd}_args_handle(args);
- {destroy_cmd}(args->device, args->{destroy_obj}, NULL);
+ vk->{proc_destroy}(args->device, args->{destroy_obj}, NULL);
}}
'''
@@ -79,6 +91,9 @@ vkr_{destroy_func_name}_destroy_driver_handles(
struct vn_command_{destroy_cmd} *args,
struct list_head *free_list)
{{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
list_inithead(free_list);
for (uint32_t i = 0; i < args->{destroy_count}; i++) {{
struct vkr_{vkr_type} *obj =
@@ -92,7 +107,7 @@ vkr_{destroy_func_name}_destroy_driver_handles(
/* handles in args are replaced */
vn_replace_{destroy_cmd}_args_handle(args);
- {destroy_cmd}(args->device, args->{destroy_pool},
+ vk->{proc_destroy}(args->device, args->{destroy_pool},
args->{destroy_count}, args->{destroy_objs});
}}
'''
@@ -155,7 +170,12 @@ vkr_{create_func_name}_create_array(
if (vkr_{create_func_name}_init_array(ctx, args, arr) != VK_SUCCESS)
return args->ret;
- if (vkr_{create_func_name}_create_driver_handles(ctx, args, arr) != VK_SUCCESS) {{
+ if (vkr_{create_func_name}_create_driver_handles(ctx, args, arr) < VK_SUCCESS) {{
+ /* In case the client expects a reply, clear all returned handles to
+ * VK_NULL_HANDLE.
+ */
+ memset(args->{create_objs}, 0,
+ args->{create_count} * sizeof(args->{create_objs}[0]));
object_array_fini(arr);
return args->ret;
}}
@@ -219,14 +239,22 @@ static inline void
vkr_{create_func_name}_add_array(
struct vkr_context *ctx,
struct vkr_device *dev,
- struct object_array *arr)
+ struct object_array *arr,
+ {vk_type} *args_{create_objs})
{{
for (uint32_t i = 0; i < arr->count; i++) {{
struct vkr_{vkr_type} *obj = arr->objects[i];
obj->base.handle.{vkr_type} = (({vk_type} *)arr->handle_storage)[i];
- vkr_device_add_object(ctx, dev, &obj->base);
+ /* Individual pipelines may fail creation. */
+ if (obj->base.handle.{vkr_type} == VK_NULL_HANDLE) {{
+ free(obj);
+ arr->objects[i] = NULL;
+ args_{create_objs}[i] = VK_NULL_HANDLE;
+ }} else {{
+ vkr_device_add_object(ctx, dev, &obj->base);
+ }}
}}
arr->objects_stolen = true;
@@ -428,6 +456,13 @@ def process_objects(json_objs):
json_obj.setdefault('create_func_name', json_obj['vkr_type'])
json_obj.setdefault('destroy_func_name', json_obj['vkr_type'])
json_obj.setdefault('variants', [])
+ json_obj['proc_create'] = json_obj.get('create_cmd')[2:]
+ json_obj['proc_destroy'] = json_obj.get('destroy_cmd')[2:]
+ for variant in json_obj.get('variants'):
+ if variant.get('create_cmd') != None:
+ variant['proc_create'] = variant.get('create_cmd')[2:]
+ if variant.get('destroy_cmd') != None:
+      variant['proc_destroy'] = variant.get('destroy_cmd')[2:]
def file_generator(json_file):
contents = file_header_generator(json_file)
diff --git a/src/venus/vkr_image.c b/src/venus/vkr_image.c
index 65fd1e2b..080b4425 100644
--- a/src/venus/vkr_image.c
+++ b/src/venus/vkr_image.c
@@ -12,33 +12,26 @@ static void
vkr_dispatch_vkCreateImage(struct vn_dispatch_context *dispatch,
struct vn_command_vkCreateImage *args)
{
- struct vkr_context *ctx = dispatch->data;
-
- struct vkr_device *dev = vkr_device_from_handle(args->device);
-
-#ifdef FORCE_ENABLE_DMABUF
- /* Do not chain VkExternalMemoryImageCreateInfo with optimal tiling, so that
- * guest Venus can pass memory requirement cts with dedicated allocation.
+ /* XXX If VkExternalMemoryImageCreateInfo is chained by the app, all is
+ * good. If it is not chained, we might still bind an external memory to
+ * the image, because vkr_dispatch_vkAllocateMemory makes any HOST_VISIBLE
+ * memory external. That is a spec violation.
+ *
+ * The discussions in vkr_dispatch_vkCreateBuffer are applicable to both
+ * buffers and images. Additionally, drivers usually use
+ * VkExternalMemoryImageCreateInfo to pick a well-defined image layout for
+ * interoperability with foreign queues. However, a well-defined layout
+ * might not exist for some images. When it does, it might still require a
+ * dedicated allocation or might have a degraded performance.
+ *
+ * On the other hand, binding an external memory to an image created
+ * without VkExternalMemoryImageCreateInfo usually works. Yes, it will
+ * explode if the external memory is accessed by foreign queues due to the
+ * lack of a well-defined image layout. But we never end up in that
+ * situation because the app does not consider the memory external.
*/
- VkExternalMemoryImageCreateInfo local_external_info;
- if (args->pCreateInfo->tiling != VK_IMAGE_TILING_OPTIMAL &&
- dev->physical_device->EXT_external_memory_dma_buf) {
- VkExternalMemoryImageCreateInfo *external_info = vkr_find_pnext(
- args->pCreateInfo->pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
- if (external_info) {
- external_info->handleTypes |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- } else {
- local_external_info = (const VkExternalMemoryImageCreateInfo){
- .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
- .pNext = args->pCreateInfo->pNext,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- ((VkImageCreateInfo *)args->pCreateInfo)->pNext = &local_external_info;
- }
- }
-#endif
-
- vkr_image_create_and_add(ctx, args);
+
+ vkr_image_create_and_add(dispatch->data, args);
}
static void
@@ -53,8 +46,11 @@ vkr_dispatch_vkGetImageMemoryRequirements(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetImageMemoryRequirements *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetImageMemoryRequirements_args_handle(args);
- vkGetImageMemoryRequirements(args->device, args->image, args->pMemoryRequirements);
+ vk->GetImageMemoryRequirements(args->device, args->image, args->pMemoryRequirements);
}
static void
@@ -62,8 +58,11 @@ vkr_dispatch_vkGetImageMemoryRequirements2(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetImageMemoryRequirements2 *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetImageMemoryRequirements2_args_handle(args);
- vkGetImageMemoryRequirements2(args->device, args->pInfo, args->pMemoryRequirements);
+ vk->GetImageMemoryRequirements2(args->device, args->pInfo, args->pMemoryRequirements);
}
static void
@@ -71,10 +70,13 @@ vkr_dispatch_vkGetImageSparseMemoryRequirements(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetImageSparseMemoryRequirements *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetImageSparseMemoryRequirements_args_handle(args);
- vkGetImageSparseMemoryRequirements(args->device, args->image,
- args->pSparseMemoryRequirementCount,
- args->pSparseMemoryRequirements);
+ vk->GetImageSparseMemoryRequirements(args->device, args->image,
+ args->pSparseMemoryRequirementCount,
+ args->pSparseMemoryRequirements);
}
static void
@@ -82,27 +84,36 @@ vkr_dispatch_vkGetImageSparseMemoryRequirements2(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetImageSparseMemoryRequirements2 *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetImageSparseMemoryRequirements2_args_handle(args);
- vkGetImageSparseMemoryRequirements2(args->device, args->pInfo,
- args->pSparseMemoryRequirementCount,
- args->pSparseMemoryRequirements);
+ vk->GetImageSparseMemoryRequirements2(args->device, args->pInfo,
+ args->pSparseMemoryRequirementCount,
+ args->pSparseMemoryRequirements);
}
static void
vkr_dispatch_vkBindImageMemory(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkBindImageMemory *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkBindImageMemory_args_handle(args);
args->ret =
- vkBindImageMemory(args->device, args->image, args->memory, args->memoryOffset);
+ vk->BindImageMemory(args->device, args->image, args->memory, args->memoryOffset);
}
static void
vkr_dispatch_vkBindImageMemory2(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkBindImageMemory2 *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkBindImageMemory2_args_handle(args);
- args->ret = vkBindImageMemory2(args->device, args->bindInfoCount, args->pBindInfos);
+ args->ret = vk->BindImageMemory2(args->device, args->bindInfoCount, args->pBindInfos);
}
static void
@@ -110,9 +121,12 @@ vkr_dispatch_vkGetImageSubresourceLayout(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetImageSubresourceLayout *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetImageSubresourceLayout_args_handle(args);
- vkGetImageSubresourceLayout(args->device, args->image, args->pSubresource,
- args->pLayout);
+ vk->GetImageSubresourceLayout(args->device, args->image, args->pSubresource,
+ args->pLayout);
}
static void
@@ -121,10 +135,11 @@ vkr_dispatch_vkGetImageDrmFormatModifierPropertiesEXT(
struct vn_command_vkGetImageDrmFormatModifierPropertiesEXT *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkGetImageDrmFormatModifierPropertiesEXT_args_handle(args);
- args->ret = dev->get_image_drm_format_modifier_properties(args->device, args->image,
- args->pProperties);
+ args->ret = vk->GetImageDrmFormatModifierPropertiesEXT(args->device, args->image,
+ args->pProperties);
}
static void
@@ -171,6 +186,33 @@ vkr_dispatch_vkDestroySamplerYcbcrConversion(
vkr_sampler_ycbcr_conversion_destroy_and_remove(dispatch->data, args);
}
+static void
+vkr_dispatch_vkGetDeviceImageMemoryRequirements(
+ UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkGetDeviceImageMemoryRequirements *args)
+{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
+ vn_replace_vkGetDeviceImageMemoryRequirements_args_handle(args);
+ vk->GetDeviceImageMemoryRequirements(args->device, args->pInfo,
+ args->pMemoryRequirements);
+}
+
+static void
+vkr_dispatch_vkGetDeviceImageSparseMemoryRequirements(
+ UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkGetDeviceImageSparseMemoryRequirements *args)
+{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
+ vn_replace_vkGetDeviceImageSparseMemoryRequirements_args_handle(args);
+ vk->GetDeviceImageSparseMemoryRequirements(args->device, args->pInfo,
+ args->pSparseMemoryRequirementCount,
+ args->pSparseMemoryRequirements);
+}
+
void
vkr_context_init_image_dispatch(struct vkr_context *ctx)
{
@@ -193,6 +235,10 @@ vkr_context_init_image_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkGetImageDrmFormatModifierPropertiesEXT =
vkr_dispatch_vkGetImageDrmFormatModifierPropertiesEXT;
+ dispatch->dispatch_vkGetDeviceImageMemoryRequirements =
+ vkr_dispatch_vkGetDeviceImageMemoryRequirements;
+ dispatch->dispatch_vkGetDeviceImageSparseMemoryRequirements =
+ vkr_dispatch_vkGetDeviceImageSparseMemoryRequirements;
}
void
diff --git a/src/venus/vkr_instance.c b/src/venus/vkr_instance.c
index 363ef9af..76acf6be 100644
--- a/src/venus/vkr_instance.c
+++ b/src/venus/vkr_instance.c
@@ -5,7 +5,6 @@
#include "vkr_instance.h"
-#include "venus-protocol/vn_protocol_renderer_info.h"
#include "venus-protocol/vn_protocol_renderer_instance.h"
#include "vkr_context.h"
@@ -16,7 +15,13 @@ vkr_dispatch_vkEnumerateInstanceVersion(UNUSED struct vn_dispatch_context *dispa
struct vn_command_vkEnumerateInstanceVersion *args)
{
vn_replace_vkEnumerateInstanceVersion_args_handle(args);
- args->ret = vkEnumerateInstanceVersion(args->pApiVersion);
+
+ uint32_t version = 0;
+ args->ret = vkEnumerateInstanceVersion(&version);
+ if (args->ret == VK_SUCCESS)
+ version = vkr_api_version_cap_minor(version, VKR_MAX_API_VERSION);
+
+ *args->pApiVersion = version;
}
static void
@@ -41,7 +46,7 @@ vkr_dispatch_vkEnumerateInstanceExtensionProperties(
for (uint32_t i = 0; i < ARRAY_SIZE(private_extensions); i++) {
VkExtensionProperties *props = &private_extensions[i];
- props->specVersion = vn_info_extension_spec_version(props->extensionName);
+ props->specVersion = vkr_extension_get_spec_version(props->extensionName);
}
const uint32_t count = MIN2(*args->pPropertyCount, ARRAY_SIZE(private_extensions));
@@ -196,11 +201,6 @@ vkr_dispatch_vkCreateInstance(struct vn_dispatch_context *dispatch,
return;
}
- instance->get_memory_fd = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
- instance->base.handle.instance, "vkGetMemoryFdKHR");
- instance->get_fence_fd = (PFN_vkGetFenceFdKHR)vkGetInstanceProcAddr(
- instance->base.handle.instance, "vkGetFenceFdKHR");
-
if (ctx->validate_level != VKR_CONTEXT_VALIDATE_NONE) {
instance->create_debug_utils_messenger =
(PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
diff --git a/src/venus/vkr_instance.h b/src/venus/vkr_instance.h
index 38e30342..180b9211 100644
--- a/src/venus/vkr_instance.h
+++ b/src/venus/vkr_instance.h
@@ -14,9 +14,6 @@ struct vkr_instance {
uint32_t api_version;
PFN_vkCreateDebugUtilsMessengerEXT create_debug_utils_messenger;
PFN_vkDestroyDebugUtilsMessengerEXT destroy_debug_utils_messenger;
- PFN_vkGetMemoryFdKHR get_memory_fd;
- PFN_vkGetFenceFdKHR get_fence_fd;
-
VkDebugUtilsMessengerEXT validation_messenger;
uint32_t physical_device_count;
diff --git a/src/venus/vkr_physical_device.c b/src/venus/vkr_physical_device.c
index 259f09e9..686fec06 100644
--- a/src/venus/vkr_physical_device.c
+++ b/src/venus/vkr_physical_device.c
@@ -6,12 +6,36 @@
#include "vkr_physical_device.h"
#include "venus-protocol/vn_protocol_renderer_device.h"
-#include "venus-protocol/vn_protocol_renderer_info.h"
+#include "vrend_winsys_gbm.h"
#include "vkr_context.h"
#include "vkr_device.h"
#include "vkr_instance.h"
+/* TODO open render node and create gbm_device per vkr_physical_device */
+static struct gbm_device *vkr_gbm_dev;
+
+static void
+vkr_gbm_device_init_once(void)
+{
+ struct virgl_gbm *vkr_gbm = virgl_gbm_init(-1);
+ if (!vkr_gbm) {
+ vkr_log("virgl_gbm_init failed");
+ exit(-1);
+ }
+
+ vkr_gbm_dev = vkr_gbm->device;
+}
+
+static struct gbm_device *
+vkr_physical_device_get_gbm_device(UNUSED struct vkr_physical_device *physical_dev)
+{
+ static once_flag gbm_once_flag = ONCE_FLAG_INIT;
+ call_once(&gbm_once_flag, vkr_gbm_device_init_once);
+
+ return vkr_gbm_dev;
+}
+
void
vkr_physical_device_destroy(struct vkr_context *ctx,
struct vkr_physical_device *physical_dev)
@@ -72,10 +96,82 @@ vkr_instance_lookup_physical_device(struct vkr_instance *instance,
}
static void
+vkr_physical_device_init_id_properties(struct vkr_physical_device *physical_dev)
+{
+ VkPhysicalDevice handle = physical_dev->base.handle.physical_device;
+ physical_dev->id_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
+ VkPhysicalDeviceProperties2 props2 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+ .pNext = &physical_dev->id_properties
+ };
+ vkGetPhysicalDeviceProperties2(handle, &props2);
+}
+
+static void
vkr_physical_device_init_memory_properties(struct vkr_physical_device *physical_dev)
{
VkPhysicalDevice handle = physical_dev->base.handle.physical_device;
vkGetPhysicalDeviceMemoryProperties(handle, &physical_dev->memory_properties);
+
+ /* XXX When a VkMemoryType has VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, we
+ * assume any VkDeviceMemory with the memory type can be made external and
+ * be exportable. That is incorrect but is what we have to live with with
+ * the existing external memory extensions.
+ *
+ * The main reason is that the external memory extensions require us to use
+ * vkGetPhysicalDeviceExternalBufferProperties or
+ * vkGetPhysicalDeviceImageFormatProperties2 to determine if we can
+ * allocate an exportable external VkDeviceMemory. But we normally do not
+ * have the info to make the queries during vkAllocateMemory.
+ *
+ * We only have VkMemoryAllocateInfo during vkAllocateMemory. The only
+ * useful info in the struct is the memory type. What we need is thus an
+ * extension that tells us that, given a memory type, if all VkDeviceMemory
+ * with the memory type is exportable. If we had the extension, we could
+ * filter out VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT here if a memory type is
+ * not always exportable.
+ */
+
+ /* XXX is_dma_buf_fd_export_supported and is_opaque_fd_export_supported
+ * needs to be filled with a new extension which supports query fd export
+ * against the raw memory types. Currently, we workaround by checking
+ * external buffer properties before force-enabling either dma_buf or opaque
+ * fd path of device memory allocation.
+ */
+ physical_dev->is_dma_buf_fd_export_supported = false;
+ physical_dev->is_opaque_fd_export_supported = false;
+
+ VkPhysicalDeviceExternalBufferInfo info = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
+ .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ };
+ VkExternalBufferProperties props = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
+ };
+
+ if (physical_dev->EXT_external_memory_dma_buf) {
+ info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ vkGetPhysicalDeviceExternalBufferProperties(handle, &info, &props);
+ physical_dev->is_dma_buf_fd_export_supported =
+ (props.externalMemoryProperties.externalMemoryFeatures &
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT) &&
+ (props.externalMemoryProperties.exportFromImportedHandleTypes &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ }
+
+ if (physical_dev->KHR_external_memory_fd) {
+ info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
+ vkGetPhysicalDeviceExternalBufferProperties(handle, &info, &props);
+ physical_dev->is_opaque_fd_export_supported =
+ (props.externalMemoryProperties.externalMemoryFeatures &
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT) &&
+ (props.externalMemoryProperties.exportFromImportedHandleTypes &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
+ }
+
+ if (!physical_dev->is_dma_buf_fd_export_supported &&
+ !physical_dev->is_opaque_fd_export_supported)
+ physical_dev->gbm_device = vkr_physical_device_get_gbm_device(physical_dev);
}
static void
@@ -111,7 +207,7 @@ vkr_physical_device_init_extensions(struct vkr_physical_device *physical_dev,
else if (!strcmp(props->extensionName, "VK_KHR_external_fence_fd"))
physical_dev->KHR_external_fence_fd = true;
- const uint32_t spec_ver = vn_info_extension_spec_version(props->extensionName);
+ const uint32_t spec_ver = vkr_extension_get_spec_version(props->extensionName);
if (spec_ver) {
if (props->specVersion > spec_ver)
props->specVersion = spec_ver;
@@ -147,9 +243,15 @@ vkr_physical_device_init_properties(struct vkr_physical_device *physical_dev)
vkGetPhysicalDeviceProperties(handle, &physical_dev->properties);
VkPhysicalDeviceProperties *props = &physical_dev->properties;
- props->driverVersion = 0;
+ props->apiVersion = vkr_api_version_cap_minor(props->apiVersion, VKR_MAX_API_VERSION);
+}
- /* TODO lie about props->pipelineCacheUUID and patch cache header */
+static void
+vkr_physical_device_init_proc_table(struct vkr_physical_device *physical_dev,
+ struct vkr_instance *instance)
+{
+ vn_util_init_physical_device_proc_table(instance->base.handle.instance,
+ &physical_dev->proc_table);
}
static void
@@ -209,11 +311,13 @@ vkr_dispatch_vkEnumeratePhysicalDevices(struct vn_dispatch_context *dispatch,
physical_dev->base.handle.physical_device = instance->physical_device_handles[i];
+ vkr_physical_device_init_proc_table(physical_dev, instance);
vkr_physical_device_init_properties(physical_dev);
physical_dev->api_version =
MIN2(physical_dev->properties.apiVersion, instance->api_version);
vkr_physical_device_init_extensions(physical_dev, instance);
vkr_physical_device_init_memory_properties(physical_dev);
+ vkr_physical_device_init_id_properties(physical_dev);
list_inithead(&physical_dev->devices);
@@ -254,11 +358,16 @@ vkr_dispatch_vkEnumeratePhysicalDeviceGroups(
VkPhysicalDeviceGroupProperties *orig_props = args->pPhysicalDeviceGroupProperties;
if (orig_props) {
args->pPhysicalDeviceGroupProperties =
- malloc(sizeof(*orig_props) * *args->pPhysicalDeviceGroupCount);
+ calloc(*args->pPhysicalDeviceGroupCount, sizeof(*orig_props));
if (!args->pPhysicalDeviceGroupProperties) {
args->ret = VK_ERROR_OUT_OF_HOST_MEMORY;
return;
}
+
+ for (uint32_t i = 0; i < *args->pPhysicalDeviceGroupCount; i++) {
+ args->pPhysicalDeviceGroupProperties[i].sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
+ }
}
vn_replace_vkEnumeratePhysicalDeviceGroups_args_handle(args);
@@ -361,9 +470,9 @@ vkr_dispatch_vkGetPhysicalDeviceMemoryProperties(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetPhysicalDeviceMemoryProperties *args)
{
- /* TODO lie about this */
- vn_replace_vkGetPhysicalDeviceMemoryProperties_args_handle(args);
- vkGetPhysicalDeviceMemoryProperties(args->physicalDevice, args->pMemoryProperties);
+ struct vkr_physical_device *physical_dev =
+ vkr_physical_device_from_handle(args->physicalDevice);
+ *args->pMemoryProperties = physical_dev->memory_properties;
}
static void
@@ -412,59 +521,8 @@ vkr_dispatch_vkGetPhysicalDeviceProperties2(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetPhysicalDeviceProperties2 *args)
{
- struct vkr_physical_device *physical_dev =
- vkr_physical_device_from_handle(args->physicalDevice);
-
vn_replace_vkGetPhysicalDeviceProperties2_args_handle(args);
vkGetPhysicalDeviceProperties2(args->physicalDevice, args->pProperties);
-
- union {
- VkBaseOutStructure *pnext;
- VkPhysicalDeviceProperties2 *props;
- VkPhysicalDeviceVulkan11Properties *vk11;
- VkPhysicalDeviceVulkan12Properties *vk12;
- VkPhysicalDeviceIDProperties *id;
- VkPhysicalDeviceDriverProperties *driver;
- } u;
-
- u.pnext = (VkBaseOutStructure *)args->pProperties;
- while (u.pnext) {
- switch (u.pnext->sType) {
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
- u.props->properties = physical_dev->properties;
- break;
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
- memset(u.vk11->deviceUUID, 0, sizeof(u.vk11->deviceUUID));
- memset(u.vk11->driverUUID, 0, sizeof(u.vk11->driverUUID));
- memset(u.vk11->deviceLUID, 0, sizeof(u.vk11->deviceLUID));
- u.vk11->deviceNodeMask = 0;
- u.vk11->deviceLUIDValid = false;
- break;
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
- u.vk12->driverID = 0;
- memset(u.vk12->driverName, 0, sizeof(u.vk12->driverName));
- memset(u.vk12->driverInfo, 0, sizeof(u.vk12->driverInfo));
- memset(&u.vk12->conformanceVersion, 0, sizeof(u.vk12->conformanceVersion));
- break;
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
- memset(u.id->deviceUUID, 0, sizeof(u.id->deviceUUID));
- memset(u.id->driverUUID, 0, sizeof(u.id->driverUUID));
- memset(u.id->deviceLUID, 0, sizeof(u.id->deviceLUID));
- u.id->deviceNodeMask = 0;
- u.id->deviceLUIDValid = false;
- break;
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
- u.driver->driverID = 0;
- memset(u.driver->driverName, 0, sizeof(u.driver->driverName));
- memset(u.driver->driverInfo, 0, sizeof(u.driver->driverInfo));
- memset(&u.driver->conformanceVersion, 0, sizeof(u.driver->conformanceVersion));
- break;
- default:
- break;
- }
-
- u.pnext = u.pnext->pNext;
- }
}
static void
@@ -483,9 +541,9 @@ vkr_dispatch_vkGetPhysicalDeviceMemoryProperties2(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetPhysicalDeviceMemoryProperties2 *args)
{
- /* TODO lie about this */
- vn_replace_vkGetPhysicalDeviceMemoryProperties2_args_handle(args);
- vkGetPhysicalDeviceMemoryProperties2(args->physicalDevice, args->pMemoryProperties);
+ struct vkr_physical_device *physical_dev =
+ vkr_physical_device_from_handle(args->physicalDevice);
+ args->pMemoryProperties->memoryProperties = physical_dev->memory_properties;
}
static void
@@ -549,6 +607,20 @@ vkr_dispatch_vkGetPhysicalDeviceExternalFenceProperties(
args->physicalDevice, args->pExternalFenceInfo, args->pExternalFenceProperties);
}
+static void
+vkr_dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(
+ UNUSED struct vn_dispatch_context *ctx,
+ struct vn_command_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT *args)
+{
+ struct vkr_physical_device *physical_dev =
+ vkr_physical_device_from_handle(args->physicalDevice);
+ struct vn_physical_device_proc_table *vk = &physical_dev->proc_table;
+
+ vn_replace_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_args_handle(args);
+ args->ret = vk->GetPhysicalDeviceCalibrateableTimeDomainsEXT(
+ args->physicalDevice, args->pTimeDomainCount, args->pTimeDomains);
+}
+
void
vkr_context_init_physical_device_dispatch(struct vkr_context *ctx)
{
@@ -598,4 +670,6 @@ vkr_context_init_physical_device_dispatch(struct vkr_context *ctx)
vkr_dispatch_vkGetPhysicalDeviceExternalSemaphoreProperties;
dispatch->dispatch_vkGetPhysicalDeviceExternalFenceProperties =
vkr_dispatch_vkGetPhysicalDeviceExternalFenceProperties;
+ dispatch->dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT =
+ vkr_dispatch_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;
}
diff --git a/src/venus/vkr_physical_device.h b/src/venus/vkr_physical_device.h
index 0e84b5fe..03c2aceb 100644
--- a/src/venus/vkr_physical_device.h
+++ b/src/venus/vkr_physical_device.h
@@ -8,9 +8,15 @@
#include "vkr_common.h"
+#include "venus-protocol/vn_protocol_renderer_util.h"
+
+struct gbm_device;
+
struct vkr_physical_device {
struct vkr_object base;
+ struct vn_physical_device_proc_table proc_table;
+
VkPhysicalDeviceProperties properties;
uint32_t api_version;
@@ -21,8 +27,13 @@ struct vkr_physical_device {
bool EXT_external_memory_dma_buf;
bool KHR_external_fence_fd;
+ bool KHR_external_semaphore_fd;
VkPhysicalDeviceMemoryProperties memory_properties;
+ VkPhysicalDeviceIDProperties id_properties;
+ bool is_dma_buf_fd_export_supported;
+ bool is_opaque_fd_export_supported;
+ struct gbm_device *gbm_device;
struct list_head devices;
};
diff --git a/src/venus/vkr_pipeline.c b/src/venus/vkr_pipeline.c
index d30756ea..967d2370 100644
--- a/src/venus/vkr_pipeline.c
+++ b/src/venus/vkr_pipeline.c
@@ -53,18 +53,24 @@ static void
vkr_dispatch_vkGetPipelineCacheData(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetPipelineCacheData *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetPipelineCacheData_args_handle(args);
- args->ret = vkGetPipelineCacheData(args->device, args->pipelineCache, args->pDataSize,
- args->pData);
+ args->ret = vk->GetPipelineCacheData(args->device, args->pipelineCache,
+ args->pDataSize, args->pData);
}
static void
vkr_dispatch_vkMergePipelineCaches(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkMergePipelineCaches *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkMergePipelineCaches_args_handle(args);
- args->ret = vkMergePipelineCaches(args->device, args->dstCache, args->srcCacheCount,
- args->pSrcCaches);
+ args->ret = vk->MergePipelineCaches(args->device, args->dstCache, args->srcCacheCount,
+ args->pSrcCaches);
}
static void
@@ -75,10 +81,10 @@ vkr_dispatch_vkCreateGraphicsPipelines(struct vn_dispatch_context *dispatch,
struct vkr_device *dev = vkr_device_from_handle(args->device);
struct object_array arr;
- if (vkr_graphics_pipeline_create_array(ctx, args, &arr) != VK_SUCCESS)
+ if (vkr_graphics_pipeline_create_array(ctx, args, &arr) < VK_SUCCESS)
return;
- vkr_pipeline_add_array(ctx, dev, &arr);
+ vkr_pipeline_add_array(ctx, dev, &arr, args->pPipelines);
}
static void
@@ -89,10 +95,10 @@ vkr_dispatch_vkCreateComputePipelines(struct vn_dispatch_context *dispatch,
struct vkr_device *dev = vkr_device_from_handle(args->device);
struct object_array arr;
- if (vkr_compute_pipeline_create_array(ctx, args, &arr) != VK_SUCCESS)
+ if (vkr_compute_pipeline_create_array(ctx, args, &arr) < VK_SUCCESS)
return;
- vkr_pipeline_add_array(ctx, dev, &arr);
+ vkr_pipeline_add_array(ctx, dev, &arr, args->pPipelines);
}
static void
diff --git a/src/venus/vkr_query_pool.c b/src/venus/vkr_query_pool.c
index ed16d17c..eb77fe4d 100644
--- a/src/venus/vkr_query_pool.c
+++ b/src/venus/vkr_query_pool.c
@@ -25,10 +25,13 @@ static void
vkr_dispatch_vkGetQueryPoolResults(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetQueryPoolResults *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetQueryPoolResults_args_handle(args);
- args->ret = vkGetQueryPoolResults(args->device, args->queryPool, args->firstQuery,
- args->queryCount, args->dataSize, args->pData,
- args->stride, args->flags);
+ args->ret = vk->GetQueryPoolResults(args->device, args->queryPool, args->firstQuery,
+ args->queryCount, args->dataSize, args->pData,
+ args->stride, args->flags);
}
static void
@@ -36,9 +39,10 @@ vkr_dispatch_vkResetQueryPool(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkResetQueryPool *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkResetQueryPool_args_handle(args);
- dev->ResetQueryPool(args->device, args->queryPool, args->firstQuery, args->queryCount);
+ vk->ResetQueryPool(args->device, args->queryPool, args->firstQuery, args->queryCount);
}
void
diff --git a/src/venus/vkr_queue.c b/src/venus/vkr_queue.c
index 3298e957..3b38d8ce 100644
--- a/src/venus/vkr_queue.c
+++ b/src/venus/vkr_queue.c
@@ -7,15 +7,17 @@
#include "venus-protocol/vn_protocol_renderer_queue.h"
+#include "vkr_context.h"
#include "vkr_physical_device.h"
#include "vkr_queue_gen.h"
struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
uint32_t fence_flags,
- uint64_t queue_id,
- void *fence_cookie)
+ uint32_t ring_idx,
+ uint64_t fence_id)
{
+ struct vn_device_proc_table *vk = &dev->proc_table;
struct vkr_queue_sync *sync;
if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
@@ -38,7 +40,7 @@ vkr_device_alloc_queue_sync(struct vkr_device *dev,
.pNext = dev->physical_device->KHR_external_fence_fd ? &export_info : NULL,
};
VkResult result =
- vkCreateFence(dev->base.handle.device, &create_info, NULL, &sync->fence);
+ vk->CreateFence(dev->base.handle.device, &create_info, NULL, &sync->fence);
if (result != VK_SUCCESS) {
free(sync);
return NULL;
@@ -50,12 +52,13 @@ vkr_device_alloc_queue_sync(struct vkr_device *dev,
if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
mtx_unlock(&dev->free_sync_mutex);
- vkResetFences(dev->base.handle.device, 1, &sync->fence);
+ vk->ResetFences(dev->base.handle.device, 1, &sync->fence);
}
+ sync->device_lost = false;
sync->flags = fence_flags;
- sync->queue_id = queue_id;
- sync->fence_cookie = fence_cookie;
+ sync->ring_idx = ring_idx;
+ sync->fence_id = fence_id;
return sync;
}
@@ -78,6 +81,7 @@ vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
bool *queue_empty)
{
struct vkr_device *dev = queue->device;
+ struct vn_device_proc_table *vk = &dev->proc_table;
struct vkr_queue_sync *sync, *tmp;
assert(!(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB));
@@ -101,9 +105,11 @@ vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
mtx_unlock(&queue->mutex);
} else {
LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head) {
- VkResult result = vkGetFenceStatus(dev->base.handle.device, sync->fence);
- if (result == VK_NOT_READY)
- break;
+ if (!sync->device_lost) {
+ VkResult result = vk->GetFenceStatus(dev->base.handle.device, sync->fence);
+ if (result == VK_NOT_READY)
+ break;
+ }
bool is_last_sync = sync->head.next == &queue->pending_syncs;
@@ -123,11 +129,13 @@ vkr_queue_sync_retire(struct vkr_context *ctx,
struct vkr_device *dev,
struct vkr_queue_sync *sync)
{
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
- ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
+ ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
vkr_device_free_queue_sync(dev, sync);
} else {
- vkDestroyFence(dev->base.handle.device, sync->fence, NULL);
+ vk->DestroyFence(dev->base.handle.device, sync->fence, NULL);
sync->fence = VK_NULL_HANDLE;
/* move to the ctx to be retired and freed at the next retire_fences */
@@ -170,6 +178,9 @@ vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
list_del(&queue->busy_head);
list_del(&queue->base.track_head);
+ if (queue->ring_idx > 0)
+ ctx->sync_queues[queue->ring_idx] = NULL;
+
if (queue->base.id)
vkr_context_remove_object(ctx, &queue->base);
else
@@ -182,11 +193,12 @@ vkr_queue_thread(void *arg)
struct vkr_queue *queue = arg;
struct vkr_context *ctx = queue->context;
struct vkr_device *dev = queue->device;
+ struct vn_device_proc_table *vk = &dev->proc_table;
const uint64_t ns_per_sec = 1000000000llu;
char thread_name[16];
snprintf(thread_name, ARRAY_SIZE(thread_name), "vkr-queue-%d", ctx->base.ctx_id);
- pipe_thread_setname(thread_name);
+ u_thread_setname(thread_name);
mtx_lock(&queue->mutex);
while (true) {
@@ -201,8 +213,13 @@ vkr_queue_thread(void *arg)
mtx_unlock(&queue->mutex);
- VkResult result =
- vkWaitForFences(dev->base.handle.device, 1, &sync->fence, false, ns_per_sec * 3);
+ VkResult result;
+ if (sync->device_lost) {
+ result = VK_ERROR_DEVICE_LOST;
+ } else {
+ result = vk->WaitForFences(dev->base.handle.device, 1, &sync->fence, true,
+ ns_per_sec * 3);
+ }
mtx_lock(&queue->mutex);
@@ -212,7 +229,7 @@ vkr_queue_thread(void *arg)
list_del(&sync->head);
if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
- ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
+ ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
vkr_device_free_queue_sync(queue->device, sync);
} else {
list_addtail(&sync->head, &queue->signaled_syncs);
@@ -350,6 +367,26 @@ vkr_dispatch_vkGetDeviceQueue2(struct vn_dispatch_context *dispatch,
return;
}
+ const VkDeviceQueueTimelineInfoMESA *timeline_info = vkr_find_struct(
+ args->pQueueInfo->pNext, VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA);
+ if (timeline_info) {
+ if (timeline_info->ringIdx == 0 ||
+ timeline_info->ringIdx >= ARRAY_SIZE(ctx->sync_queues)) {
+ vkr_log("invalid ring_idx %d", timeline_info->ringIdx);
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+ return;
+ }
+
+ if (ctx->sync_queues[timeline_info->ringIdx]) {
+ vkr_log("sync_queue %d already bound", timeline_info->ringIdx);
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+ return;
+ }
+
+ queue->ring_idx = timeline_info->ringIdx;
+ ctx->sync_queues[timeline_info->ringIdx] = queue;
+ }
+
const vkr_object_id id =
vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
vkr_queue_assign_object_id(ctx, queue, id);
@@ -359,17 +396,24 @@ static void
vkr_dispatch_vkQueueSubmit(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkQueueSubmit *args)
{
+ struct vkr_queue *queue = vkr_queue_from_handle(args->queue);
+ struct vn_device_proc_table *vk = &queue->device->proc_table;
+
vn_replace_vkQueueSubmit_args_handle(args);
- args->ret = vkQueueSubmit(args->queue, args->submitCount, args->pSubmits, args->fence);
+ args->ret =
+ vk->QueueSubmit(args->queue, args->submitCount, args->pSubmits, args->fence);
}
static void
vkr_dispatch_vkQueueBindSparse(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkQueueBindSparse *args)
{
+ struct vkr_queue *queue = vkr_queue_from_handle(args->queue);
+ struct vn_device_proc_table *vk = &queue->device->proc_table;
+
vn_replace_vkQueueBindSparse_args_handle(args);
args->ret =
- vkQueueBindSparse(args->queue, args->bindInfoCount, args->pBindInfo, args->fence);
+ vk->QueueBindSparse(args->queue, args->bindInfoCount, args->pBindInfo, args->fence);
}
static void
@@ -382,6 +426,18 @@ vkr_dispatch_vkQueueWaitIdle(struct vn_dispatch_context *dispatch,
}
static void
+vkr_dispatch_vkQueueSubmit2(UNUSED struct vn_dispatch_context *dispatch,
+ struct vn_command_vkQueueSubmit2 *args)
+{
+ struct vkr_queue *queue = vkr_queue_from_handle(args->queue);
+ struct vn_device_proc_table *vk = &queue->device->proc_table;
+
+ vn_replace_vkQueueSubmit2_args_handle(args);
+ args->ret =
+ vk->QueueSubmit2(args->queue, args->submitCount, args->pSubmits, args->fence);
+}
+
+static void
vkr_dispatch_vkCreateFence(struct vn_dispatch_context *dispatch,
struct vn_command_vkCreateFence *args)
{
@@ -399,16 +455,22 @@ static void
vkr_dispatch_vkResetFences(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkResetFences *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkResetFences_args_handle(args);
- args->ret = vkResetFences(args->device, args->fenceCount, args->pFences);
+ args->ret = vk->ResetFences(args->device, args->fenceCount, args->pFences);
}
static void
vkr_dispatch_vkGetFenceStatus(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetFenceStatus *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetFenceStatus_args_handle(args);
- args->ret = vkGetFenceStatus(args->device, args->fence);
+ args->ret = vk->GetFenceStatus(args->device, args->fence);
}
static void
@@ -416,20 +478,42 @@ vkr_dispatch_vkWaitForFences(struct vn_dispatch_context *dispatch,
struct vn_command_vkWaitForFences *args)
{
struct vkr_context *ctx = dispatch->data;
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
+ vn_replace_vkWaitForFences_args_handle(args);
+ args->ret = vk->WaitForFences(args->device, args->fenceCount, args->pFences,
+ args->waitAll, args->timeout);
- /* Being single-threaded, we cannot afford potential blocking calls. It
- * also leads to GPU lost when the wait never returns and can only be
- * unblocked by a following command (e.g., vkCmdWaitEvents that is
- * unblocked by a following vkSetEvent).
- */
- if (args->timeout) {
+ if (args->ret == VK_ERROR_DEVICE_LOST)
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+}
+
+static void
+vkr_dispatch_vkResetFenceResource100000MESA(
+ struct vn_dispatch_context *dispatch,
+ struct vn_command_vkResetFenceResource100000MESA *args)
+{
+ struct vkr_context *ctx = dispatch->data;
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+ int fd = -1;
+
+ vn_replace_vkResetFenceResource100000MESA_args_handle(args);
+
+ const VkFenceGetFdInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
+ .fence = args->fence,
+ .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
+ };
+ VkResult result = vk->GetFenceFdKHR(args->device, &info, &fd);
+ if (result != VK_SUCCESS) {
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
- vn_replace_vkWaitForFences_args_handle(args);
- args->ret = vkWaitForFences(args->device, args->fenceCount, args->pFences,
- args->waitAll, args->timeout);
+ if (fd >= 0)
+ close(fd);
}
static void
@@ -451,9 +535,10 @@ vkr_dispatch_vkGetSemaphoreCounterValue(UNUSED struct vn_dispatch_context *dispa
struct vn_command_vkGetSemaphoreCounterValue *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkGetSemaphoreCounterValue_args_handle(args);
- args->ret = dev->GetSemaphoreCounterValue(args->device, args->semaphore, args->pValue);
+ args->ret = vk->GetSemaphoreCounterValue(args->device, args->semaphore, args->pValue);
}
static void
@@ -462,15 +547,13 @@ vkr_dispatch_vkWaitSemaphores(struct vn_dispatch_context *dispatch,
{
struct vkr_context *ctx = dispatch->data;
struct vkr_device *dev = vkr_device_from_handle(args->device);
-
- /* no blocking call */
- if (args->timeout) {
- vkr_cs_decoder_set_fatal(&ctx->decoder);
- return;
- }
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkWaitSemaphores_args_handle(args);
- args->ret = dev->WaitSemaphores(args->device, args->pWaitInfo, args->timeout);
+ args->ret = vk->WaitSemaphores(args->device, args->pWaitInfo, args->timeout);
+
+ if (args->ret == VK_ERROR_DEVICE_LOST)
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
}
static void
@@ -478,9 +561,65 @@ vkr_dispatch_vkSignalSemaphore(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkSignalSemaphore *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkSignalSemaphore_args_handle(args);
- args->ret = dev->SignalSemaphore(args->device, args->pSignalInfo);
+ args->ret = vk->SignalSemaphore(args->device, args->pSignalInfo);
+}
+
+static void
+vkr_dispatch_vkWaitSemaphoreResource100000MESA(
+ struct vn_dispatch_context *dispatch,
+ struct vn_command_vkWaitSemaphoreResource100000MESA *args)
+{
+ struct vkr_context *ctx = dispatch->data;
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+ int fd = -1;
+
+ vn_replace_vkWaitSemaphoreResource100000MESA_args_handle(args);
+
+ const VkSemaphoreGetFdInfoKHR info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
+ .semaphore = args->semaphore,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ };
+ VkResult result = vk->GetSemaphoreFdKHR(args->device, &info, &fd);
+ if (result != VK_SUCCESS) {
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+ return;
+ }
+
+ if (fd >= 0)
+ close(fd);
+}
+
+static void
+vkr_dispatch_vkImportSemaphoreResource100000MESA(
+ struct vn_dispatch_context *dispatch,
+ struct vn_command_vkImportSemaphoreResource100000MESA *args)
+{
+ struct vkr_context *ctx = dispatch->data;
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
+ vn_replace_vkImportSemaphoreResource100000MESA_args_handle(args);
+
+ const VkImportSemaphoreResourceInfo100000MESA *res_info =
+ args->pImportSemaphoreResourceInfo;
+
+ /* resourceId 0 is for importing a signaled payload to sync_fd fence */
+ assert(!res_info->resourceId);
+
+ const VkImportSemaphoreFdInfoKHR import_info = {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
+ .semaphore = res_info->semaphore,
+ .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+ .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ .fd = -1,
+ };
+ if (vk->ImportSemaphoreFdKHR(args->device, &import_info) != VK_SUCCESS)
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
}
static void
@@ -501,24 +640,33 @@ static void
vkr_dispatch_vkGetEventStatus(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetEventStatus *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetEventStatus_args_handle(args);
- args->ret = vkGetEventStatus(args->device, args->event);
+ args->ret = vk->GetEventStatus(args->device, args->event);
}
static void
vkr_dispatch_vkSetEvent(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkSetEvent *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkSetEvent_args_handle(args);
- args->ret = vkSetEvent(args->device, args->event);
+ args->ret = vk->SetEvent(args->device, args->event);
}
static void
vkr_dispatch_vkResetEvent(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkResetEvent *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkResetEvent_args_handle(args);
- args->ret = vkResetEvent(args->device, args->event);
+ args->ret = vk->ResetEvent(args->device, args->event);
}
void
@@ -531,6 +679,9 @@ vkr_context_init_queue_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkQueueSubmit = vkr_dispatch_vkQueueSubmit;
dispatch->dispatch_vkQueueBindSparse = vkr_dispatch_vkQueueBindSparse;
dispatch->dispatch_vkQueueWaitIdle = vkr_dispatch_vkQueueWaitIdle;
+
+ /* VK_KHR_synchronization2 */
+ dispatch->dispatch_vkQueueSubmit2 = vkr_dispatch_vkQueueSubmit2;
}
void
@@ -543,6 +694,9 @@ vkr_context_init_fence_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkResetFences = vkr_dispatch_vkResetFences;
dispatch->dispatch_vkGetFenceStatus = vkr_dispatch_vkGetFenceStatus;
dispatch->dispatch_vkWaitForFences = vkr_dispatch_vkWaitForFences;
+
+ dispatch->dispatch_vkResetFenceResource100000MESA =
+ vkr_dispatch_vkResetFenceResource100000MESA;
}
void
@@ -556,6 +710,11 @@ vkr_context_init_semaphore_dispatch(struct vkr_context *ctx)
vkr_dispatch_vkGetSemaphoreCounterValue;
dispatch->dispatch_vkWaitSemaphores = vkr_dispatch_vkWaitSemaphores;
dispatch->dispatch_vkSignalSemaphore = vkr_dispatch_vkSignalSemaphore;
+
+ dispatch->dispatch_vkWaitSemaphoreResource100000MESA =
+ vkr_dispatch_vkWaitSemaphoreResource100000MESA;
+ dispatch->dispatch_vkImportSemaphoreResource100000MESA =
+ vkr_dispatch_vkImportSemaphoreResource100000MESA;
}
void
diff --git a/src/venus/vkr_queue.h b/src/venus/vkr_queue.h
index b0dce2ca..4ca9d649 100644
--- a/src/venus/vkr_queue.h
+++ b/src/venus/vkr_queue.h
@@ -10,10 +10,11 @@
struct vkr_queue_sync {
VkFence fence;
+ bool device_lost;
uint32_t flags;
- uint64_t queue_id;
- void *fence_cookie;
+ uint32_t ring_idx;
+ uint64_t fence_id;
struct list_head head;
};
@@ -28,6 +29,9 @@ struct vkr_queue {
uint32_t family;
uint32_t index;
+ /* only used when client driver uses multiple timelines */
+ uint32_t ring_idx;
+
/* Submitted fences are added to pending_syncs first. How submitted fences
* are retired depends on VKR_RENDERER_THREAD_SYNC and
* VKR_RENDERER_ASYNC_FENCE_CB.
@@ -86,8 +90,8 @@ vkr_context_init_event_dispatch(struct vkr_context *ctx);
struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
uint32_t fence_flags,
- uint64_t queue_id,
- void *fence_cookie);
+ uint32_t ring_idx,
+ uint64_t fence_id);
void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync);
diff --git a/src/venus/vkr_render_pass.c b/src/venus/vkr_render_pass.c
index 6f0d1e36..4d8c59ef 100644
--- a/src/venus/vkr_render_pass.c
+++ b/src/venus/vkr_render_pass.c
@@ -20,6 +20,7 @@ vkr_dispatch_vkCreateRenderPass2(struct vn_dispatch_context *dispatch,
{
struct vkr_context *ctx = dispatch->data;
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
struct vkr_render_pass *pass = vkr_context_alloc_object(
ctx, sizeof(*pass), VK_OBJECT_TYPE_RENDER_PASS, args->pRenderPass);
@@ -29,8 +30,8 @@ vkr_dispatch_vkCreateRenderPass2(struct vn_dispatch_context *dispatch,
}
vn_replace_vkCreateRenderPass2_args_handle(args);
- args->ret = dev->CreateRenderPass2(args->device, args->pCreateInfo, NULL,
- &pass->base.handle.render_pass);
+ args->ret = vk->CreateRenderPass2(args->device, args->pCreateInfo, NULL,
+ &pass->base.handle.render_pass);
if (args->ret != VK_SUCCESS) {
free(pass);
return;
@@ -50,8 +51,11 @@ static void
vkr_dispatch_vkGetRenderAreaGranularity(UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetRenderAreaGranularity *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetRenderAreaGranularity_args_handle(args);
- vkGetRenderAreaGranularity(args->device, args->renderPass, args->pGranularity);
+ vk->GetRenderAreaGranularity(args->device, args->renderPass, args->pGranularity);
}
static void
diff --git a/src/venus/vkr_renderer.c b/src/venus/vkr_renderer.c
index 0f19885a..64ab3725 100644
--- a/src/venus/vkr_renderer.c
+++ b/src/venus/vkr_renderer.c
@@ -26,9 +26,28 @@ vkr_get_capset(void *capset)
c->wire_format_version = vn_info_wire_format_version();
c->vk_xml_version = vn_info_vk_xml_version();
c->vk_ext_command_serialization_spec_version =
- vn_info_extension_spec_version("VK_EXT_command_serialization");
+ vkr_extension_get_spec_version("VK_EXT_command_serialization");
c->vk_mesa_venus_protocol_spec_version =
- vn_info_extension_spec_version("VK_MESA_venus_protocol");
+ vkr_extension_get_spec_version("VK_MESA_venus_protocol");
+ /* After https://gitlab.freedesktop.org/virgl/virglrenderer/-/merge_requests/688,
+ * this flag is used to indicate render server config, and will be needed until drm
+ * virtio-gpu blob mem gets fixed to attach_resource before resource_map.
+ */
+ c->supports_blob_id_0 = (bool)(vkr_renderer_flags & VKR_RENDERER_RENDER_SERVER);
+
+ uint32_t ext_mask[VN_INFO_EXTENSION_MAX_NUMBER / 32 + 1] = { 0 };
+ vn_info_extension_mask_init(ext_mask);
+
+ static_assert(sizeof(ext_mask) <= sizeof(c->vk_extension_mask1),
+ "Time to extend venus capset with vk_extension_mask2");
+ memcpy(c->vk_extension_mask1, ext_mask, sizeof(ext_mask));
+
+ /* set bit 0 to enable the extension mask(s) */
+ assert(!(c->vk_extension_mask1[0] & 0x1u));
+ c->vk_extension_mask1[0] |= 0x1u;
+
+ c->allow_vk_wait_syncs = 1;
+ c->supports_multiple_timelines = 1;
}
return sizeof(*c);
@@ -37,8 +56,6 @@ vkr_get_capset(void *capset)
int
vkr_renderer_init(uint32_t flags)
{
- /* TODO VKR_RENDERER_MULTI_PROCESS hint */
-
if ((vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) &&
!(vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC))
return -EINVAL;
diff --git a/src/venus/vkr_renderer.h b/src/venus/vkr_renderer.h
index 8adbb63a..1a3849ae 100644
--- a/src/venus/vkr_renderer.h
+++ b/src/venus/vkr_renderer.h
@@ -11,12 +11,12 @@
#include <stddef.h>
#include <stdint.h>
-#include "os/os_misc.h"
+#include "util/os_misc.h"
#include "virgl_util.h"
#define VKR_RENDERER_THREAD_SYNC (1u << 0)
-#define VKR_RENDERER_MULTI_PROCESS (1u << 1)
-#define VKR_RENDERER_ASYNC_FENCE_CB (1u << 2)
+#define VKR_RENDERER_ASYNC_FENCE_CB (1u << 1)
+#define VKR_RENDERER_RENDER_SERVER (1u << 2)
struct virgl_context;
diff --git a/src/venus/vkr_ring.c b/src/venus/vkr_ring.c
index c6aaeeb2..5fea070d 100644
--- a/src/venus/vkr_ring.c
+++ b/src/venus/vkr_ring.c
@@ -8,40 +8,43 @@
#include <stdio.h>
#include <time.h>
-#include "virgl_context.h"
#include "vrend_iov.h"
+#include "vkr_context.h"
+
enum vkr_ring_status_flag {
VKR_RING_STATUS_IDLE = 1u << 0,
};
/* callers must make sure they do not seek to end-of-resource or beyond */
static const struct iovec *
-seek_resource(const struct virgl_resource *res,
+seek_resource(const struct vkr_resource_attachment *att,
int base_iov_index,
size_t offset,
int *out_iov_index,
size_t *out_iov_offset)
{
- const struct iovec *iov = &res->iov[base_iov_index];
- assert(iov - res->iov < res->iov_count);
+ const struct iovec *iov = &att->iov[base_iov_index];
+ assert(iov - att->iov < att->iov_count);
while (offset >= iov->iov_len) {
offset -= iov->iov_len;
iov++;
- assert(iov - res->iov < res->iov_count);
+ assert(iov - att->iov < att->iov_count);
}
- *out_iov_index = iov - res->iov;
+ *out_iov_index = iov - att->iov;
*out_iov_offset = offset;
return iov;
}
static void *
-get_resource_pointer(const struct virgl_resource *res, int base_iov_index, size_t offset)
+get_resource_pointer(const struct vkr_resource_attachment *att,
+ int base_iov_index,
+ size_t offset)
{
const struct iovec *iov =
- seek_resource(res, base_iov_index, offset, &base_iov_index, &offset);
+ seek_resource(att, base_iov_index, offset, &base_iov_index, &offset);
return (uint8_t *)iov->iov_base + offset;
}
@@ -50,7 +53,7 @@ vkr_ring_init_extra(struct vkr_ring *ring, const struct vkr_ring_layout *layout)
{
struct vkr_ring_extra *extra = &ring->extra;
- seek_resource(layout->resource, 0, layout->extra.begin, &extra->base_iov_index,
+ seek_resource(layout->attachment, 0, layout->extra.begin, &extra->base_iov_index,
&extra->base_iov_offset);
extra->region = vkr_region_make_relative(&layout->extra);
@@ -62,11 +65,11 @@ vkr_ring_init_buffer(struct vkr_ring *ring, const struct vkr_ring_layout *layout
struct vkr_ring_buffer *buf = &ring->buffer;
const struct iovec *base_iov =
- seek_resource(layout->resource, 0, layout->buffer.begin, &buf->base_iov_index,
+ seek_resource(layout->attachment, 0, layout->buffer.begin, &buf->base_iov_index,
&buf->base_iov_offset);
buf->size = vkr_region_size(&layout->buffer);
- assert(buf->size && util_is_power_of_two(buf->size));
+ assert(util_is_power_of_two_nonzero(buf->size));
buf->mask = buf->size - 1;
buf->cur = 0;
@@ -80,9 +83,9 @@ vkr_ring_init_control(struct vkr_ring *ring, const struct vkr_ring_layout *layou
{
struct vkr_ring_control *ctrl = &ring->control;
- ctrl->head = get_resource_pointer(layout->resource, 0, layout->head.begin);
- ctrl->tail = get_resource_pointer(layout->resource, 0, layout->tail.begin);
- ctrl->status = get_resource_pointer(layout->resource, 0, layout->status.begin);
+ ctrl->head = get_resource_pointer(layout->attachment, 0, layout->head.begin);
+ ctrl->tail = get_resource_pointer(layout->attachment, 0, layout->tail.begin);
+ ctrl->status = get_resource_pointer(layout->attachment, 0, layout->status.begin);
/* we will manage head and status, and we expect them to be 0 initially */
if (*ctrl->head || *ctrl->status)
@@ -120,7 +123,7 @@ static void
vkr_ring_read_buffer(struct vkr_ring *ring, void *data, uint32_t size)
{
struct vkr_ring_buffer *buf = &ring->buffer;
- const struct virgl_resource *res = ring->resource;
+ const struct vkr_resource_attachment *att = ring->attachment;
assert(size <= buf->size);
const uint32_t buf_offset = buf->cur & buf->mask;
@@ -156,24 +159,24 @@ vkr_ring_read_buffer(struct vkr_ring *ring, void *data, uint32_t size)
return;
}
} else {
- vrend_read_from_iovec(buf->cur_iov, res->iov_count - buf->cur_iov_index,
+ vrend_read_from_iovec(buf->cur_iov, att->iov_count - buf->cur_iov_index,
buf->cur_iov_offset, data, read_size);
}
if (wrap_size) {
- vrend_read_from_iovec(res->iov + buf->base_iov_index,
- res->iov_count - buf->base_iov_index, buf->base_iov_offset,
+ vrend_read_from_iovec(att->iov + buf->base_iov_index,
+ att->iov_count - buf->base_iov_index, buf->base_iov_offset,
(char *)data + read_size, wrap_size);
}
/* advance cur */
buf->cur += size;
if (!wrap) {
- buf->cur_iov = seek_resource(res, buf->cur_iov_index, buf->cur_iov_offset + size,
+ buf->cur_iov = seek_resource(att, buf->cur_iov_index, buf->cur_iov_offset + size,
&buf->cur_iov_index, &buf->cur_iov_offset);
} else {
buf->cur_iov =
- seek_resource(res, buf->base_iov_index, buf->base_iov_offset + wrap_size,
+ seek_resource(att, buf->base_iov_index, buf->base_iov_offset + wrap_size,
&buf->cur_iov_index, &buf->cur_iov_offset);
}
}
@@ -190,7 +193,7 @@ vkr_ring_create(const struct vkr_ring_layout *layout,
if (!ring)
return NULL;
- ring->resource = layout->resource;
+ ring->attachment = layout->attachment;
if (!vkr_ring_init_control(ring, layout)) {
free(ring);
@@ -229,6 +232,8 @@ vkr_ring_create(const struct vkr_ring_layout *layout,
void
vkr_ring_destroy(struct vkr_ring *ring)
{
+ list_del(&ring->head);
+
assert(!ring->started);
mtx_destroy(&ring->mutex);
cnd_destroy(&ring->cond);
@@ -276,7 +281,7 @@ vkr_ring_thread(void *arg)
char thread_name[16];
snprintf(thread_name, ARRAY_SIZE(thread_name), "vkr-ring-%d", ctx->ctx_id);
- pipe_thread_setname(thread_name);
+ u_thread_setname(thread_name);
uint64_t last_submit = vkr_ring_now();
uint32_t relax_iter = 0;
@@ -346,7 +351,7 @@ bool
vkr_ring_stop(struct vkr_ring *ring)
{
mtx_lock(&ring->mutex);
- if (ring->thread == thrd_current()) {
+ if (thrd_equal(ring->thread, thrd_current())) {
mtx_unlock(&ring->mutex);
return false;
}
@@ -385,7 +390,7 @@ vkr_ring_write_extra(struct vkr_ring *ring, size_t offset, uint32_t val)
/* Mesa always sets offset to 0 and the cache hit rate will be 100% */
extra->cached_offset = offset;
- extra->cached_data = get_resource_pointer(ring->resource, extra->base_iov_index,
+ extra->cached_data = get_resource_pointer(ring->attachment, extra->base_iov_index,
extra->base_iov_offset + offset);
}
diff --git a/src/venus/vkr_ring.h b/src/venus/vkr_ring.h
index 8dc83bb5..56c79738 100644
--- a/src/venus/vkr_ring.h
+++ b/src/venus/vkr_ring.h
@@ -21,7 +21,7 @@
* vkr_ring_create.
*/
struct vkr_ring_layout {
- struct virgl_resource *resource;
+ const struct vkr_resource_attachment *attachment;
struct vkr_region head;
struct vkr_region tail;
@@ -86,7 +86,7 @@ struct vkr_ring {
struct list_head head;
/* ring regions */
- struct virgl_resource *resource;
+ const struct vkr_resource_attachment *attachment;
struct vkr_ring_control control;
struct vkr_ring_buffer buffer;
struct vkr_ring_extra extra;
diff --git a/src/venus/vkr_transport.c b/src/venus/vkr_transport.c
index c0f3e3e9..361c7f31 100644
--- a/src/venus/vkr_transport.c
+++ b/src/venus/vkr_transport.c
@@ -20,15 +20,15 @@ vkr_dispatch_vkSetReplyCommandStreamMESA(
struct vkr_context *ctx = dispatch->data;
struct vkr_resource_attachment *att;
- att = util_hash_table_get(ctx->resource_table,
- uintptr_to_pointer(args->pStream->resourceId));
+ att = vkr_context_get_resource(ctx, args->pStream->resourceId);
if (!att) {
+ vkr_log("failed to set reply stream: invalid res_id %u", args->pStream->resourceId);
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
- vkr_cs_encoder_set_stream(&ctx->encoder, att->resource->iov, att->resource->iov_count,
- args->pStream->offset, args->pStream->size);
+ vkr_cs_encoder_set_stream(&ctx->encoder, att, args->pStream->offset,
+ args->pStream->size);
}
static void
@@ -44,33 +44,37 @@ static void *
copy_command_stream(struct vkr_context *ctx, const VkCommandStreamDescriptionMESA *stream)
{
struct vkr_resource_attachment *att;
- struct virgl_resource *res;
- att = util_hash_table_get(ctx->resource_table, uintptr_to_pointer(stream->resourceId));
- if (!att)
+ att = vkr_context_get_resource(ctx, stream->resourceId);
+ if (!att) {
+ vkr_log("failed to copy command stream: invalid res_id %u", stream->resourceId);
return NULL;
- res = att->resource;
+ }
/* seek to offset */
size_t iov_offset = stream->offset;
const struct iovec *iov = NULL;
- for (int i = 0; i < res->iov_count; i++) {
- if (iov_offset < res->iov[i].iov_len) {
- iov = &res->iov[i];
+ for (int i = 0; i < att->iov_count; i++) {
+ if (iov_offset < att->iov[i].iov_len) {
+ iov = &att->iov[i];
break;
}
- iov_offset -= res->iov[i].iov_len;
+ iov_offset -= att->iov[i].iov_len;
}
- if (!iov)
+ if (!iov) {
+ vkr_log("failed to copy command stream: invalid offset %zu", stream->offset);
return NULL;
+ }
/* XXX until the decoder supports scatter-gather and is robust enough,
* always make a copy in case the caller modifies the commands while we
* parse
*/
uint8_t *data = malloc(stream->size);
- if (!data)
+ if (!data) {
+ vkr_log("failed to copy command stream: malloc(%zu) failed", stream->size);
return NULL;
+ }
uint32_t copied = 0;
while (true) {
@@ -80,7 +84,8 @@ copy_command_stream(struct vkr_context *ctx, const VkCommandStreamDescriptionMES
copied += s;
if (copied == stream->size) {
break;
- } else if (iov == &res->iov[res->iov_count - 1]) {
+ } else if (iov == &att->iov[att->iov_count - 1]) {
+ vkr_log("failed to copy command stream: invalid size %zu", stream->size);
free(data);
return NULL;
}
@@ -99,13 +104,15 @@ vkr_dispatch_vkExecuteCommandStreamsMESA(
{
struct vkr_context *ctx = dispatch->data;
- if (!args->streamCount) {
+ if (unlikely(!args->streamCount)) {
+ vkr_log("failed to execute command streams: no stream specified");
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
/* note that nested vkExecuteCommandStreamsMESA is not allowed */
- if (!vkr_cs_decoder_push_state(&ctx->decoder)) {
+ if (unlikely(!vkr_cs_decoder_push_state(&ctx->decoder))) {
+ vkr_log("failed to execute command streams: nested execution");
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
@@ -154,12 +161,12 @@ lookup_ring(struct vkr_context *ctx, uint64_t ring_id)
static bool
vkr_ring_layout_init(struct vkr_ring_layout *layout,
- struct virgl_resource *res,
+ const struct vkr_resource_attachment *att,
const VkRingCreateInfoMESA *info)
{
/* clang-format off */
*layout = (struct vkr_ring_layout){
- .resource = res,
+ .attachment = att,
.head = VKR_REGION_INIT(info->offset + info->headOffset, sizeof(uint32_t)),
.tail = VKR_REGION_INIT(info->offset + info->tailOffset, sizeof(uint32_t)),
.status = VKR_REGION_INIT(info->offset + info->statusOffset, sizeof(uint32_t)),
@@ -178,7 +185,7 @@ vkr_ring_layout_init(struct vkr_ring_layout *layout,
/* clang-format on */
const struct vkr_region res_size =
- VKR_REGION_INIT(0, vrend_get_iovec_size(res->iov, res->iov_count));
+ VKR_REGION_INIT(0, vrend_get_iovec_size(att->iov, att->iov_count));
if (!vkr_region_is_valid(&res_region) || !vkr_region_is_within(&res_region, &res_size))
return false;
@@ -217,10 +224,9 @@ vkr_ring_layout_init(struct vkr_ring_layout *layout,
}
const size_t buf_size = vkr_region_size(&layout->buffer);
- if (!buf_size || buf_size > VKR_RING_BUFFER_MAX_SIZE ||
- !util_is_power_of_two(buf_size)) {
- vkr_log("ring buffer size (%lu) must be a power of two and not exceed %lu",
- buf_size, VKR_RING_BUFFER_MAX_SIZE);
+ if (buf_size > VKR_RING_BUFFER_MAX_SIZE || !util_is_power_of_two_nonzero(buf_size)) {
+ vkr_log("ring buffer size (%zu) must be a power of two and not exceed %lu", buf_size,
+ VKR_RING_BUFFER_MAX_SIZE);
return false;
}
@@ -236,14 +242,14 @@ vkr_dispatch_vkCreateRingMESA(struct vn_dispatch_context *dispatch,
const struct vkr_resource_attachment *att;
struct vkr_ring *ring;
- att = util_hash_table_get(ctx->resource_table, uintptr_to_pointer(info->resourceId));
+ att = vkr_context_get_resource(ctx, info->resourceId);
if (!att) {
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
struct vkr_ring_layout layout;
- if (!vkr_ring_layout_init(&layout, att->resource, info)) {
+ if (!vkr_ring_layout_init(&layout, att, info)) {
vkr_log("vkCreateRingMESA supplied with invalid buffer layout parameters");
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
@@ -272,7 +278,6 @@ vkr_dispatch_vkDestroyRingMESA(struct vn_dispatch_context *dispatch,
return;
}
- list_del(&ring->head);
vkr_ring_destroy(ring);
}
@@ -314,6 +319,7 @@ vkr_dispatch_vkGetVenusExperimentalFeatureData100000MESA(
.memoryResourceAllocationSize = VK_TRUE,
.globalFencing = VK_FALSE,
.largeRing = VK_TRUE,
+ .syncFdFencing = VK_TRUE,
};
vn_replace_vkGetVenusExperimentalFeatureData100000MESA_args_handle(args);
diff --git a/src/venus_hw.h b/src/venus_hw.h
index 076e616a..18bd1964 100644
--- a/src/venus_hw.h
+++ b/src/venus_hw.h
@@ -32,6 +32,38 @@ struct virgl_renderer_capset_venus {
uint32_t vk_xml_version;
uint32_t vk_ext_command_serialization_spec_version;
uint32_t vk_mesa_venus_protocol_spec_version;
+
+ /* This flag indicates render server config, and will be needed until drm
+ * virtio-gpu blob mem gets fixed to attach_resource before resource_map.
+ */
+ uint32_t supports_blob_id_0;
+
+ /* Extension number N, where N is defined by the Vulkan spec, corresponds
+ * to bit [N / 32] & (1 << N % 32). The below mask1 covers the first 1023
+ * Vulkan extensions (numbered from 1 to 1023).
+ *
+ * Bit (mask1[0] & 0x1) is used for backward compatibility purpose. When
+ * that bit is set, the extension mask(s) are valid. Otherwise, all the
+ * extensions are assumed to be supported by the renderer side protocol.
+ */
+ uint32_t vk_extension_mask1[32];
+
+ /* The single-threaded renderer cannot afford potential blocking calls. It
+ * also leads to GPU lost if the wait depends on a following command. This
+ * capset allows such blocking calls to passthrough from the clients, and
+ * shifts the responsibilities to the client drivers.
+ */
+ uint32_t allow_vk_wait_syncs;
+
+ /* This flag indicates that the renderer supports multiple fencing
+ * timelines. The client driver is expected to associate each VkQueue with
+ * one of these timelines at queue creation by binding it with an unused
+ * ring_idx. Queues created without a ring_idx binding are associated to a
+ * shared legacy timeline. The special ring_idx==0 is reserved for CPU
+ * fences that are signaled by the renderer immediately upon consumption of
+ * the associated renderer submission.
+ */
+ uint32_t supports_multiple_timelines;
};
#endif
diff --git a/src/virgl_context.c b/src/virgl_context.c
index 6df23091..b74aad92 100644
--- a/src/virgl_context.c
+++ b/src/virgl_context.c
@@ -26,7 +26,7 @@
#include <errno.h>
-#include "os/os_misc.h"
+#include "util/os_misc.h"
#include "util/u_hash_table.h"
#include "util/u_pointer.h"
#include "virgl_util.h"
@@ -44,7 +44,7 @@ int
virgl_context_table_init(void)
{
virgl_context_table = util_hash_table_create(hash_func_u32,
- compare_func,
+ equal_func,
virgl_context_destroy_func);
return virgl_context_table ? 0 : ENOMEM;
}
@@ -53,6 +53,7 @@ void
virgl_context_table_cleanup(void)
{
util_hash_table_destroy(virgl_context_table);
+ virgl_context_table = NULL;
}
void
diff --git a/src/virgl_context.h b/src/virgl_context.h
index ea86b31e..046948d8 100644
--- a/src/virgl_context.h
+++ b/src/virgl_context.h
@@ -40,19 +40,20 @@ struct virgl_context_blob {
enum virgl_resource_fd_type type;
union {
int fd;
+ uint32_t opaque_handle;
struct pipe_resource *pipe_resource;
} u;
uint32_t map_info;
- void *renderer_data;
+ struct virgl_resource_opaque_fd_metadata opaque_fd_metadata;
};
struct virgl_context;
typedef void (*virgl_context_fence_retire)(struct virgl_context *ctx,
- uint64_t queue_id,
- void *fence_cookie);
+ uint32_t ring_idx,
+ uint64_t fence_id);
/**
* Base class for renderer contexts. For example, vrend_decode_ctx is a
@@ -79,6 +80,9 @@ struct virgl_context {
struct virgl_resource *res);
void (*detach_resource)(struct virgl_context *ctx,
struct virgl_resource *res);
+ enum virgl_resource_fd_type (*export_opaque_handle)(struct virgl_context *ctx,
+ struct virgl_resource *res,
+ int *out_fd);
int (*transfer_3d)(struct virgl_context *ctx,
struct virgl_resource *res,
@@ -88,19 +92,17 @@ struct virgl_context {
/* These are used to create a virgl_resource from a context object.
*
* get_blob returns a virgl_context_blob from which a virgl_resource can be
- * created. get_blob_done is optional and allows the context to associate
- * the newly created resource with the context object.
+ * created.
*
* Note that get_blob is a one-time thing. The context object might be
* destroyed or reject subsequent get_blob calls.
*/
int (*get_blob)(struct virgl_context *ctx,
+ uint32_t res_id,
uint64_t blob_id,
+ uint64_t blob_size,
uint32_t blob_flags,
struct virgl_context_blob *blob);
- void (*get_blob_done)(struct virgl_context *ctx,
- uint32_t res_id,
- struct virgl_context_blob *blob);
int (*submit_cmd)(struct virgl_context *ctx,
const void *buffer,
@@ -118,8 +120,8 @@ struct virgl_context {
/* submit a fence to the queue identified by queue_id */
int (*submit_fence)(struct virgl_context *ctx,
uint32_t flags,
- uint64_t queue_id,
- void *fence_cookie);
+ uint32_t ring_idx,
+ uint64_t fence_id);
};
struct virgl_context_foreach_args {
diff --git a/src/virgl_hw.h b/src/virgl_hw.h
index 81cef9c5..dfbcf816 100644
--- a/src/virgl_hw.h
+++ b/src/virgl_hw.h
@@ -343,6 +343,12 @@ enum virgl_formats {
VIRGL_FORMAT_A8L8_SNORM = 260,
VIRGL_FORMAT_A8L8_SRGB = 261,
+ VIRGL_FORMAT_A1B5G5R5_UNORM = 262,
+ VIRGL_FORMAT_A1R5G5B5_UNORM = 263,
+ VIRGL_FORMAT_A2B10G10R10_UNORM = 264,
+ VIRGL_FORMAT_A2R10G10B10_UNORM = 265,
+ VIRGL_FORMAT_A4R4G4B4_UNORM = 266,
+
VIRGL_FORMAT_X8B8G8R8_SNORM = 268,
@@ -391,6 +397,18 @@ enum virgl_formats {
VIRGL_FORMAT_A4B4G4R4_UNORM = 311,
VIRGL_FORMAT_R8_SRGB = 312,
+ VIRGL_FORMAT_R8G8_SRGB = 313,
+
+ VIRGL_FORMAT_P010 = 314,
+ VIRGL_FORMAT_P012 = 315,
+ VIRGL_FORMAT_P016 = 316,
+
+ VIRGL_FORMAT_B8G8R8_UNORM = 317,
+ VIRGL_FORMAT_R3G3B2_UNORM = 318,
+ VIRGL_FORMAT_R4G4B4A4_UNORM = 319,
+ VIRGL_FORMAT_R5G5B5A1_UNORM = 320,
+ VIRGL_FORMAT_R5G6B5_UNORM = 321,
+
VIRGL_FORMAT_MAX /* = PIPE_FORMAT_COUNT */,
/* Below formats must not be used in the guest. */
@@ -420,7 +438,7 @@ enum virgl_formats {
#define VIRGL_CAP_QBO (1 << 16)
#define VIRGL_CAP_TRANSFER (1 << 17)
#define VIRGL_CAP_FBO_MIXED_COLOR_FORMATS (1 << 18)
-#define VIRGL_CAP_FAKE_FP64 (1 << 19)
+#define VIRGL_CAP_HOST_IS_GLES (1 << 19)
#define VIRGL_CAP_BIND_COMMAND_ARGS (1 << 20)
#define VIRGL_CAP_MULTI_DRAW_INDIRECT (1 << 21)
#define VIRGL_CAP_INDIRECT_PARAMS (1 << 22)
@@ -434,6 +452,9 @@ enum virgl_formats {
#define VIRGL_CAP_CLEAR_TEXTURE (1 << 30)
#define VIRGL_CAP_ARB_BUFFER_STORAGE (1 << 31)
+// Legacy alias
+#define VIRGL_CAP_FAKE_FP64 VIRGL_CAP_HOST_IS_GLES
+
/* These are used by the capability_bits_v2 field in virgl_caps_v2. */
#define VIRGL_CAP_V2_BLEND_EQUATION (1 << 0)
#define VIRGL_CAP_V2_UNTYPED_RESOURCE (1 << 1)
@@ -442,6 +463,10 @@ enum virgl_formats {
#define VIRGL_CAP_V2_STRING_MARKER (1 << 4)
#define VIRGL_CAP_V2_DIFFERENT_GPU (1 << 5)
#define VIRGL_CAP_V2_IMPLICIT_MSAA (1 << 6)
+#define VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS (1 << 7)
+#define VIRGL_CAP_V2_SCANOUT_USES_GBM (1 << 8)
+#define VIRGL_CAP_V2_SSO (1 << 9)
+#define VIRGL_CAP_V2_TEXTURE_SHADOW_LOD (1 << 10)
/* virgl bind flags - these are compatible with mesa 10.5 gallium.
* but are fixed, no other should be passed to virgl either.
@@ -466,7 +491,7 @@ enum virgl_formats {
#define VIRGL_BIND_STAGING (1 << 19)
#define VIRGL_BIND_SHARED (1 << 20)
-/* bit (1<<21) reserved for non-functional VIRGL_BIND_PREFER_EMULATED_BGRA */
+#define VIRGL_BIND_PREFER_EMULATED_BGRA (1 << 21) /* non-functional */
#define VIRGL_BIND_LINEAR (1 << 22)
@@ -543,6 +568,26 @@ struct virgl_caps_v1 {
uint32_t max_texture_gather_components;
};
+struct virgl_video_caps {
+ uint32_t profile:8;
+ uint32_t entrypoint:8;
+ uint32_t max_level:8;
+ uint32_t stacked_frames:8;
+
+ uint32_t max_width:16;
+ uint32_t max_height:16;
+
+ uint32_t prefered_format:16;
+ uint32_t max_macroblocks:16;
+
+ uint32_t npot_texture:1;
+ uint32_t supports_progressive:1;
+ uint32_t supports_interlaced:1;
+ uint32_t prefers_interlaced:1;
+ uint32_t max_temporal_layers:8;
+ uint32_t reserved:20;
+};
+
/*
* This struct should be growable when used in capset 2,
* so we shouldn't have to add a v3 ever.
@@ -597,6 +642,12 @@ struct virgl_caps_v2 {
uint32_t max_video_memory;
char renderer[64];
float max_anisotropy;
+ uint32_t max_texture_image_units;
+ struct virgl_supported_format_mask supported_multisample_formats;
+ uint32_t max_const_buffer_size[6]; // PIPE_SHADER_TYPES
+ uint32_t num_video_caps;
+ struct virgl_video_caps video_caps[32];
+ uint32_t max_uniform_block_size;
};
union virgl_caps {
@@ -625,7 +676,9 @@ enum virgl_ctx_errors {
VIRGL_ERROR_CTX_ILLEGAL_FORMAT,
VIRGL_ERROR_CTX_ILLEGAL_SAMPLER_VIEW_TARGET,
VIRGL_ERROR_CTX_TRANSFER_IOV_BOUNDS,
- VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND
+ VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND,
+ VIRGL_ERROR_CTX_UNSUPPORTED_FUNCTION,
+ VIRGL_ERROR_CTX_ILLEGAL_PROGRAM_PIPELINE,
};
/**
diff --git a/src/virgl_protocol.h b/src/virgl_protocol.h
index c1797d98..98aa431e 100644
--- a/src/virgl_protocol.h
+++ b/src/virgl_protocol.h
@@ -116,6 +116,19 @@ enum virgl_context_cmd {
VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE,
VIRGL_CCMD_GET_MEMORY_INFO,
VIRGL_CCMD_SEND_STRING_MARKER,
+ VIRGL_CCMD_LINK_SHADER,
+
+ /* video codec */
+ VIRGL_CCMD_CREATE_VIDEO_CODEC,
+ VIRGL_CCMD_DESTROY_VIDEO_CODEC,
+ VIRGL_CCMD_CREATE_VIDEO_BUFFER,
+ VIRGL_CCMD_DESTROY_VIDEO_BUFFER,
+ VIRGL_CCMD_BEGIN_FRAME,
+ VIRGL_CCMD_DECODE_MACROBLOCK,
+ VIRGL_CCMD_DECODE_BITSTREAM,
+ VIRGL_CCMD_ENCODE_BITSTREAM,
+ VIRGL_CCMD_END_FRAME,
+
VIRGL_MAX_COMMANDS
};
@@ -370,7 +383,7 @@ enum virgl_context_cmd {
#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(x) (((x) & 0x1) << 15)
#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(x) (((x) & 0x7) << 16)
#define VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(x) (((x) & 0x1) << 19)
-#define VIRGL_OBJ_SAMPLE_STATE_MAX_ANISOTROPY (((x & 0x3f)) << 20)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MAX_ANISOTROPY(x) (((x & 0x3f)) << 20)
#define VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS 3
#define VIRGL_OBJ_SAMPLER_STATE_MIN_LOD 4
@@ -607,12 +620,16 @@ enum virgl_context_cmd {
#define VIRGL_TRANSFER3D_DATA_OFFSET 12
#define VIRGL_TRANSFER3D_DIRECTION 13
-/* Copy transfer */
+/* Copy transfer to host and from host */
#define VIRGL_COPY_TRANSFER3D_SIZE 14
/* The first 11 dwords are the same as VIRGL_RESOURCE_IW_* */
#define VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE 12
#define VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET 13
-#define VIRGL_COPY_TRANSFER3D_SYNCHRONIZED 14
+#define VIRGL_COPY_TRANSFER3D_FLAGS 14
+#define VIRGL_COPY_TRANSFER3D_FLAGS_SYNCHRONIZED (1 << 0)
+/* Bit (1 << 1) set means transfer from host;
+ bit (1 << 1) clear means transfer to host. */
+#define VIRGL_COPY_TRANSFER3D_FLAGS_READ_FROM_HOST (1 << 1)
/* set tweak flags */
#define VIRGL_SET_TWEAKS_SIZE 2
@@ -673,4 +690,68 @@ enum vrend_tweak_type {
#define VIRGL_SEND_STRING_MARKER_STRING_SIZE 1
#define VIRGL_SEND_STRING_MARKER_OFFSET 2
+/* link shader program */
+#define VIRGL_LINK_SHADER_SIZE 6
+#define VIRGL_LINK_SHADER_VERTEX_HANDLE 1
+#define VIRGL_LINK_SHADER_FRAGMENT_HANDLE 2
+#define VIRGL_LINK_SHADER_GEOMETRY_HANDLE 3
+#define VIRGL_LINK_SHADER_TESS_CTRL_HANDLE 4
+#define VIRGL_LINK_SHADER_TESS_EVAL_HANDLE 5
+#define VIRGL_LINK_SHADER_COMPUTE_HANDLE 6
+
+/* VIRGL_CCMD_CREATE_VIDEO_CODEC */
+#define VIRGL_CREATE_VIDEO_CODEC_MIN_SIZE 7
+#define VIRGL_CREATE_VIDEO_CODEC_HANDLE 1
+#define VIRGL_CREATE_VIDEO_CODEC_PROFILE 2
+#define VIRGL_CREATE_VIDEO_CODEC_ENTRYPOINT 3
+#define VIRGL_CREATE_VIDEO_CODEC_CHROMA_FMT 4
+#define VIRGL_CREATE_VIDEO_CODEC_LEVEL 5
+#define VIRGL_CREATE_VIDEO_CODEC_WIDTH 6
+#define VIRGL_CREATE_VIDEO_CODEC_HEIGHT 7
+#define VIRGL_CREATE_VIDEO_CODEC_MAX_REF 8
+
+/* VIRGL_CCMD_DESTROY_VIDEO_CODEC */
+#define VIRGL_DESTROY_VIDEO_CODEC_MIN_SIZE 1
+#define VIRGL_DESTROY_VIDEO_CODEC_HANDLE 1
+
+/* VIRGL_CCMD_CREATE_VIDEO_BUFFER */
+#define VIRGL_CREATE_VIDEO_BUFFER_MIN_SIZE 5
+#define VIRGL_CREATE_VIDEO_BUFFER_HANDLE 1
+#define VIRGL_CREATE_VIDEO_BUFFER_FORMAT 2
+#define VIRGL_CREATE_VIDEO_BUFFER_WIDTH 3
+#define VIRGL_CREATE_VIDEO_BUFFER_HEIGHT 4
+#define VIRGL_CREATE_VIDEO_BUFFER_RES_BASE 5
+
+/* VIRGL_CCMD_DESTROY_VIDEO_BUFFER */
+#define VIRGL_DESTROY_VIDEO_BUFFER_MIN_SIZE 1
+#define VIRGL_DESTROY_VIDEO_BUFFER_HANDLE 1
+
+/* VIRGL_CCMD_BEGIN_FRAME */
+#define VIRGL_BEGIN_FRAME_MIN_SIZE 2
+#define VIRGL_BEGIN_FRAME_CDC_HANDLE 1
+#define VIRGL_BEGIN_FRAME_TGT_HANDLE 2
+
+/* VIRGL_CCMD_DECODE_MACROBLOCK */
+
+/* VIRGL_CCMD_DECODE_BITSTREAM */
+#define VIRGL_DECODE_BS_MIN_SIZE 5
+#define VIRGL_DECODE_BS_CDC_HANDLE 1
+#define VIRGL_DECODE_BS_TGT_HANDLE 2
+#define VIRGL_DECODE_BS_DSC_HANDLE 3
+#define VIRGL_DECODE_BS_BUF_HANDLE 4
+#define VIRGL_DECODE_BS_BUF_SIZE 5
+
+/* VIRGL_CCMD_ENCODE_BITSTREAM */
+#define VIRGL_ENCODE_BS_MIN_SIZE 5
+#define VIRGL_ENCODE_BS_CDC_HANDLE 1
+#define VIRGL_ENCODE_BS_SRC_HANDLE 2
+#define VIRGL_ENCODE_BS_DEST_HANDLE 3
+#define VIRGL_ENCODE_BS_DESC_HANDLE 4
+#define VIRGL_ENCODE_BS_FEED_HANDLE 5
+
+/* VIRGL_CCMD_END_FRAME */
+#define VIRGL_END_FRAME_MIN_SIZE 2
+#define VIRGL_END_FRAME_CDC_HANDLE 1
+#define VIRGL_END_FRAME_TGT_HANDLE 2
+
#endif
diff --git a/src/virgl_resource.c b/src/virgl_resource.c
index 254a6afe..7f2c3e6a 100644
--- a/src/virgl_resource.c
+++ b/src/virgl_resource.c
@@ -30,9 +30,11 @@
#include <string.h>
#include <unistd.h>
+#include "util/os_file.h"
#include "util/u_hash_table.h"
#include "util/u_pointer.h"
#include "virgl_util.h"
+#include "virgl_context.h"
static struct util_hash_table *virgl_resource_table;
static struct virgl_resource_pipe_callbacks pipe_callbacks;
@@ -44,7 +46,8 @@ virgl_resource_destroy_func(void *val)
if (res->pipe_resource)
pipe_callbacks.unref(res->pipe_resource, pipe_callbacks.data);
- if (res->fd_type != VIRGL_RESOURCE_FD_INVALID)
+ if ((res->fd_type != VIRGL_RESOURCE_FD_INVALID) &&
+ (res->fd_type != VIRGL_RESOURCE_OPAQUE_HANDLE))
close(res->fd);
free(res);
@@ -54,7 +57,7 @@ int
virgl_resource_table_init(const struct virgl_resource_pipe_callbacks *callbacks)
{
virgl_resource_table = util_hash_table_create(hash_func_u32,
- compare_func,
+ equal_func,
virgl_resource_destroy_func);
if (!virgl_resource_table)
return ENOMEM;
@@ -69,6 +72,7 @@ void
virgl_resource_table_cleanup(void)
{
util_hash_table_destroy(virgl_resource_table);
+ virgl_resource_table = NULL;
memset(&pipe_callbacks, 0, sizeof(pipe_callbacks));
}
@@ -129,7 +133,8 @@ virgl_resource_create_from_fd(uint32_t res_id,
enum virgl_resource_fd_type fd_type,
int fd,
const struct iovec *iov,
- int iov_count)
+ int iov_count,
+ const struct virgl_resource_opaque_fd_metadata *opaque_fd_metadata)
{
struct virgl_resource *res;
@@ -146,6 +151,34 @@ virgl_resource_create_from_fd(uint32_t res_id,
res->iov = iov;
res->iov_count = iov_count;
+ if (opaque_fd_metadata && fd_type == VIRGL_RESOURCE_FD_OPAQUE)
+ res->opaque_fd_metadata = *opaque_fd_metadata;
+
+ return res;
+}
+
+struct virgl_resource *
+virgl_resource_create_from_opaque_handle(struct virgl_context *ctx,
+ uint32_t res_id,
+ uint32_t opaque_handle)
+{
+ struct virgl_resource *res;
+
+ res = virgl_resource_create(res_id);
+ if (!res)
+ return NULL;
+
+ res->fd_type = VIRGL_RESOURCE_OPAQUE_HANDLE;
+ res->opaque_handle = opaque_handle;
+
+ /* We need the ctx to get an fd from handle (which we don't want to do
+ * until asked, to avoid file descriptor limits)
+ *
+ * Shareable resources should not use OPAQUE_HANDLE, to avoid lifetime
+ * issues (ie. resource outliving the context which created it).
+ */
+ res->opaque_handle_context_id = ctx->ctx_id;
+
return res;
}
@@ -218,14 +251,16 @@ virgl_resource_detach_iov(struct virgl_resource *res)
enum virgl_resource_fd_type
virgl_resource_export_fd(struct virgl_resource *res, int *fd)
{
- if (res->fd_type != VIRGL_RESOURCE_FD_INVALID) {
-#ifdef F_DUPFD_CLOEXEC
- *fd = fcntl(res->fd, F_DUPFD_CLOEXEC, 0);
- if (*fd < 0)
- *fd = dup(res->fd);
-#else
- *fd = dup(res->fd);
-#endif
+ if (res->fd_type == VIRGL_RESOURCE_OPAQUE_HANDLE) {
+ struct virgl_context *ctx;
+
+ ctx = virgl_context_lookup(res->opaque_handle_context_id);
+ if (!ctx)
+ return VIRGL_RESOURCE_FD_INVALID;
+
+ return ctx->export_opaque_handle(ctx, res, fd);
+ } else if (res->fd_type != VIRGL_RESOURCE_FD_INVALID) {
+ *fd = os_dupfd_cloexec(res->fd);
return *fd >= 0 ? res->fd_type : VIRGL_RESOURCE_FD_INVALID;
} else if (res->pipe_resource) {
return pipe_callbacks.export_fd(res->pipe_resource,
@@ -235,3 +270,15 @@ virgl_resource_export_fd(struct virgl_resource *res, int *fd)
return VIRGL_RESOURCE_FD_INVALID;
}
+
+uint64_t
+virgl_resource_get_size(struct virgl_resource *res)
+{
+ if (res->map_size)
+ return res->map_size;
+
+ if (res->pipe_resource)
+ return pipe_callbacks.get_size(res->pipe_resource, pipe_callbacks.data);
+
+ return 0;
+}
diff --git a/src/virgl_resource.h b/src/virgl_resource.h
index 73779940..f610bec9 100644
--- a/src/virgl_resource.h
+++ b/src/virgl_resource.h
@@ -29,14 +29,36 @@
struct iovec;
struct pipe_resource;
+struct virgl_context;
enum virgl_resource_fd_type {
VIRGL_RESOURCE_FD_DMABUF,
VIRGL_RESOURCE_FD_OPAQUE,
+ /* mmap()-able, usually memfd or shm */
+ VIRGL_RESOURCE_FD_SHM,
+
+ /**
+ * An opaque handle can be something like a GEM handle, from which a
+ * fd can be created upon demand.
+ *
+ * Renderers which use this type must implement virgl_context::export_fd
+ *
+ * Do not use this type for resources that are _BLOB_FLAG_USE_SHAREABLE,
+ * as the opaque handle can become invalid/stale any time outside of the
+ * original context.
+ */
+ VIRGL_RESOURCE_OPAQUE_HANDLE,
VIRGL_RESOURCE_FD_INVALID = -1,
};
+struct virgl_resource_opaque_fd_metadata {
+ uint8_t driver_uuid[16];
+ uint8_t device_uuid[16];
+ uint64_t allocation_size;
+ uint32_t memory_type_index;
+};
+
/**
* A global cross-context resource. A virgl_resource is not directly usable
* by renderer contexts, but must be attached and imported into renderer
@@ -60,9 +82,17 @@ struct virgl_resource {
struct pipe_resource *pipe_resource;
+ /* valid fd or handle type: */
enum virgl_resource_fd_type fd_type;
int fd;
+ /**
+ * For fd_type==VIRGL_RESOURCE_OPAQUE_HANDLE, the id of the context
+ * which created this resource
+ */
+ uint32_t opaque_handle_context_id;
+ uint32_t opaque_handle;
+
const struct iovec *iov;
int iov_count;
@@ -71,6 +101,8 @@ struct virgl_resource {
uint64_t map_size;
void *mapped;
+ struct virgl_resource_opaque_fd_metadata opaque_fd_metadata;
+
void *private_data;
};
@@ -88,6 +120,8 @@ struct virgl_resource_pipe_callbacks {
enum virgl_resource_fd_type (*export_fd)(struct pipe_resource *pres,
int *fd,
void *data);
+
+ uint64_t (*get_size)(struct pipe_resource *pres, void *data);
};
int
@@ -110,7 +144,13 @@ virgl_resource_create_from_fd(uint32_t res_id,
enum virgl_resource_fd_type fd_type,
int fd,
const struct iovec *iov,
- int iov_count);
+ int iov_count,
+ const struct virgl_resource_opaque_fd_metadata *opaque_fd_metadata);
+
+struct virgl_resource *
+virgl_resource_create_from_opaque_handle(struct virgl_context *ctx,
+ uint32_t res_id,
+ uint32_t opaque_handle);
struct virgl_resource *
virgl_resource_create_from_iov(uint32_t res_id,
@@ -134,4 +174,7 @@ virgl_resource_detach_iov(struct virgl_resource *res);
enum virgl_resource_fd_type
virgl_resource_export_fd(struct virgl_resource *res, int *fd);
+uint64_t
+virgl_resource_get_size(struct virgl_resource *res);
+
#endif /* VIRGL_RESOURCE_H */
diff --git a/src/virgl_util.c b/src/virgl_util.c
index 99ff88e6..6ef1802e 100644
--- a/src/virgl_util.c
+++ b/src/virgl_util.c
@@ -34,7 +34,7 @@
#endif
#include <unistd.h>
-#include "os/os_misc.h"
+#include "util/os_misc.h"
#include "util/u_pointer.h"
#include <assert.h>
@@ -53,20 +53,15 @@
#include <stdio.h>
#endif
-unsigned hash_func_u32(void *key)
+uint32_t hash_func_u32(const void *key)
{
intptr_t ip = pointer_to_intptr(key);
- return (unsigned)(ip & 0xffffffff);
+ return (uint32_t)(ip & 0xffffffff);
}
-int compare_func(void *key1, void *key2)
+bool equal_func(const void *key1, const void *key2)
{
- if (key1 < key2)
- return -1;
- if (key1 > key2)
- return 1;
- else
- return 0;
+ return key1 == key2;
}
bool has_eventfd(void)
@@ -187,9 +182,14 @@ void trace_init(void)
#endif
#if ENABLE_TRACING == TRACE_WITH_PERFETTO
+static void on_tracing_state_change(bool enabled) {
+ virgl_log("%s: tracing state change: %d\n", __func__, enabled);
+}
+
void trace_init(void)
{
struct vperfetto_min_config config = {
+ .on_tracing_state_change = on_tracing_state_change,
.init_flags = VPERFETTO_INIT_FLAG_USE_SYSTEM_BACKEND,
.filename = NULL,
.shmem_size_hint_kb = 32 * 1024,
diff --git a/src/virgl_util.h b/src/virgl_util.h
index 4559f351..d92ed29c 100644
--- a/src/virgl_util.h
+++ b/src/virgl_util.h
@@ -33,6 +33,8 @@
#include "config.h"
#endif
+#include "util/macros.h"
+
#include "virglrenderer.h"
#define TRACE_WITH_PERFETTO 1
@@ -56,9 +58,9 @@ static inline bool is_only_bit(uint32_t mask, uint32_t bit)
return (mask == bit);
}
-unsigned hash_func_u32(void *key);
+uint32_t hash_func_u32(const void *key);
-int compare_func(void *key1, void *key2);
+bool equal_func(const void *key1, const void *key2);
bool has_eventfd(void);
int create_eventfd(unsigned int initval);
@@ -68,7 +70,7 @@ void flush_eventfd(int fd);
virgl_debug_callback_type virgl_log_set_logger(virgl_debug_callback_type logger);
void virgl_logv(const char *fmt, va_list va);
-static inline void virgl_log(const char *fmt, ...)
+static inline void PRINTFLIKE(1, 2) virgl_log(const char *fmt, ...)
{
va_list va;
va_start(va, fmt);
@@ -120,7 +122,7 @@ void trace_end(const char **scope);
#define TRACE_FUNC()
#define TRACE_SCOPE(SCOPE)
#define TRACE_SCOPE_SLOW(SCOPE)
-#define TRACE_SCOPE_BEGIN(SCOPE, VAR)
+#define TRACE_SCOPE_BEGIN(SCOPE)
#define TRACE_SCOPE_END(VAR)
#endif /* ENABLE_TRACING */
diff --git a/src/virgl_video.c b/src/virgl_video.c
new file mode 100644
index 00000000..025ce00c
--- /dev/null
+++ b/src/virgl_video.c
@@ -0,0 +1,2347 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Kylin Software Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Implementation of general video codec interface.
+ *
+ * This implementation is currently based on VA-API, and other interfaces,
+ * such as VDPAU and proprietary interfaces, can also be considered in the
+ * future.
+ *
+ * Two objects are implemented here:
+ * virgl_video_buffer:
+ * Buffer for storing raw YUV formatted data. Currently, it is a wrapper
+ * for VASurface.
+ * virgl_video_codec:
+ * Represents a video encoder or decoder. It's a wrapper of VAContext and
+ * mainly provides the following methods:
+ * - virgl_video_begin_frame()
+ * It calls vaBeginPicture() to prepare for encoding and decoding. For
+ * encoding, it also needs to upload the raw picture data from the guest
+ * side into the local VASurface.
+ * - virgl_video_decode_bitstream()
+ * It constructs the decoding-related VABuffers according to the picture
+ * description information, and then calls vaRenderPicture() for decoding.
+ * - virgl_video_encode_bitstream()
+ * It constructs the encoding-related VABuffers according to the picture
+ * description information, and then calls vaRenderPicture() for encoding.
+ * - virgl_video_end_frame()
+ * It calls vaEndPicture() to end encoding and decoding. After decoding,
+ * it transmits the raw picture data from VASurface to the guest side,
+ * and after encoding, it transmits the result and the coded data in
+ * VACodedBuffer to the guest side.
+ *
+ * @author Feng Jiang <jiangfeng@kylinos.cn>
+ */
+
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <epoxy/gl.h>
+#include <epoxy/egl.h>
+#include <va/va.h>
+#include <va/va_drm.h>
+#include <va/va_drmcommon.h>
+#include <drm_fourcc.h>
+
+#include "pipe/p_video_state.h"
+#include "util/u_memory.h"
+#include "virgl_hw.h"
+#include "virgl_video_hw.h"
+#include "virgl_util.h"
+#include "virgl_video.h"
+
+/*
+ * The max size of codec buffer is approximately:
+ * num_of_macroblocks * max_size_of_per_macroblock + size_of_some_headers
+ * Now, we only support YUV420 formats, this means that we have a limit of
+ * 3200 bits(400 Bytes) per macroblock. To simplify the calculation, we
+ * directly use 512 instead of 400.
+ */
+#define CODED_BUF_DEFAULT_SIZE(width, height) \
+ ((width) * (height) / (16 * 16) * 512)
+
+/* Raw (YUV) picture storage; a thin wrapper around a VASurface. */
+struct virgl_video_buffer {
+ enum pipe_format format;
+ uint32_t width;
+ uint32_t height;
+ bool interlanced; /* NOTE(review): identifier typo for "interlaced"; never written in this file */
+ VASurfaceID va_sfc;
+ struct virgl_video_dma_buf *dmabuf; /* lazily exported dma-buf view of va_sfc */
+ void *opaque; /* User opaque data */
+};
+
+
+/* A video encoder or decoder; wraps a VAContext/VAConfig pair. */
+struct virgl_video_codec {
+ enum pipe_video_profile profile;
+ uint32_t level;
+ enum pipe_video_entrypoint entrypoint;
+ enum pipe_video_chroma_format chroma_format;
+ uint32_t width;
+ uint32_t height;
+ uint32_t max_references;
+ VAContextID va_ctx;
+ VAConfigID va_cfg;
+ struct virgl_video_buffer *buffer; /* current target set by virgl_video_begin_frame() */
+ struct virgl_video_buffer *ref_pic_list[32]; /* Enc: reference pictures */
+ VABufferID va_coded_buf; /* Enc: VACodedBuffer */
+ void *opaque; /* User opaque data */
+};
+
+
+/* Process-wide VA display; NULL until virgl_video_init() succeeds. */
+static VADisplay va_dpy;
+
+/* Host-side callbacks registered by virgl_video_init(); may be NULL. */
+static struct virgl_video_callbacks *callbacks = NULL;
+
+/* Map a VAProfile to the gallium pipe_video_profile (UNKNOWN if unmapped). */
+static enum pipe_video_profile pipe_profile_from_va(VAProfile profile)
+{
+ switch (profile) {
+ case VAProfileMPEG2Simple:
+ return PIPE_VIDEO_PROFILE_MPEG2_SIMPLE;
+ case VAProfileMPEG2Main:
+ return PIPE_VIDEO_PROFILE_MPEG2_MAIN;
+ case VAProfileMPEG4Simple:
+ return PIPE_VIDEO_PROFILE_MPEG4_SIMPLE;
+ case VAProfileMPEG4AdvancedSimple:
+ return PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE;
+ case VAProfileVC1Simple:
+ return PIPE_VIDEO_PROFILE_VC1_SIMPLE;
+ case VAProfileVC1Main:
+ return PIPE_VIDEO_PROFILE_VC1_MAIN;
+ case VAProfileVC1Advanced:
+ return PIPE_VIDEO_PROFILE_VC1_ADVANCED;
+ case VAProfileH264ConstrainedBaseline:
+ return PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE;
+ case VAProfileH264Main:
+ return PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN;
+ case VAProfileH264High:
+ return PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH;
+ case VAProfileHEVCMain:
+ return PIPE_VIDEO_PROFILE_HEVC_MAIN;
+ case VAProfileHEVCMain10:
+ return PIPE_VIDEO_PROFILE_HEVC_MAIN_10;
+ case VAProfileJPEGBaseline:
+ return PIPE_VIDEO_PROFILE_JPEG_BASELINE;
+ case VAProfileVP9Profile0:
+ return PIPE_VIDEO_PROFILE_VP9_PROFILE0;
+ case VAProfileVP9Profile2:
+ return PIPE_VIDEO_PROFILE_VP9_PROFILE2;
+ case VAProfileAV1Profile0:
+ return PIPE_VIDEO_PROFILE_AV1_MAIN;
+ case VAProfileNone:
+ return PIPE_VIDEO_PROFILE_UNKNOWN;
+ default:
+ return PIPE_VIDEO_PROFILE_UNKNOWN;
+ }
+}
+
+/* NOTE: mesa va frontend only supports VLD and EncSlice */
+/* Map a VAEntrypoint to the gallium pipe_video_entrypoint (UNKNOWN if unmapped). */
+static enum pipe_video_entrypoint pipe_entrypoint_from_va(
+ VAEntrypoint entrypoint)
+{
+ switch (entrypoint) {
+ case VAEntrypointVLD:
+ return PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
+ case VAEntrypointIDCT:
+ return PIPE_VIDEO_ENTRYPOINT_IDCT;
+ case VAEntrypointMoComp:
+ return PIPE_VIDEO_ENTRYPOINT_MC;
+ case VAEntrypointEncSlice: /* fall through */
+ case VAEntrypointEncSliceLP:
+ return PIPE_VIDEO_ENTRYPOINT_ENCODE;
+ default:
+ return PIPE_VIDEO_ENTRYPOINT_UNKNOWN;
+ }
+}
+
+/* Map a VA fourcc code to a pipe_format (PIPE_FORMAT_NONE if unmapped). */
+static enum pipe_format pipe_format_from_va_fourcc(unsigned format)
+{
+ switch(format) {
+ case VA_FOURCC('N','V','1','2'):
+ return PIPE_FORMAT_NV12;
+/* TODO: These are already defined in mesa, but not yet in virglrenderer
+ case VA_FOURCC('P','0','1','0'):
+ return PIPE_FORMAT_P010;
+ case VA_FOURCC('P','0','1','6'):
+ return PIPE_FORMAT_P016;
+*/
+ case VA_FOURCC('I','4','2','0'):
+ return PIPE_FORMAT_IYUV;
+ case VA_FOURCC('Y','V','1','2'):
+ return PIPE_FORMAT_YV12;
+ case VA_FOURCC('Y','U','Y','V'):
+ case VA_FOURCC('Y','U','Y','2'):
+ return PIPE_FORMAT_YUYV;
+ case VA_FOURCC('U','Y','V','Y'):
+ return PIPE_FORMAT_UYVY;
+ case VA_FOURCC('B','G','R','A'):
+ return PIPE_FORMAT_B8G8R8A8_UNORM;
+ case VA_FOURCC('R','G','B','A'):
+ return PIPE_FORMAT_R8G8B8A8_UNORM;
+ case VA_FOURCC('B','G','R','X'):
+ return PIPE_FORMAT_B8G8R8X8_UNORM;
+ case VA_FOURCC('R','G','B','X'):
+ return PIPE_FORMAT_R8G8B8X8_UNORM;
+ default:
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+
+/*
+ * Inverse of pipe_profile_from_va(). Profiles known to gallium but not
+ * representable in VA return VAProfileNone; anything else returns -1.
+ */
+static VAProfile va_profile_from_pipe(enum pipe_video_profile profile)
+{
+ switch (profile) {
+ case PIPE_VIDEO_PROFILE_MPEG2_SIMPLE:
+ return VAProfileMPEG2Simple;
+ case PIPE_VIDEO_PROFILE_MPEG2_MAIN:
+ return VAProfileMPEG2Main;
+ case PIPE_VIDEO_PROFILE_MPEG4_SIMPLE:
+ return VAProfileMPEG4Simple;
+ case PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE:
+ return VAProfileMPEG4AdvancedSimple;
+ case PIPE_VIDEO_PROFILE_VC1_SIMPLE:
+ return VAProfileVC1Simple;
+ case PIPE_VIDEO_PROFILE_VC1_MAIN:
+ return VAProfileVC1Main;
+ case PIPE_VIDEO_PROFILE_VC1_ADVANCED:
+ return VAProfileVC1Advanced;
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
+ return VAProfileH264ConstrainedBaseline;
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
+ return VAProfileH264Main;
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
+ return VAProfileH264High;
+ case PIPE_VIDEO_PROFILE_HEVC_MAIN:
+ return VAProfileHEVCMain;
+ case PIPE_VIDEO_PROFILE_HEVC_MAIN_10:
+ return VAProfileHEVCMain10;
+ case PIPE_VIDEO_PROFILE_JPEG_BASELINE:
+ return VAProfileJPEGBaseline;
+ case PIPE_VIDEO_PROFILE_VP9_PROFILE0:
+ return VAProfileVP9Profile0;
+ case PIPE_VIDEO_PROFILE_VP9_PROFILE2:
+ return VAProfileVP9Profile2;
+ case PIPE_VIDEO_PROFILE_AV1_MAIN:
+ return VAProfileAV1Profile0;
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_EXTENDED:
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH10:
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH422:
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH444:
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_CONSTRAINED_BASELINE:
+ case PIPE_VIDEO_PROFILE_HEVC_MAIN_12:
+ case PIPE_VIDEO_PROFILE_HEVC_MAIN_STILL:
+ case PIPE_VIDEO_PROFILE_HEVC_MAIN_444:
+ case PIPE_VIDEO_PROFILE_UNKNOWN:
+ return VAProfileNone;
+ default:
+ return -1;
+ }
+}
+
+/*
+ * There is no invalid entrypoint defined in libva,
+ * so add this definition to make the code clear
+ */
+#define VAEntrypointNone 0
+/* Inverse of pipe_entrypoint_from_va(); returns VAEntrypointNone (0) if unmapped. */
+static int va_entrypoint_from_pipe(enum pipe_video_entrypoint entrypoint)
+{
+ switch (entrypoint) {
+ case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
+ return VAEntrypointVLD;
+ case PIPE_VIDEO_ENTRYPOINT_IDCT:
+ return VAEntrypointIDCT;
+ case PIPE_VIDEO_ENTRYPOINT_MC:
+ return VAEntrypointMoComp;
+ case PIPE_VIDEO_ENTRYPOINT_ENCODE:
+ return VAEntrypointEncSlice;
+ default:
+ return VAEntrypointNone;
+ }
+}
+
+/* Map a pipe chroma format to a VA_RT_FORMAT_* bit (0 if unmapped). */
+static uint32_t va_format_from_pipe_chroma(
+ enum pipe_video_chroma_format chroma_format)
+{
+ switch (chroma_format) {
+ case PIPE_VIDEO_CHROMA_FORMAT_400:
+ return VA_RT_FORMAT_YUV400;
+ case PIPE_VIDEO_CHROMA_FORMAT_420:
+ return VA_RT_FORMAT_YUV420;
+ case PIPE_VIDEO_CHROMA_FORMAT_422:
+ return VA_RT_FORMAT_YUV422;
+ case PIPE_VIDEO_CHROMA_FORMAT_444:
+ return VA_RT_FORMAT_YUV444;
+ case PIPE_VIDEO_CHROMA_FORMAT_NONE:
+ default:
+ return 0;
+ }
+}
+
+/* Map a VA fourcc to a DRM fourcc; only NV12/NV21 are supported here. */
+static uint32_t drm_format_from_va_fourcc(uint32_t va_fourcc)
+{
+ switch (va_fourcc) {
+ case VA_FOURCC_NV12:
+ return DRM_FORMAT_NV12;
+ case VA_FOURCC_NV21:
+ return DRM_FORMAT_NV21;
+ default:
+ return DRM_FORMAT_INVALID;
+ }
+}
+
+/*
+ * Flatten a VADRMPRIMESurfaceDescriptor (layers x planes) into the
+ * plane array of a virgl_video_dma_buf. Planes beyond
+ * ARRAY_SIZE(dmabuf->planes) are silently dropped. fd/size/modifier are
+ * taken from the object each plane references; fds are NOT duplicated.
+ */
+static void fill_video_dma_buf(struct virgl_video_dma_buf *dmabuf,
+ const VADRMPRIMESurfaceDescriptor *desc)
+{
+ unsigned i, j, obj_idx;
+ struct virgl_video_dma_buf_plane *plane;
+
+/*
+ virgl_log("surface: fourcc=0x%08x, size=%ux%u, num_objects=%u,
+ num_layers=%u\n", desc->fourcc, desc->width, desc->height,
+ desc->num_objects, desc->num_layers);
+
+ for (i = 0; i < desc->num_objects; i++)
+ virgl_log(" objects[%u]: fd=%d, size=%u, modifier=0x%lx\n",
+ i, desc->objects[i].fd, desc->objects[i].size,
+ desc->objects[i].drm_format_modifier);
+
+ for (i = 0; i < desc->num_layers; i++)
+ virgl_log(" layers[%u] : format=0x%08x, num_planes=%u, "
+ "obj=%u,%u,%u,%u, offset=%u,%u,%u,%u, pitch=%u,%u,%u,%u\n",
+ i, desc->layers[i].drm_format, desc->layers[i].num_planes,
+ desc->layers[i].object_index[0],
+ desc->layers[i].object_index[1],
+ desc->layers[i].object_index[2],
+ desc->layers[i].object_index[3],
+ desc->layers[i].offset[0],
+ desc->layers[i].offset[1],
+ desc->layers[i].offset[2],
+ desc->layers[i].offset[3],
+ desc->layers[i].pitch[0],
+ desc->layers[i].pitch[1],
+ desc->layers[i].pitch[2],
+ desc->layers[i].pitch[3]);
+*/
+
+ dmabuf->drm_format = drm_format_from_va_fourcc(desc->fourcc);
+ dmabuf->width = desc->width;
+ dmabuf->height = desc->height;
+
+ for (i = 0, dmabuf->num_planes = 0; i < desc->num_layers; i++) {
+ for (j = 0; j < desc->layers[i].num_planes &&
+ dmabuf->num_planes < ARRAY_SIZE(dmabuf->planes); j++) {
+
+ obj_idx = desc->layers[i].object_index[j];
+ plane = &dmabuf->planes[dmabuf->num_planes++];
+ plane->drm_format = desc->layers[i].drm_format;
+ plane->offset = desc->layers[i].offset[j];
+ plane->pitch = desc->layers[i].pitch[j];
+ plane->fd = desc->objects[obj_idx].fd;
+ plane->size = desc->objects[obj_idx].size;
+ plane->modifier = desc->objects[obj_idx].drm_format_modifier;
+ }
+ }
+}
+
+/*
+ * Export the buffer's VASurface as a dma-buf (separate layer per plane).
+ * Returns a heap-allocated descriptor owning the exported fds, or NULL on
+ * failure; release with destroy_video_dma_buf().
+ */
+static struct virgl_video_dma_buf *export_video_dma_buf(
+ struct virgl_video_buffer *buffer,
+ unsigned flags)
+{
+ struct virgl_video_dma_buf *dmabuf;
+ uint32_t exp_flags;
+ VAStatus va_stat;
+ VADRMPRIMESurfaceDescriptor desc;
+
+ exp_flags = VA_EXPORT_SURFACE_SEPARATE_LAYERS;
+
+ if (flags & VIRGL_VIDEO_DMABUF_READ_ONLY)
+ exp_flags |= VA_EXPORT_SURFACE_READ_ONLY;
+
+ if (flags & VIRGL_VIDEO_DMABUF_WRITE_ONLY)
+ exp_flags |= VA_EXPORT_SURFACE_WRITE_ONLY;
+
+ dmabuf = calloc(1, sizeof(*dmabuf));
+ if (!dmabuf)
+ return NULL;
+
+ va_stat = vaExportSurfaceHandle(va_dpy, buffer->va_sfc,
+ VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, exp_flags, &desc);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("export surface failed, err = 0x%X\n", va_stat);
+ goto free_dmabuf;
+ }
+
+ fill_video_dma_buf(dmabuf, &desc);
+ dmabuf->flags = flags;
+ dmabuf->buf = buffer;
+
+ return dmabuf;
+
+free_dmabuf:
+ free(dmabuf);
+ return NULL;
+}
+
+/* Close every exported plane fd and free the descriptor. NULL is a no-op. */
+static void destroy_video_dma_buf(struct virgl_video_dma_buf *dmabuf)
+{
+ unsigned i;
+
+ if (dmabuf) {
+ for (i = 0; i < dmabuf->num_planes; i++)
+ close(dmabuf->planes[i].fd);
+
+ free(dmabuf);
+ }
+}
+
+/*
+ * Ask the host callback to upload guest picture data into the buffer's
+ * VASurface (via a lazily-exported write-only dma-buf) before encoding.
+ * Best-effort: errors are logged and the upload is skipped.
+ */
+static void encode_upload_picture(struct virgl_video_codec *codec,
+ struct virgl_video_buffer *buffer)
+{
+ VAStatus va_stat;
+
+ if (!callbacks || !callbacks->encode_upload_picture)
+ return;
+
+ /* Make sure any prior VA operation on the surface has finished. */
+ va_stat = vaSyncSurface(va_dpy, buffer->va_sfc);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("sync surface failed, err = 0x%x\n", va_stat);
+ return;
+ }
+
+ if (!buffer->dmabuf)
+ buffer->dmabuf = export_video_dma_buf(buffer, VIRGL_VIDEO_DMABUF_WRITE_ONLY);
+
+ if (buffer->dmabuf)
+ callbacks->encode_upload_picture(codec, buffer->dmabuf);
+}
+
+/*
+ * After vaEndPicture(): walk the VACodedBufferSegment chain in the coded
+ * buffer and hand the segment pointers/sizes to the encode_completed
+ * callback. The coded buffer stays mapped for the duration of the call.
+ */
+static void encode_completed(struct virgl_video_codec *codec,
+ struct virgl_video_buffer *buffer)
+{
+ VAStatus va_stat;
+ VACodedBufferSegment *buf, *buf_list;
+ void **coded_bufs = NULL;
+ unsigned *coded_sizes = NULL;
+ unsigned i, num_coded_bufs = 0;
+
+ if (!callbacks || !callbacks->encode_completed)
+ return;
+
+ va_stat = vaMapBuffer(va_dpy, codec->va_coded_buf, (void **)(&buf_list));
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("map coded buffer failed, err = 0x%x\n", va_stat);
+ return;
+ }
+
+ /* First pass: count segments so the arrays can be sized exactly. */
+ for (buf = buf_list; buf; buf = (VACodedBufferSegment *)buf->next)
+ num_coded_bufs++;
+
+ coded_bufs = calloc(num_coded_bufs, sizeof(void *));
+ coded_sizes = calloc(num_coded_bufs, sizeof(unsigned));
+ if (!coded_bufs || !coded_sizes) {
+ virgl_log("alloc memory failed, num_coded_bufs %u\n", num_coded_bufs);
+ goto fail_unmap_buffer;
+ }
+
+ /* Second pass: collect each segment's data pointer and size. */
+ for (buf = buf_list, i = 0; buf; buf = (VACodedBufferSegment *)buf->next) {
+ coded_bufs[i] = buf->buf;
+ coded_sizes[i++] = buf->size;
+ }
+
+ callbacks->encode_completed(codec, buffer->dmabuf, NULL, num_coded_bufs,
+ (const void * const*)coded_bufs, coded_sizes);
+
+fail_unmap_buffer:
+ vaUnmapBuffer(va_dpy, codec->va_coded_buf);
+ free(coded_bufs);
+ free(coded_sizes);
+}
+
+/*
+ * After decoding: hand the target buffer's (lazily exported, read-only)
+ * dma-buf to the decode_completed callback so the guest can read it.
+ */
+static void decode_completed(struct virgl_video_codec *codec,
+ struct virgl_video_buffer *buffer)
+{
+ if (!callbacks || !callbacks->decode_completed)
+ return;
+
+ if (!buffer->dmabuf)
+ buffer->dmabuf = export_video_dma_buf(buffer, VIRGL_VIDEO_DMABUF_READ_ONLY);
+
+ if (buffer->dmabuf)
+ callbacks->decode_completed(codec, buffer->dmabuf);
+}
+
+/*
+ * Return the VASurface used as encoder reconstructed/reference picture for
+ * frame_num, creating it on first use. Surfaces are cached in ref_pic_list
+ * keyed by frame_num modulo the list size, and are freed with the codec.
+ */
+static VASurfaceID get_enc_ref_pic(struct virgl_video_codec *codec,
+ uint32_t frame_num)
+{
+ uint32_t idx;
+ struct virgl_video_create_buffer_args args;
+
+ if (frame_num == VA_INVALID_ID)
+ return VA_INVALID_ID;
+
+ idx = frame_num % ARRAY_SIZE(codec->ref_pic_list);
+
+ if (!codec->ref_pic_list[idx]) {
+ args.format = PIPE_FORMAT_NV21;
+ args.width = codec->width;
+ args.height = codec->height;
+ args.interlaced = 0;
+ args.opaque = NULL;
+ codec->ref_pic_list[idx] = virgl_video_create_buffer(&args);
+ if (!codec->ref_pic_list[idx]) {
+ virgl_log("create ref pic for frame_num %u failed\n", frame_num);
+ return VA_INVALID_ID;
+ }
+ }
+
+ return codec->ref_pic_list[idx]->va_sfc;
+}
+
+/*
+ * Open and initialize the VA display on an already-open DRM fd and register
+ * host callbacks. Only Mesa Gallium VA drivers are accepted (the vendor
+ * string is checked). Returns 0 on success, -1 on failure. `flags` is
+ * currently unused.
+ */
+int virgl_video_init(int drm_fd,
+ struct virgl_video_callbacks *cbs, unsigned int flags)
+{
+ VAStatus va_stat;
+ int major_ver, minor_ver;
+ const char *driver;
+
+ (void)flags;
+
+ if (drm_fd < 0) {
+ virgl_log("invalid drm fd: %d\n", drm_fd);
+ return -1;
+ }
+
+ va_dpy = vaGetDisplayDRM(drm_fd);
+ if (!va_dpy) {
+ virgl_log("get va display failed\n");
+ return -1;
+ }
+
+ va_stat = vaInitialize(va_dpy, &major_ver, &minor_ver);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("init va library failed\n");
+ virgl_video_destroy();
+ return -1;
+ }
+
+ virgl_log("VA-API version: %d.%d\n", major_ver, minor_ver);
+
+ driver = vaQueryVendorString(va_dpy);
+ virgl_log("Driver version: %s\n", driver ? driver : "<unknown>");
+
+ if (!driver || !strstr(driver, "Mesa Gallium")) {
+ virgl_log("only supports mesa va drivers now\n");
+ virgl_video_destroy();
+ return -1;
+ }
+
+ callbacks = cbs;
+
+ return 0;
+}
+
+/* Tear down the VA display and drop the registered callbacks. Idempotent. */
+void virgl_video_destroy(void)
+{
+ if (va_dpy) {
+ vaTerminate(va_dpy);
+ va_dpy = NULL;
+ }
+
+ callbacks = NULL;
+}
+
+/*
+ * Fill one virgl_video_caps entry for a (profile, entrypoint) pair by
+ * creating a throwaway VAConfig and querying its surface attributes
+ * (max size, preferred pixel format). Returns 0 on success, -1 on OOM.
+ * NOTE(review): vaCreateConfig/vaQuerySurfaceAttributes return codes are
+ * not checked here; on driver failure the defaults above are reported.
+ */
+static int fill_vcaps_entry(VAProfile profile, VAEntrypoint entrypoint,
+ struct virgl_video_caps *vcaps)
+{
+ VAConfigID cfg;
+ VASurfaceAttrib *attrs;
+ unsigned i, num_attrs;
+
+ /* FIXME: default values */
+ vcaps->profile = pipe_profile_from_va(profile);
+ vcaps->entrypoint = pipe_entrypoint_from_va(entrypoint);
+ vcaps->max_level = 0;
+ vcaps->stacked_frames = 0;
+ vcaps->max_width = 0;
+ vcaps->max_height = 0;
+ vcaps->prefered_format = PIPE_FORMAT_NONE;
+ vcaps->max_macroblocks = 1;
+ vcaps->npot_texture = 1;
+ vcaps->supports_progressive = 1;
+ vcaps->supports_interlaced = 0;
+ vcaps->prefers_interlaced = 0;
+ vcaps->max_temporal_layers = 0;
+
+ vaCreateConfig(va_dpy, profile, entrypoint, NULL, 0, &cfg);
+
+ /* First call with NULL obtains the attribute count. */
+ vaQuerySurfaceAttributes(va_dpy, cfg, NULL, &num_attrs);
+ attrs = calloc(num_attrs, sizeof(VASurfaceAttrib));
+ if (!attrs)
+ return -1;
+
+ vaQuerySurfaceAttributes(va_dpy, cfg, attrs, &num_attrs);
+ for (i = 0; i < num_attrs; i++) {
+ switch (attrs[i].type) {
+ case VASurfaceAttribMaxHeight:
+ vcaps->max_height = attrs[i].value.value.i;
+ break;
+ case VASurfaceAttribMaxWidth:
+ vcaps->max_width = attrs[i].value.value.i;
+ break;
+ case VASurfaceAttribPixelFormat:
+ /* Keep the first pixel format the driver reports. */
+ if (PIPE_FORMAT_NONE == vcaps->prefered_format)
+ vcaps->prefered_format = \
+ pipe_format_from_va_fourcc(attrs[i].value.value.i);
+ break;
+ default:
+ break;
+ }
+ }
+
+ free(attrs);
+
+ vaDestroyConfig(va_dpy, cfg);
+
+ return 0;
+}
+
+/*
+ * Enumerate supported VA profiles/entrypoints and fill caps->v2.video_caps.
+ * Restricted to H.264/H.265 profiles and VLD (decode) / EncSlice (encode)
+ * entrypoints. Returns 0 on success, -1 on bad arguments or OOM.
+ */
+int virgl_video_fill_caps(union virgl_caps *caps)
+{
+ int i, j;
+ int num_profiles, num_entrypoints;
+ VAProfile *profiles = NULL;
+ VAEntrypoint *entrypoints = NULL;
+
+ if (!va_dpy || !caps)
+ return -1;
+
+ num_entrypoints = vaMaxNumEntrypoints(va_dpy);
+ entrypoints = calloc(num_entrypoints, sizeof(VAEntrypoint));
+ if (!entrypoints)
+ return -1;
+
+ num_profiles = vaMaxNumProfiles(va_dpy);
+ profiles = calloc(num_profiles, sizeof(VAProfile));
+ if (!profiles) {
+ free(entrypoints);
+ return -1;
+ }
+
+ vaQueryConfigProfiles(va_dpy, profiles, &num_profiles);
+ for (i = 0, caps->v2.num_video_caps = 0; i < num_profiles; i++) {
+ /* only support H.264 and H.265 now */
+ if (profiles[i] != VAProfileH264Main &&
+ profiles[i] != VAProfileH264High &&
+ profiles[i] != VAProfileH264ConstrainedBaseline &&
+ profiles[i] != VAProfileHEVCMain)
+ continue;
+
+ vaQueryConfigEntrypoints(va_dpy, profiles[i],
+ entrypoints, &num_entrypoints);
+ for (j = 0; j < num_entrypoints &&
+ caps->v2.num_video_caps < ARRAY_SIZE(caps->v2.video_caps); j++) {
+ /* support encoding and decoding */
+ if (VAEntrypointVLD != entrypoints[j] &&
+ VAEntrypointEncSlice != entrypoints[j])
+ continue;
+
+ fill_vcaps_entry(profiles[i], entrypoints[j],
+ &caps->v2.video_caps[caps->v2.num_video_caps++]);
+ }
+ }
+
+ free(profiles);
+ free(entrypoints);
+
+ return 0;
+}
+
+/*
+ * Create a codec (VAConfig + VAContext) for the requested profile,
+ * entrypoint and chroma format. For encoders, a VACodedBuffer sized by
+ * CODED_BUF_DEFAULT_SIZE is also created. Returns NULL on failure;
+ * on success the caller owns the codec and must call
+ * virgl_video_destroy_codec().
+ */
+struct virgl_video_codec *virgl_video_create_codec(
+ const struct virgl_video_create_codec_args *args)
+{
+ VAStatus va_stat;
+ VAConfigID cfg;
+ VAContextID ctx;
+ VAConfigAttrib attr;
+ VAProfile profile;
+ VAEntrypoint entrypoint;
+ uint32_t format;
+ struct virgl_video_codec *codec;
+
+ if (!va_dpy || !args)
+ return NULL;
+
+ profile = va_profile_from_pipe(args->profile);
+ entrypoint = va_entrypoint_from_pipe(args->entrypoint);
+ format = va_format_from_pipe_chroma(args->chroma_format);
+ if (VAProfileNone == profile || VAEntrypointNone == entrypoint)
+ return NULL;
+
+ codec = (struct virgl_video_codec *)calloc(1, sizeof(*codec));
+ if (!codec)
+ return NULL;
+
+ /* Verify the driver supports the requested RT format for this config. */
+ attr.type = VAConfigAttribRTFormat;
+ vaGetConfigAttributes(va_dpy, profile, entrypoint, &attr, 1);
+ if (!(attr.value & format)) {
+ virgl_log("format 0x%x not supported, supported formats: 0x%x\n",
+ format, attr.value);
+ goto err;
+ }
+
+ va_stat = vaCreateConfig(va_dpy, profile, entrypoint, &attr, 1, &cfg);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("create config failed, err = 0x%x\n", va_stat);
+ goto err;
+ }
+ codec->va_cfg = cfg;
+
+ va_stat = vaCreateContext(va_dpy, cfg, args->width, args->height,
+ VA_PROGRESSIVE, NULL, 0, &ctx);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("create context failed, err = 0x%x\n", va_stat);
+ goto err;
+ }
+ codec->va_ctx = ctx;
+
+ codec->profile = args->profile;
+ codec->level = args->level;
+ codec->entrypoint = args->entrypoint;
+ codec->chroma_format = args->chroma_format;
+ codec->width = args->width;
+ codec->height = args->height;
+ codec->max_references = args->max_references;
+ codec->opaque = args->opaque;
+
+ if (entrypoint == VAEntrypointEncSlice) {
+ vaCreateBuffer(va_dpy, codec->va_ctx, VAEncCodedBufferType,
+ CODED_BUF_DEFAULT_SIZE(codec->width, codec->height),
+ 1, NULL, &codec->va_coded_buf);
+ }
+
+ return codec;
+
+err:
+ virgl_video_destroy_codec(codec);
+
+ return NULL;
+}
+
+/**
+ * Destroy a codec and release every VA resource it owns. NULL-safe.
+ *
+ * Encoder reference pictures in ref_pic_list were created with
+ * virgl_video_create_buffer() (see get_enc_ref_pic()), so they must be
+ * released with virgl_video_destroy_buffer(); a bare free() would leak
+ * the underlying VASurface and any exported dma-buf fds.
+ */
+void virgl_video_destroy_codec(struct virgl_video_codec *codec)
+{
+ unsigned i;
+
+ if (!va_dpy || !codec)
+ return;
+
+ if (codec->va_ctx)
+ vaDestroyContext(va_dpy, codec->va_ctx);
+
+ if (codec->va_cfg)
+ vaDestroyConfig(va_dpy, codec->va_cfg);
+
+ if (codec->va_coded_buf)
+ vaDestroyBuffer(va_dpy, codec->va_coded_buf);
+
+ for (i = 0; i < ARRAY_SIZE(codec->ref_pic_list); i++) {
+ if (codec->ref_pic_list[i])
+ virgl_video_destroy_buffer(codec->ref_pic_list[i]);
+ }
+
+ free(codec);
+}
+
+/*
+ * Create a video buffer backed by a freshly allocated VASurface.
+ * Returns NULL on failure; release with virgl_video_destroy_buffer().
+ */
+struct virgl_video_buffer *virgl_video_create_buffer(
+ const struct virgl_video_create_buffer_args *args)
+{
+ VAStatus va_stat;
+ VASurfaceID sfc;
+ uint32_t format;
+ struct virgl_video_buffer *buffer;
+
+ if (!va_dpy || !args)
+ return NULL;
+
+ /*
+ * FIXME: always use YUV420 now,
+ * may be use va_format_from_pipe(args->format)
+ */
+ format = VA_RT_FORMAT_YUV420;
+ /* NOTE(review): dead check while format is a nonzero constant; kept as
+ * scaffolding for the FIXME above. */
+ if (!format) {
+ virgl_log("pipe format %d not supported\n", args->format);
+ return NULL;
+ }
+
+ buffer = (struct virgl_video_buffer *)calloc(1, sizeof(*buffer));
+ if (!buffer)
+ return NULL;
+
+ va_stat = vaCreateSurfaces(va_dpy, format,
+ args->width, args->height, &sfc, 1, NULL, 0);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ free(buffer);
+ return NULL;
+ }
+
+ buffer->va_sfc = sfc;
+ buffer->format = args->format;
+ buffer->width = args->width;
+ buffer->height = args->height;
+ buffer->opaque = args->opaque;
+
+ return buffer;
+}
+
+/* Release a buffer's dma-buf export (if any), its VASurface, and the struct. */
+void virgl_video_destroy_buffer(struct virgl_video_buffer *buffer)
+{
+ if (!va_dpy || !buffer)
+ return;
+
+ if (buffer->dmabuf)
+ destroy_video_dma_buf(buffer->dmabuf);
+
+ if (buffer->va_sfc)
+ vaDestroySurfaces(va_dpy, &buffer->va_sfc, 1);
+
+ free(buffer);
+}
+
+/* Accessor: user opaque pointer stored at codec creation; NULL-safe. */
+void *virgl_video_codec_opaque_data(struct virgl_video_codec *codec)
+{
+ return codec ? codec->opaque : NULL;
+}
+
+/* Accessor: codec's profile, or PIPE_VIDEO_PROFILE_UNKNOWN for NULL. */
+enum pipe_video_profile virgl_video_codec_profile(
+ const struct virgl_video_codec *codec)
+{
+ return codec ? codec->profile : PIPE_VIDEO_PROFILE_UNKNOWN;
+}
+
+/* Accessor: the buffer's VASurface id, or VA_INVALID_SURFACE for NULL. */
+uint32_t virgl_video_buffer_id(const struct virgl_video_buffer *buffer)
+{
+ return (uint32_t)(buffer ? buffer->va_sfc : VA_INVALID_SURFACE);
+}
+
+/* Accessor: user opaque pointer stored at buffer creation; NULL-safe. */
+void *virgl_video_buffer_opaque_data(struct virgl_video_buffer *buffer)
+{
+ return buffer ? buffer->opaque : NULL;
+}
+
+/*
+ * Start a frame on `target` (vaBeginPicture). For encoders the guest
+ * picture data is uploaded into the target surface first. Returns 0 on
+ * success, -1 on failure.
+ */
+int virgl_video_begin_frame(struct virgl_video_codec *codec,
+ struct virgl_video_buffer *target)
+{
+ VAStatus va_stat;
+
+ if (!va_dpy || !codec || !target)
+ return -1;
+
+ if (codec->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
+ encode_upload_picture(codec, target);
+
+ codec->buffer = target;
+ va_stat = vaBeginPicture(va_dpy, codec->va_ctx, target->va_sfc);
+ if (VA_STATUS_SUCCESS != va_stat) {
+ virgl_log("begin picture failed, err = 0x%x\n", va_stat);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+#define ITEM_SET(dest, src, member) \
+ (dest)->member = (src)->member
+
+#define ITEM_CPY(dest, src, member) \
+ memcpy(&(dest)->member, &(src)->member, sizeof((dest)->member))
+
+
+/* Reset a VAPictureH264 entry to the canonical "invalid/unused" state. */
+static void h264_init_picture(VAPictureH264 *pic)
+{
+ pic->picture_id = VA_INVALID_SURFACE;
+ pic->frame_idx = 0;
+ pic->flags = VA_PICTURE_H264_INVALID;
+ pic->TopFieldOrderCnt = 0;
+ pic->BottomFieldOrderCnt = 0;
+}
+
+/*
+ * Refer to vlVaHandlePictureParameterBufferH264() in mesa,
+ * and comment out some unused parameters.
+ */
+/*
+ * Translate a virgl H.264 decode picture descriptor into a
+ * VAPictureParameterBufferH264 (current picture, reference frame list,
+ * SPS/PPS-derived fields). Commented-out fields are intentionally unused,
+ * mirroring vlVaHandlePictureParameterBufferH264() in mesa.
+ */
+static void h264_fill_picture_param(struct virgl_video_codec *codec,
+ struct virgl_video_buffer *target,
+ const struct virgl_h264_picture_desc *desc,
+ VAPictureParameterBufferH264 *vapp)
+{
+ unsigned i;
+ VAPictureH264 *pic;
+
+ (void)codec;
+
+ /* CurrPic */
+ pic = &vapp->CurrPic;
+ pic->picture_id = target->va_sfc;
+ pic->frame_idx = desc->frame_num;
+ pic->flags = desc->is_reference ? VA_PICTURE_H264_SHORT_TERM_REFERENCE : 0;
+ if (desc->field_pic_flag)
+ pic->flags |= (desc->bottom_field_flag ? VA_PICTURE_H264_BOTTOM_FIELD
+ : VA_PICTURE_H264_TOP_FIELD);
+ pic->TopFieldOrderCnt = desc->field_order_cnt[0];
+ pic->BottomFieldOrderCnt = desc->field_order_cnt[1];
+
+
+ /* ReferenceFrames: invalidate all slots, then fill the active ones. */
+ for (i = 0; i < ARRAY_SIZE(vapp->ReferenceFrames); i++)
+ h264_init_picture(&vapp->ReferenceFrames[i]);
+
+ for (i = 0; i < desc->num_ref_frames; i++) {
+ pic = &vapp->ReferenceFrames[i];
+
+ pic->picture_id = desc->buffer_id[i];
+ pic->frame_idx = desc->frame_num_list[i];
+ pic->flags = (desc->is_long_term[i]
+ ? VA_PICTURE_H264_LONG_TERM_REFERENCE
+ : VA_PICTURE_H264_SHORT_TERM_REFERENCE);
+ if (desc->top_is_reference[i] && desc->bottom_is_reference[i]) {
+ // Full frame. This block intentionally left blank. No flags set.
+ } else {
+ if (desc->top_is_reference[i])
+ pic->flags |= VA_PICTURE_H264_TOP_FIELD;
+ else
+ pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
+ }
+ pic->TopFieldOrderCnt = desc->field_order_cnt_list[i][0];
+ pic->BottomFieldOrderCnt = desc->field_order_cnt_list[i][1];
+ }
+
+ //vapp->picture_width_in_mbs_minus1 = (codec->width - 1) / 16;
+ //vapp->picture_height_in_mbs_minus1 = (codec->height - 1) / 16;
+ ITEM_SET(vapp, &desc->pps.sps, bit_depth_luma_minus8);
+ ITEM_SET(vapp, &desc->pps.sps, bit_depth_chroma_minus8);
+ ITEM_SET(vapp, desc, num_ref_frames);
+
+ ITEM_SET(&vapp->seq_fields.bits, &desc->pps.sps, chroma_format_idc);
+ //vapp->seq_fields.bits.residual_colour_transform_flag = 0;
+ //vapp->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = 0;
+ ITEM_SET(&vapp->seq_fields.bits, &desc->pps.sps, frame_mbs_only_flag);
+ ITEM_SET(&vapp->seq_fields.bits,
+ &desc->pps.sps, mb_adaptive_frame_field_flag);
+ ITEM_SET(&vapp->seq_fields.bits, &desc->pps.sps, direct_8x8_inference_flag);
+ ITEM_SET(&vapp->seq_fields.bits, &desc->pps.sps, MinLumaBiPredSize8x8);
+ ITEM_SET(&vapp->seq_fields.bits, &desc->pps.sps, log2_max_frame_num_minus4);
+ ITEM_SET(&vapp->seq_fields.bits, &desc->pps.sps, pic_order_cnt_type);
+ ITEM_SET(&vapp->seq_fields.bits,
+ &desc->pps.sps, log2_max_pic_order_cnt_lsb_minus4);
+ ITEM_SET(&vapp->seq_fields.bits,
+ &desc->pps.sps, delta_pic_order_always_zero_flag);
+
+ //ITEM_SET(vapp, &desc->pps, num_slice_groups_minus1);
+ //ITEM_SET(vapp, &desc->pps, slice_group_map_type);
+ //ITEM_SET(vapp, &desc->pps, slice_group_change_rate_minus1);
+ ITEM_SET(vapp, &desc->pps, pic_init_qp_minus26);
+ ITEM_SET(vapp, &desc->pps, pic_init_qs_minus26);
+ ITEM_SET(vapp, &desc->pps, chroma_qp_index_offset);
+ ITEM_SET(vapp, &desc->pps, second_chroma_qp_index_offset);
+
+ ITEM_SET(&vapp->pic_fields.bits, &desc->pps, entropy_coding_mode_flag);
+ ITEM_SET(&vapp->pic_fields.bits, &desc->pps, weighted_pred_flag);
+ ITEM_SET(&vapp->pic_fields.bits, &desc->pps, weighted_bipred_idc);
+ ITEM_SET(&vapp->pic_fields.bits, &desc->pps, transform_8x8_mode_flag);
+ ITEM_SET(&vapp->pic_fields.bits, desc, field_pic_flag);
+ ITEM_SET(&vapp->pic_fields.bits, &desc->pps, constrained_intra_pred_flag);
+ /* Different field name on the virgl side, so ITEM_SET cannot be used. */
+ vapp->pic_fields.bits.pic_order_present_flag =
+ desc->pps.bottom_field_pic_order_in_frame_present_flag;
+ ITEM_SET(&vapp->pic_fields.bits,
+ &desc->pps, deblocking_filter_control_present_flag);
+ ITEM_SET(&vapp->pic_fields.bits,
+ &desc->pps, redundant_pic_cnt_present_flag);
+ vapp->pic_fields.bits.reference_pic_flag = desc->is_reference;
+
+ ITEM_SET(vapp, desc, frame_num);
+}
+
+
+ /* Refer to vlVaHandleIQMatrixBufferH264() in mesa */
+/* Copy the 4x4/8x8 scaling lists from the PPS into the VA IQ matrix buffer. */
+static void h264_fill_iq_matrix(const struct virgl_h264_picture_desc *desc,
+ VAIQMatrixBufferH264 *vaiqm)
+{
+ ITEM_CPY(vaiqm, &desc->pps, ScalingList4x4);
+ ITEM_CPY(vaiqm, &desc->pps, ScalingList8x8);
+}
+
+/*
+ * Refer to vlVaHandleSliceParameterBufferH264() in mesa,
+ * and comment out some unused parameters.
+ */
+/*
+ * Fill the few slice parameters the mesa VA frontend consumes; everything
+ * else (listed commented-out below) is parsed from the bitstream by the
+ * driver and intentionally left unset.
+ */
+static void h264_fill_slice_param(const struct virgl_h264_picture_desc *desc,
+ VASliceParameterBufferH264 *vasp)
+{
+ //vasp->slice_data_size;
+ //vasp->slice_data_offset;
+ //vasp->slice_data_flag;
+ //vasp->slice_data_bit_offset;
+ //vasp->first_mb_in_slice;
+ //vasp->slice_type;
+ //vasp->direct_spatial_mv_pred_flag;
+ ITEM_SET(vasp, desc, num_ref_idx_l0_active_minus1);
+ ITEM_SET(vasp, desc, num_ref_idx_l1_active_minus1);
+ //vasp->cabac_init_idc;
+ //vasp->slice_qp_delta;
+ //vasp->disable_deblocking_filter_idc;
+ //vasp->slice_alpha_c0_offset_div2;
+ //vasp->slice_beta_offset_div2;
+ //vasp->RefPicList0[32];
+ //vasp->RefPicList1[32];
+
+ /* see pred_weight_table */
+ //vasp->luma_log2_weight_denom;
+ //vasp->chroma_log2_weight_denom;
+ //vasp->luma_weight_l0_flag;
+ //vasp->luma_weight_l0[32];
+ //vasp->luma_offset_l0[32];
+ //vasp->chroma_weight_l0_flag;
+ //vasp->chroma_weight_l0[32][2];
+ //vasp->chroma_offset_l0[32][2];
+ //vasp->luma_weight_l1_flag;
+ //vasp->luma_weight_l1[32];
+ //vasp->luma_offset_l1[32];
+ //vasp->chroma_weight_l1_flag;
+ //vasp->chroma_weight_l1[32][2];
+ //vasp->chroma_offset_l1[32][2];
+}
+
+/*
+ * Refer to vlVaHandleVAEncPictureParameterBufferTypeH264() in mesa,
+ * and comment out some unused parameters.
+ */
+/*
+ * Translate a virgl H.264 encode picture descriptor into a
+ * VAEncPictureParameterBufferH264: current reconstructed picture, coded
+ * buffer handle, QP and reference-list sizes, and the picture flags.
+ */
+static void h264_fill_enc_picture_param(
+ struct virgl_video_codec *codec,
+ struct virgl_video_buffer *source,
+ const struct virgl_h264_enc_picture_desc *desc,
+ VAEncPictureParameterBufferH264 *param)
+{
+ unsigned i;
+
+ (void)codec;
+ (void)source;
+
+ /* CurrPic: reconstructed-picture surface cached per frame_num. */
+ param->CurrPic.picture_id = get_enc_ref_pic(codec, desc->frame_num);
+ //CurrPic.frame_idx;
+ //CurrPic.flags;
+ param->CurrPic.TopFieldOrderCnt = desc->pic_order_cnt;
+ //CurrPic.BottomFieldOrderCnt;
+
+ /* ReferenceFrames: all slots invalidated; driver manages the DPB. */
+ for (i = 0; i < ARRAY_SIZE(param->ReferenceFrames); i++)
+ h264_init_picture(&param->ReferenceFrames[i]);
+
+ /* coded_buf */
+ param->coded_buf = codec->va_coded_buf;
+
+ //pic_parameter_set_id;
+ //seq_parameter_set_id;
+ //last_picture;
+ //frame_num
+ param->pic_init_qp = desc->quant_i_frames;
+ param->num_ref_idx_l0_active_minus1 = desc->num_ref_idx_l0_active_minus1;
+ param->num_ref_idx_l1_active_minus1 = desc->num_ref_idx_l1_active_minus1;
+ //chroma_qp_index_offset;
+ //second_chroma_qp_index_offset;
+
+ /* pic_fields */
+ param->pic_fields.bits.idr_pic_flag =
+ (desc->picture_type == PIPE_H2645_ENC_PICTURE_TYPE_IDR);
+ param->pic_fields.bits.reference_pic_flag = !desc->not_referenced;
+ param->pic_fields.bits.entropy_coding_mode_flag = desc->pic_ctrl.enc_cabac_enable;
+ //pic_fields.bits.weighted_pred_flag
+ //pic_fields.bits.weighted_bipred_idc
+ //pic_fields.bits.constrained_intra_pred_flag
+ //pic_fields.bits.transform_8x8_mode_flag
+ //pic_fields.bits.deblocking_filter_control_present_flag
+ //pic_fields.bits.redundant_pic_cnt_present_flag
+ //pic_fields.bits.pic_order_present_flag
+ //pic_fields.bits.pic_scaling_matrix_present_flag
+
+}
+
+/*
+ * Fill the VA-API encode slice parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncSliceParameterBufferTypeH264() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h264_fill_enc_slice_param(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc,
+        VAEncSliceParameterBufferH264 *param)
+{
+    unsigned i;
+    const struct virgl_h264_slice_descriptor *sd;
+
+    /* 'source' is unused; kept for signature symmetry with other fill helpers */
+    (void)source;
+
+    /* Get the latest slice descriptor */
+    if (desc->num_slice_descriptors &&
+        desc->num_slice_descriptors <= ARRAY_SIZE(desc->slices_descriptors)) {
+        sd = &desc->slices_descriptors[desc->num_slice_descriptors - 1];
+        param->macroblock_address = sd->macroblock_address;
+        param->num_macroblocks = sd->num_macroblocks;
+        //macroblock_info;
+    }
+
+    /* Map the pipe picture type to the H.264 slice_type code (P=0, B=1, I=2) */
+    switch (desc->picture_type) {
+    case PIPE_H2645_ENC_PICTURE_TYPE_P:
+        param->slice_type = 0;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_B:
+        param->slice_type = 1;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_I:
+    case PIPE_H2645_ENC_PICTURE_TYPE_IDR: /* fall through */
+        param->slice_type = 2;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_SKIP:
+    default:
+        break;
+    }
+
+    //pic_parameter_set_id;
+    //idr_pic_id;
+    //pic_order_cnt_lsb;
+    //delta_pic_order_cnt_bottom;
+    //delta_pic_order_cnt[2];
+    //direct_spatial_mv_pred_flag;
+
+    /*
+     * Since num_ref_idx_l0_active_minus1 and num_ref_idx_l1_active_minus1
+     * have been passed by VAEncPictureParameterBufferH264,
+     * num_ref_idx_active_override_flag is always set to 0.
+     */
+    param->num_ref_idx_active_override_flag = 0;
+    //num_ref_idx_l0_active_minus1
+    //num_ref_idx_l1_active_minus1
+
+    /* Reference List
+     * NOTE(review): assumes desc->ref_idx_l0_list/_l1_list hold 32 entries,
+     * matching RefPicList0/1 — confirm against the descriptor definition. */
+    for (i = 0; i < 32; i++) {
+        h264_init_picture(&param->RefPicList0[i]);
+        h264_init_picture(&param->RefPicList1[i]);
+
+        param->RefPicList0[i].picture_id =
+            get_enc_ref_pic(codec, desc->ref_idx_l0_list[i]);
+        param->RefPicList1[i].picture_id =
+            get_enc_ref_pic(codec, desc->ref_idx_l1_list[i]);
+
+        if (param->RefPicList0[i].picture_id != VA_INVALID_ID)
+            param->RefPicList0[i].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+
+        if (param->RefPicList1[i].picture_id != VA_INVALID_ID)
+            param->RefPicList1[i].flags = VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+    }
+
+    //luma_log2_weight_denom;
+    //chroma_log2_weight_denom;
+    //luma_weight_l0_flag;
+    //luma_weight_l0[32];
+    //luma_offset_l0[32];
+    //chroma_weight_l0_flag;
+    //chroma_weight_l0[32][2];
+    //chroma_offset_l0[32][2];
+    //luma_weight_l1_flag;
+    //luma_weight_l1[32];
+    //luma_offset_l1[32];
+    //chroma_weight_l1_flag;
+    //chroma_weight_l1[32][2];
+    //chroma_offset_l1[32][2];
+    param->cabac_init_idc = desc->pic_ctrl.enc_cabac_init_idc;
+    //slice_qp_delta;
+    //disable_deblocking_filter_idc;
+    //slice_alpha_c0_offset_div2;
+    //slice_beta_offset_div2;
+
+}
+
+/*
+ * Fill the VA-API encode sequence parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncSequenceParameterBufferTypeH264() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h264_fill_enc_seq_param(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc,
+        VAEncSequenceParameterBufferH264 *param)
+{
+    /* 'source' is unused; kept for signature symmetry with other fill helpers */
+    (void)source;
+
+    //seq_parameter_set_id;
+    param->level_idc = codec->level;
+    //intra_period;
+    param->intra_idr_period = desc->intra_idr_period;
+    //ip_period;
+    //bits_per_second;
+    param->max_num_ref_frames = codec->max_references;
+    //picture_width_in_mbs;
+    //picture_height_in_mbs;
+
+    /* seq_fields.bits */
+    //seq_fields.bits.chroma_format_idc
+    //seq_fields.bits.frame_mbs_only_flag
+    //seq_fields.bits.mb_adaptive_frame_field_flag
+    //seq_fields.bits.seq_scaling_matrix_present_flag
+    //seq_fields.bits.direct_8x8_inference_flag
+    //seq_fields.bits.log2_max_frame_num_minus4
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, pic_order_cnt_type);
+    //seq_fields.bit.log2_max_pic_order_cnt_lsb_minus4
+    //seq_fields.bit.delta_pic_order_always_zero_flag
+
+    //bit_depth_luma_minus8;
+    //bit_depth_chroma_minus8;
+
+    //num_ref_frames_in_pic_order_cnt_cycle;
+    //offset_for_non_ref_pic;
+    //offset_for_top_to_bottom_field;
+    //offset_for_ref_frame[256];
+
+    /* Frame cropping: only forwarded when the guest enabled it */
+    if (desc->seq.enc_frame_cropping_flag) {
+        param->frame_cropping_flag = desc->seq.enc_frame_cropping_flag;
+        param->frame_crop_left_offset = desc->seq.enc_frame_crop_left_offset;
+        param->frame_crop_right_offset = desc->seq.enc_frame_crop_right_offset;
+        param->frame_crop_top_offset = desc->seq.enc_frame_crop_top_offset;
+        param->frame_crop_bottom_offset = desc->seq.enc_frame_crop_bottom_offset;
+    }
+
+    ITEM_SET(param, &desc->seq, vui_parameters_present_flag);
+
+    // vui_fields.bits
+    if (desc->seq.vui_parameters_present_flag) {
+        ITEM_SET(&param->vui_fields.bits, &desc->seq.vui_flags,
+                 aspect_ratio_info_present_flag);
+        ITEM_SET(&param->vui_fields.bits, &desc->seq.vui_flags,
+                 timing_info_present_flag);
+    }
+    //vui_fields.bits.bitstream_restriction_flag
+    //vui_fields.bits.log2_max_mv_length_horizontal
+    //vui_fields.bits.log2_max_mv_length_vertical
+    //vui_fields.bits.fixed_frame_rate_flag
+    //vui_fields.bits.low_delay_hrd_flag
+    //vui_fields.bits.motion_vectors_over_pic_boundaries_flag
+
+    if (desc->seq.vui_parameters_present_flag) {
+        ITEM_SET(param, &desc->seq, aspect_ratio_idc);
+        ITEM_SET(param, &desc->seq, sar_width);
+        ITEM_SET(param, &desc->seq, sar_height);
+    }
+    ITEM_SET(param, &desc->seq, num_units_in_tick);
+    ITEM_SET(param, &desc->seq, time_scale);
+}
+
+/*
+ * Fill the VA-API rate-control misc parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncMiscParameterTypeRateControlH264() in mesa,
+ * and comment out some unused parameters.
+ *
+ * The guest-side mesa handler reconstructs the target bitrate as
+ *   target_bitrate = bits_per_second * target_percentage / 100,
+ * so the inverse mapping is applied here when filling target_percentage.
+ */
+static void h264_fill_enc_misc_param_rate_ctrl(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc,
+        VAEncMiscParameterRateControl *param)
+{
+    unsigned temporal_id = 0; /* always 0 now */
+    const struct virgl_h264_enc_rate_control *rc = &desc->rate_ctrl[temporal_id];
+
+    (void)codec;
+    (void)source;
+
+    param->bits_per_second = rc->peak_bitrate;
+    if (desc->rate_ctrl[0].rate_ctrl_method !=
+        PIPE_H2645_ENC_RATE_CONTROL_METHOD_CONSTANT &&
+        param->bits_per_second) {
+        /* target_percentage is the target bitrate expressed as a percentage
+         * of bits_per_second (libva contract), not a bitrate product. */
+        param->target_percentage = rc->target_bitrate * 100.0 /
+                                   param->bits_per_second;
+    }
+    //window_size;
+    //initial_qp;
+    param->min_qp = rc->min_qp;
+    //basic_unit_size;
+
+    /* rc_flags */
+    //rc_flags.bits.reset
+    param->rc_flags.bits.disable_frame_skip = !rc->skip_frame_enable;
+    param->rc_flags.bits.disable_bit_stuffing = !rc->fill_data_enable;
+    //rc_flags.bits.mb_rate_control
+    param->rc_flags.bits.temporal_id = temporal_id;
+    //rc_flags.bits.cfs_I_frames
+    //rc_flags.bits.enable_parallel_brc
+    //rc_flags.bits.enable_dynamic_scaling
+    //rc_flags.bits.frame_tolerance_mode
+
+    //ICQ_quality_factor;
+    param->max_qp = rc->max_qp;
+    //quality_factor;
+    //target_frame_size;
+}
+
+/*
+ * Fill the VA-API frame-rate misc parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncMiscParameterTypeFrameRateH264() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h264_fill_enc_misc_param_frame_rate(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc,
+        VAEncMiscParameterFrameRate *param)
+{
+    const unsigned temporal_id = 0; /* always 0 now */
+    unsigned num = desc->rate_ctrl[temporal_id].frame_rate_num;
+    unsigned den = desc->rate_ctrl[temporal_id].frame_rate_den;
+
+    (void)codec;
+    (void)source;
+
+    /* libva packs the rate as numerator in the low 16 bits,
+     * denominator in the high 16 bits */
+    param->framerate = num | (den << 16);
+    param->framerate_flags.bits.temporal_id = temporal_id;
+}
+
+/*
+ * Decode one H.264 frame: submit picture/IQ-matrix/slice parameters and
+ * all slice data buffers to the VA context.
+ *
+ * Returns 0 on success, -1 on allocation or VA failure.
+ */
+static int h264_decode_bitstream(struct virgl_video_codec *codec,
+                                 struct virgl_video_buffer *target,
+                                 const struct virgl_h264_picture_desc *desc,
+                                 unsigned num_buffers,
+                                 const void * const *buffers,
+                                 const unsigned *sizes)
+{
+    unsigned i;
+    int err = 0;
+    VAStatus va_stat;
+    VABufferID *slice_data_buf;
+    VABufferID pic_param_buf = VA_INVALID_ID;
+    VABufferID iq_matrix_buf = VA_INVALID_ID;
+    VABufferID slice_param_buf = VA_INVALID_ID;
+    /* Zero-init so fields not covered by the fill helpers are deterministic */
+    VAPictureParameterBufferH264 pic_param = {0};
+    VAIQMatrixBufferH264 iq_matrix = {0};
+    VASliceParameterBufferH264 slice_param = {0};
+
+    slice_data_buf = calloc(num_buffers, sizeof(VABufferID));
+    if (!slice_data_buf) {
+        virgl_log("alloc slice data buffer id failed\n");
+        return -1;
+    }
+    /* Mark as not-yet-created so cleanup only destroys valid buffers */
+    for (i = 0; i < num_buffers; i++)
+        slice_data_buf[i] = VA_INVALID_ID;
+
+    h264_fill_picture_param(codec, target, desc, &pic_param);
+    va_stat = vaCreateBuffer(va_dpy, codec->va_ctx, VAPictureParameterBufferType,
+                             sizeof(pic_param), 1, &pic_param, &pic_param_buf);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("create picture param buffer failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    h264_fill_iq_matrix(desc, &iq_matrix);
+    va_stat = vaCreateBuffer(va_dpy, codec->va_ctx, VAIQMatrixBufferType,
+                             sizeof(iq_matrix), 1, &iq_matrix, &iq_matrix_buf);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("create iq matrix buffer failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    h264_fill_slice_param(desc, &slice_param);
+    va_stat = vaCreateBuffer(va_dpy, codec->va_ctx, VASliceParameterBufferType,
+                             sizeof(slice_param), 1, &slice_param, &slice_param_buf);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("create slice param buffer failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    for (i = 0; i < num_buffers; i++) {
+        va_stat = vaCreateBuffer(va_dpy, codec->va_ctx, VASliceDataBufferType,
+                                 sizes[i], 1, (void *)(buffers[i]), &slice_data_buf[i]);
+        if (VA_STATUS_SUCCESS != va_stat) {
+            virgl_log("create slice data buffer failed, err = 0x%x\n", va_stat);
+            err = -1;
+            goto err;
+        }
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &pic_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render picture param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &iq_matrix_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render iq matrix failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &slice_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render slice param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    for (i = 0; i < num_buffers; i++) {
+        va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &slice_data_buf[i], 1);
+
+        if (VA_STATUS_SUCCESS != va_stat) {
+            virgl_log("render slice data failed, err = 0x%x\n", va_stat);
+            err = -1;
+        }
+    }
+
+err:
+    /* Destroy only the buffers that were actually created */
+    if (pic_param_buf != VA_INVALID_ID)
+        vaDestroyBuffer(va_dpy, pic_param_buf);
+    if (iq_matrix_buf != VA_INVALID_ID)
+        vaDestroyBuffer(va_dpy, iq_matrix_buf);
+    if (slice_param_buf != VA_INVALID_ID)
+        vaDestroyBuffer(va_dpy, slice_param_buf);
+    for (i = 0; i < num_buffers; i++) {
+        if (slice_data_buf[i] != VA_INVALID_ID)
+            vaDestroyBuffer(va_dpy, slice_data_buf[i]);
+    }
+    free(slice_data_buf);
+
+    return err;
+}
+
+/*
+ * Submit the sequence-level encode parameters: the SPS buffer plus the
+ * rate-control and frame-rate misc-parameter buffers.
+ *
+ * Returns 0 on success, -1 if any vaRenderPicture() call fails.
+ *
+ * NOTE(review): the vaCreateBuffer()/vaMapBuffer() return codes are not
+ * checked; a failed map would leave 'misc_param' undefined before the
+ * dereference below — confirm and harden.
+ */
+static int h264_encode_render_sequence(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc)
+{
+    int err = 0;
+    VAStatus va_stat;
+    VAEncSequenceParameterBufferH264 seq_param;
+    VAEncMiscParameterBuffer *misc_param;
+    VABufferID seq_param_buf, rc_param_buf, fr_param_buf;
+
+    memset(&seq_param, 0, sizeof(seq_param));
+    h264_fill_enc_seq_param(codec, source, desc, &seq_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncSequenceParameterBufferType,
+                   sizeof(seq_param), 1, &seq_param, &seq_param_buf);
+
+    /* Rate-control parameters travel as a misc buffer: map, fill the
+     * trailing data area in place, unmap. */
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncMiscParameterBufferType,
+                   sizeof(VAEncMiscParameterBuffer) +
+                   sizeof(VAEncMiscParameterRateControl), 1, NULL, &rc_param_buf);
+    vaMapBuffer(va_dpy, rc_param_buf, (void **)&misc_param);
+    misc_param->type = VAEncMiscParameterTypeRateControl;
+    h264_fill_enc_misc_param_rate_ctrl(codec, source, desc,
+            (VAEncMiscParameterRateControl *)misc_param->data);
+    vaUnmapBuffer(va_dpy, rc_param_buf);
+
+    /* Same pattern for the frame-rate misc buffer */
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncMiscParameterBufferType,
+                   sizeof(VAEncMiscParameterBuffer) +
+                   sizeof(VAEncMiscParameterFrameRate), 1, NULL, &fr_param_buf);
+    vaMapBuffer(va_dpy, fr_param_buf, (void **)&misc_param);
+    misc_param->type = VAEncMiscParameterTypeFrameRate;
+    h264_fill_enc_misc_param_frame_rate(codec, source, desc,
+            (VAEncMiscParameterFrameRate *)misc_param->data);
+    vaUnmapBuffer(va_dpy, fr_param_buf);
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &seq_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h264 sequence param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto error;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &rc_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h264 rate control param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto error;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &fr_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h264 frame rate param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto error;
+    }
+
+error:
+    /* All three buffers were created above, so always destroy them */
+    vaDestroyBuffer(va_dpy, seq_param_buf);
+    vaDestroyBuffer(va_dpy, rc_param_buf);
+    vaDestroyBuffer(va_dpy, fr_param_buf);
+
+    return err;
+}
+
+/*
+ * Submit the picture-level encode parameters to the VA context.
+ * Returns 0 on success, -1 if vaRenderPicture() fails.
+ */
+static int h264_encode_render_picture(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc)
+{
+    VAEncPictureParameterBufferH264 pic_param;
+    VABufferID buf_id;
+    VAStatus status;
+
+    memset(&pic_param, 0, sizeof(pic_param));
+    h264_fill_enc_picture_param(codec, source, desc, &pic_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncPictureParameterBufferType,
+                   sizeof(pic_param), 1, &pic_param, &buf_id);
+
+    /* The buffer is only needed for this one render call */
+    status = vaRenderPicture(va_dpy, codec->va_ctx, &buf_id, 1);
+    vaDestroyBuffer(va_dpy, buf_id);
+
+    if (status != VA_STATUS_SUCCESS) {
+        virgl_log("render h264 picture param failed, err = 0x%x\n", status);
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Submit the slice-level encode parameters to the VA context.
+ * Returns 0 on success, -1 if vaRenderPicture() fails.
+ */
+static int h264_encode_render_slice(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc)
+{
+    VAEncSliceParameterBufferH264 slice_param;
+    VABufferID buf_id;
+    VAStatus status;
+
+    memset(&slice_param, 0, sizeof(slice_param));
+    h264_fill_enc_slice_param(codec, source, desc, &slice_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncSliceParameterBufferType,
+                   sizeof(slice_param), 1, &slice_param, &buf_id);
+
+    /* The buffer is only needed for this one render call */
+    status = vaRenderPicture(va_dpy, codec->va_ctx, &buf_id, 1);
+    vaDestroyBuffer(va_dpy, buf_id);
+
+    if (status != VA_STATUS_SUCCESS) {
+        virgl_log("render h264 slice param failed, err = 0x%x\n", status);
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Encode one H.264 frame. For IDR frames the sequence parameters are
+ * (re)submitted first, then the picture and slice parameters.
+ *
+ * Returns 0 on success, -1 if any stage fails (the render helpers already
+ * log the VA error; previously their return codes were silently dropped).
+ */
+static int h264_encode_bitstream(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h264_enc_picture_desc *desc)
+{
+    if (desc->picture_type == PIPE_H2645_ENC_PICTURE_TYPE_IDR &&
+        h264_encode_render_sequence(codec, source, desc) < 0)
+        return -1;
+
+    if (h264_encode_render_picture(codec, source, desc) < 0)
+        return -1;
+
+    return h264_encode_render_slice(codec, source, desc);
+}
+
+/* Reset a VAPictureHEVC entry to the "no picture" state. */
+static void h265_init_picture(VAPictureHEVC *pic)
+{
+    pic->flags = VA_PICTURE_HEVC_INVALID;
+    pic->picture_id = VA_INVALID_SURFACE;
+    pic->pic_order_cnt = 0;
+}
+
+/*
+ * Fill the VA-API HEVC decode picture parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandlePictureParameterBufferHEVC() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h265_fill_picture_param(struct virgl_video_codec *codec,
+                                    struct virgl_video_buffer *target,
+                                    const struct virgl_h265_picture_desc *desc,
+                                    VAPictureParameterBufferHEVC *vapp)
+{
+    unsigned i;
+
+    (void)codec;
+    (void)target;
+
+    //vapp->CurrPic.picture_id
+    vapp->CurrPic.pic_order_cnt = desc->CurrPicOrderCntVal;
+    //vapp->CurrPic.flags
+
+    /* Copy the 15-entry DPB, marking unused slots invalid */
+    for (i = 0; i < 15; i++) {
+        vapp->ReferenceFrames[i].pic_order_cnt = desc->PicOrderCntVal[i];
+        vapp->ReferenceFrames[i].picture_id = desc->ref[i];
+        vapp->ReferenceFrames[i].flags = VA_INVALID_SURFACE == desc->ref[i]
+                                         ? VA_PICTURE_HEVC_INVALID : 0;
+    }
+    /* Tag DPB entries with their reference-picture-set membership */
+    for (i = 0; i < desc->NumPocStCurrBefore; i++)
+        vapp->ReferenceFrames[desc->RefPicSetStCurrBefore[i]].flags |= \
+            VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE;
+    for (i = 0; i < desc->NumPocStCurrAfter; i++)
+        vapp->ReferenceFrames[desc->RefPicSetStCurrAfter[i]].flags |= \
+            VA_PICTURE_HEVC_RPS_ST_CURR_AFTER;
+    for (i = 0; i < desc->NumPocLtCurr; i++)
+        vapp->ReferenceFrames[desc->RefPicSetLtCurr[i]].flags |= \
+            VA_PICTURE_HEVC_RPS_LT_CURR;
+
+    /* Picture geometry from the SPS */
+    ITEM_SET(vapp, &desc->pps.sps, pic_width_in_luma_samples);
+    ITEM_SET(vapp, &desc->pps.sps, pic_height_in_luma_samples);
+
+    /* pic_fields: mixed SPS/PPS flags */
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps.sps, chroma_format_idc);
+    ITEM_SET(&vapp->pic_fields.bits,
+             &desc->pps.sps, separate_colour_plane_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps.sps, pcm_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits,
+             &desc->pps.sps, scaling_list_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits,
+             &desc->pps, transform_skip_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps.sps, amp_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits,
+             &desc->pps.sps, strong_intra_smoothing_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps, sign_data_hiding_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps, constrained_intra_pred_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps, cu_qp_delta_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps, weighted_pred_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps, weighted_bipred_flag);
+    ITEM_SET(&vapp->pic_fields.bits,
+             &desc->pps, transquant_bypass_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps, tiles_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits,
+             &desc->pps, entropy_coding_sync_enabled_flag);
+    ITEM_SET(&vapp->pic_fields.bits, &desc->pps,
+             pps_loop_filter_across_slices_enabled_flag);
+    if (desc->pps.tiles_enabled_flag)
+        ITEM_SET(&vapp->pic_fields.bits,
+                 &desc->pps, loop_filter_across_tiles_enabled_flag);
+    if (desc->pps.sps.pcm_enabled_flag)
+        ITEM_SET(&vapp->pic_fields.bits,
+                 &desc->pps.sps, pcm_loop_filter_disabled_flag);
+    //ITEM_SET(vapp->pic_fields.bits, desc->pps.sps, NoPicReorderingFlag);
+    //ITEM_SET(vapp->pic_fields.bits, desc->pps.sps, NoBiPredFlag);
+
+    /* Scalar SPS/PPS coding parameters */
+    ITEM_SET(vapp, &desc->pps.sps, sps_max_dec_pic_buffering_minus1);
+    ITEM_SET(vapp, &desc->pps.sps, bit_depth_luma_minus8);
+    ITEM_SET(vapp, &desc->pps.sps, bit_depth_chroma_minus8);
+    if (desc->pps.sps.pcm_enabled_flag) {
+        ITEM_SET(vapp, &desc->pps.sps, pcm_sample_bit_depth_luma_minus1);
+        ITEM_SET(vapp, &desc->pps.sps, pcm_sample_bit_depth_chroma_minus1);
+    }
+    ITEM_SET(vapp, &desc->pps.sps, log2_min_luma_coding_block_size_minus3);
+    ITEM_SET(vapp, &desc->pps.sps, log2_diff_max_min_luma_coding_block_size);
+    ITEM_SET(vapp, &desc->pps.sps, log2_min_transform_block_size_minus2);
+    ITEM_SET(vapp, &desc->pps.sps, log2_diff_max_min_transform_block_size);
+    if (desc->pps.sps.pcm_enabled_flag) {
+        ITEM_SET(vapp, &desc->pps.sps,
+                 log2_min_pcm_luma_coding_block_size_minus3);
+        ITEM_SET(vapp, &desc->pps.sps,
+                 log2_diff_max_min_pcm_luma_coding_block_size);
+    }
+    ITEM_SET(vapp, &desc->pps.sps, max_transform_hierarchy_depth_intra);
+    ITEM_SET(vapp, &desc->pps.sps, max_transform_hierarchy_depth_inter);
+    ITEM_SET(vapp, &desc->pps, init_qp_minus26);
+    ITEM_SET(vapp, &desc->pps, diff_cu_qp_delta_depth);
+    ITEM_SET(vapp, &desc->pps, pps_cb_qp_offset);
+    ITEM_SET(vapp, &desc->pps, pps_cr_qp_offset);
+    ITEM_SET(vapp, &desc->pps, log2_parallel_merge_level_minus2);
+    if (desc->pps.tiles_enabled_flag) {
+        ITEM_SET(vapp, &desc->pps, num_tile_columns_minus1);
+        ITEM_SET(vapp, &desc->pps, num_tile_rows_minus1);
+        ITEM_CPY(vapp, &desc->pps, column_width_minus1);
+        ITEM_CPY(vapp, &desc->pps, row_height_minus1);
+    }
+
+    /* slice_parsing_fields: flags the driver needs to parse slice headers */
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, lists_modification_present_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps.sps, long_term_ref_pics_present_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps.sps, sps_temporal_mvp_enabled_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, cabac_init_present_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, output_flag_present_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, dependent_slice_segments_enabled_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, pps_slice_chroma_qp_offsets_present_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps.sps, sample_adaptive_offset_enabled_flag);
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, deblocking_filter_override_enabled_flag);
+    /* Field names differ between the two structs, so set it manually */
+    vapp->slice_parsing_fields.bits.pps_disable_deblocking_filter_flag = \
+        desc->pps.pps_deblocking_filter_disabled_flag;
+    ITEM_SET(&vapp->slice_parsing_fields.bits,
+             &desc->pps, slice_segment_header_extension_present_flag);
+    vapp->slice_parsing_fields.bits.RapPicFlag = desc->RAPPicFlag;
+    vapp->slice_parsing_fields.bits.IdrPicFlag = desc->IDRPicFlag;
+    //vapp->slice_parsing_fields.bits.IntraPicFlag
+
+    ITEM_SET(vapp, &desc->pps.sps, log2_max_pic_order_cnt_lsb_minus4);
+    ITEM_SET(vapp, &desc->pps.sps, num_short_term_ref_pic_sets);
+    vapp->num_long_term_ref_pic_sps = desc->pps.sps.num_long_term_ref_pics_sps;
+    ITEM_SET(vapp, &desc->pps, num_ref_idx_l0_default_active_minus1);
+    ITEM_SET(vapp, &desc->pps, num_ref_idx_l1_default_active_minus1);
+    ITEM_SET(vapp, &desc->pps, pps_beta_offset_div2);
+    ITEM_SET(vapp, &desc->pps, pps_tc_offset_div2);
+    ITEM_SET(vapp, &desc->pps, num_extra_slice_header_bits);
+
+    ITEM_SET(vapp, &desc->pps, st_rps_bits);
+}
+
+/*
+ * Fill the VA-API HEVC slice parameters: only the reference picture lists
+ * are forwarded; everything else stays at its default.
+ *
+ * Refer to vlVaHandleSliceParameterBufferHEVC() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h265_fill_slice_param(const struct virgl_h265_picture_desc *desc,
+                                  VASliceParameterBufferHEVC *vapp)
+{
+    unsigned list, idx;
+
+    //slice_data_size;
+    //slice_data_offset;
+    //slice_data_flag;
+    //slice_data_byte_offset;
+    //slice_segment_address;
+    for (list = 0; list < 2; list++) {
+        for (idx = 0; idx < 15; idx++)
+            vapp->RefPicList[list][idx] = desc->RefPicList[list][idx];
+    }
+    //LongSliceFlags;
+    //collocated_ref_idx;
+    //num_ref_idx_l0_active_minus1;
+    //num_ref_idx_l1_active_minus1;
+    //slice_qp_delta;
+    //slice_cb_qp_offset;
+    //slice_cr_qp_offset;
+    //slice_beta_offset_div2;
+    //slice_tc_offset_div2;
+    //luma_log2_weight_denom;
+    //delta_chroma_log2_weight_denom;
+    //delta_luma_weight_l0[15];
+    //luma_offset_l0[15];
+    //delta_chroma_weight_l0[15][2];
+    //ChromaOffsetL0[15][2];
+    //delta_luma_weight_l1[15];
+    //luma_offset_l1[15];
+    //delta_chroma_weight_l1[15][2];
+    //ChromaOffsetL1[15][2];
+    //five_minus_max_num_merge_cand;
+    //num_entry_point_offsets;
+    //entry_offset_to_subset_array;
+    //slice_data_num_emu_prevn_bytes;
+    //va_reserved[VA_PADDING_LOW - 2];
+}
+
+/*
+ * Fill the VA-API HEVC encode sequence parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncSequenceParameterBufferTypeHEVC() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h265_fill_enc_seq_param(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc,
+        VAEncSequenceParameterBufferHEVC *param)
+{
+    (void)codec;
+    (void)source;
+
+    /* Profile/tier/level and GOP structure */
+    ITEM_SET(param, &desc->seq, general_profile_idc);
+    ITEM_SET(param, &desc->seq, general_level_idc);
+    ITEM_SET(param, &desc->seq, general_tier_flag);
+    ITEM_SET(param, &desc->seq, intra_period);
+    //intra_idr_period
+    ITEM_SET(param, &desc->seq, ip_period);
+    //bits_per_second
+    ITEM_SET(param, &desc->seq, pic_width_in_luma_samples);
+    ITEM_SET(param, &desc->seq, pic_height_in_luma_samples);
+
+    /* seq_fields.bits */
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, chroma_format_idc);
+    //seq_fields.bits.separate_colour_plane_flag
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, bit_depth_luma_minus8);
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, bit_depth_chroma_minus8);
+    //seq_fields.bits.scaling_list_enabled_flag
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, strong_intra_smoothing_enabled_flag);
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, amp_enabled_flag);
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, sample_adaptive_offset_enabled_flag);
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, pcm_enabled_flag);
+    //seq_fields.bits.pcm_loop_filter_disabled_flag
+    ITEM_SET(&param->seq_fields.bits, &desc->seq, sps_temporal_mvp_enabled_flag);
+    //seq_fields.bits.low_delay_seq
+    //seq_fields.bits.hierachical_flag
+    //seq_fields.bits.reserved_bits
+
+    /* Coding-block / transform-block size limits */
+    ITEM_SET(param, &desc->seq, log2_min_luma_coding_block_size_minus3);
+    ITEM_SET(param, &desc->seq, log2_diff_max_min_luma_coding_block_size);
+    ITEM_SET(param, &desc->seq, log2_min_transform_block_size_minus2);
+    ITEM_SET(param, &desc->seq, log2_diff_max_min_transform_block_size);
+    ITEM_SET(param, &desc->seq, max_transform_hierarchy_depth_inter);
+    ITEM_SET(param, &desc->seq, max_transform_hierarchy_depth_intra);
+    //pcm_sample_bit_depth_luma_minus1
+    //pcm_sample_bit_depth_chroma_minus1
+    //log2_min_pcm_luma_coding_block_size_minus3
+    //log2_max_pcm_luma_coding_block_size_minus3
+    ITEM_SET(param, &desc->seq, vui_parameters_present_flag);
+
+    /* vui_fields.bits: only meaningful when VUI parameters are present */
+    if (desc->seq.vui_parameters_present_flag) {
+        ITEM_SET(&param->vui_fields.bits, &desc->seq.vui_flags,
+                 aspect_ratio_info_present_flag);
+    }
+    //vui_fields.bits.neutral_chroma_indication_flag
+    //vui_fields.bits.field_seq_flag
+    if (desc->seq.vui_parameters_present_flag) {
+        /* Field names differ between the structs, so set it manually */
+        param->vui_fields.bits.vui_timing_info_present_flag =
+            desc->seq.vui_flags.timing_info_present_flag;
+    }
+    //vui_fields.bits.bitstream_restriction_flag
+    //vui_fields.bits.tiles_fixed_structure_flag
+    //vui_fields.bits.motion_vectors_over_pic_boundaries_flag
+    //vui_fields.bits.restricted_ref_pic_lists_flag
+    //vui_fields.bits.log2_max_mv_length_horizontal
+    //vui_fields.bits.log2_max_mv_length_vertical
+
+    if (desc->seq.vui_parameters_present_flag) {
+        ITEM_SET(param, &desc->seq, aspect_ratio_idc);
+        ITEM_SET(param, &desc->seq, sar_width);
+        ITEM_SET(param, &desc->seq, sar_height);
+    }
+    param->vui_num_units_in_tick = desc->seq.num_units_in_tick;
+    param->vui_time_scale = desc->seq.time_scale;
+    //min_spatial_segmentation_idc
+    //max_bytes_per_pic_denom
+    //max_bits_per_min_cu_denom
+
+    //scc_fields.bits.palette_mode_enabled_flag
+}
+
+/*
+ * Fill the VA-API HEVC encode picture parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncPictureParameterBufferTypeHEVC() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h265_fill_enc_picture_param(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc,
+        VAEncPictureParameterBufferHEVC *param)
+{
+    unsigned i;
+
+    (void)source;
+
+    /* Reconstructed surface and display order of the current frame */
+    param->decoded_curr_pic.picture_id = get_enc_ref_pic(codec, desc->frame_num);
+    param->decoded_curr_pic.pic_order_cnt = desc->pic_order_cnt;
+
+    /* All invalid here; real references are set per-slice */
+    for (i = 0; i < 15; i++) {
+        h265_init_picture(&param->reference_frames[i]);
+    }
+
+    /* Output buffer that receives the encoded bitstream */
+    param->coded_buf = codec->va_coded_buf;
+    //collocated_ref_pic_index
+    //last_picture
+    param->pic_init_qp = desc->rc.quant_i_frames;
+    //diff_cu_qp_delta_depth
+    //pps_cb_qp_offset
+    //pps_cr_qp_offset
+    //num_tile_columns_minus1
+    //num_tile_rows_minus1
+    //column_width_minus1[19]
+    //row_height_minus1[21]
+    ITEM_SET(param, &desc->pic, log2_parallel_merge_level_minus2);
+    //ctu_max_bitsize_allowed
+    param->num_ref_idx_l0_default_active_minus1 = desc->num_ref_idx_l0_active_minus1;
+    param->num_ref_idx_l1_default_active_minus1 = desc->num_ref_idx_l1_active_minus1;
+    //slice_pic_parameter_set_id
+    ITEM_SET(param, &desc->pic, nal_unit_type);
+
+    /* coding_type: 1 = intra (I/IDR), 2 = P, 3 = B */
+    param->pic_fields.bits.idr_pic_flag =
+        (desc->picture_type == PIPE_H2645_ENC_PICTURE_TYPE_IDR);
+    switch (desc->picture_type) {
+    case PIPE_H2645_ENC_PICTURE_TYPE_IDR: /* fallthrough */
+    case PIPE_H2645_ENC_PICTURE_TYPE_I:
+        param->pic_fields.bits.coding_type = 1;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_P:
+        param->pic_fields.bits.coding_type = 2;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_B:
+        param->pic_fields.bits.coding_type = 3;
+        break;
+    default:
+        break;
+    }
+
+    param->pic_fields.bits.reference_pic_flag = !desc->not_referenced;
+    //pic_fields.bits.dependent_slice_segments_enabled_flag
+    //pic_fields.bits.sign_data_hiding_enabled_flag
+    ITEM_SET(&param->pic_fields.bits, &desc->pic, constrained_intra_pred_flag);
+    ITEM_SET(&param->pic_fields.bits, &desc->pic, transform_skip_enabled_flag);
+    //pic_fields.bits.cu_qp_delta_enabled_flag
+    //pic_fields.bits.weighted_pred_flag
+    //pic_fields.bits.weighted_bipred_flag
+    //pic_fields.bits.transquant_bypass_enabled_flag
+    //pic_fields.bits.tiles_enabled_flag
+    //pic_fields.bits.entropy_coding_sync_enabled_flag
+    //pic_fields.bits.loop_filter_across_tiles_enabled_flag
+    ITEM_SET(&param->pic_fields.bits, &desc->pic,
+             pps_loop_filter_across_slices_enabled_flag);
+    //pic_fields.bits.scaling_list_data_present_flag
+    //pic_fields.bits.screen_content_flag
+    //pic_fields.bits.enable_gpu_weighted_prediction
+    //pic_fields.bits.no_output_of_prior_pics_flag
+
+    //hierarchical_level_plus1
+    //scc_fields.bits.pps_curr_pic_ref_enabled_flag
+}
+
+/*
+ * Fill the VA-API HEVC encode slice parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncSliceParameterBufferTypeHEVC() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h265_fill_enc_slice_param(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc,
+        VAEncSliceParameterBufferHEVC *param)
+{
+    unsigned i;
+    const struct virgl_h265_slice_descriptor *sd;
+
+    (void)source;
+
+    /* Get the latest slice descriptor */
+    if (desc->num_slice_descriptors &&
+        desc->num_slice_descriptors <= ARRAY_SIZE(desc->slices_descriptors)) {
+        sd = &desc->slices_descriptors[desc->num_slice_descriptors - 1];
+        ITEM_SET(param, sd, slice_segment_address);
+        ITEM_SET(param, sd, num_ctu_in_slice);
+    }
+
+    /* Map the pipe picture type to the HEVC slice_type code (P=0, B=1, I=2) */
+    switch (desc->picture_type) {
+    case PIPE_H2645_ENC_PICTURE_TYPE_P:
+        param->slice_type = 0;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_B:
+        param->slice_type = 1;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_I:
+    case PIPE_H2645_ENC_PICTURE_TYPE_IDR: /* fall through */
+        param->slice_type = 2;
+        break;
+    case PIPE_H2645_ENC_PICTURE_TYPE_SKIP:
+    default:
+        break;
+    }
+
+    //slice_pic_parameter_set_id
+
+    //num_ref_idx_l0_active_minus1
+    //num_ref_idx_l1_active_minus1
+
+    /* Reference lists; unused entries remain VA_PICTURE_HEVC_INVALID.
+     * NOTE(review): assumes ref_idx_l0_list/_l1_list hold 15 entries —
+     * confirm against the descriptor definition. */
+    for (i = 0; i < 15; i++) {
+        h265_init_picture(&param->ref_pic_list0[i]);
+        h265_init_picture(&param->ref_pic_list1[i]);
+
+        param->ref_pic_list0[i].picture_id =
+            get_enc_ref_pic(codec, desc->ref_idx_l0_list[i]);
+        param->ref_pic_list1[i].picture_id =
+            get_enc_ref_pic(codec, desc->ref_idx_l1_list[i]);
+
+        if (param->ref_pic_list0[i].picture_id != VA_INVALID_ID)
+            param->ref_pic_list0[i].flags = VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE;
+
+        if (param->ref_pic_list1[i].picture_id != VA_INVALID_ID)
+            param->ref_pic_list1[i].flags = VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE;
+    }
+
+    //luma_log2_weight_denom
+    //delta_chroma_log2_weight_denom
+    //delta_luma_weight_l0[15]
+    //luma_offset_l0[15]
+    //delta_chroma_weight_l0[15][2]
+    //chroma_offset_l0[15][2]
+    //delta_luma_weight_l1[15]
+    //luma_offset_l1[15]
+    //delta_chroma_weight_l1[15][2]
+    //chroma_offset_l1[15][2]
+    ITEM_SET(param, &desc->slice, max_num_merge_cand);
+    //slice_qp_delta
+    ITEM_SET(param, &desc->slice, slice_cb_qp_offset);
+    ITEM_SET(param, &desc->slice, slice_cr_qp_offset);
+    ITEM_SET(param, &desc->slice, slice_beta_offset_div2);
+    ITEM_SET(param, &desc->slice, slice_tc_offset_div2);
+
+    //slice_fields.bits.last_slice_of_pic_flag
+    //slice_fields.bits.dependent_slice_segment_flag
+    //slice_fields.bits.colour_plane_id
+    //slice_fields.bits.slice_temporal_mvp_enabled_flag
+    //slice_fields.bits.slice_sao_luma_flag
+    //slice_fields.bits.slice_sao_chroma_flag
+    /*
+     * Since num_ref_idx_l0_active_minus1 and num_ref_idx_l1_active_minus1
+     * have been passed by VAEncPictureParameterBufferHEVC,
+     * num_ref_idx_active_override_flag is always set to 0.
+     */
+    param->slice_fields.bits.num_ref_idx_active_override_flag = 0;
+    //slice_fields.bits.mvd_l1_zero_flag
+    ITEM_SET(&param->slice_fields.bits, &desc->slice, cabac_init_flag);
+    ITEM_SET(&param->slice_fields.bits, &desc->slice,
+             slice_deblocking_filter_disabled_flag);
+    ITEM_SET(&param->slice_fields.bits,
+             &desc->slice, slice_loop_filter_across_slices_enabled_flag);
+    //slice_fields.bits.collocated_from_l0_flag
+
+    //pred_weight_table_bit_offset
+    //pred_weight_table_bit_length;
+}
+
+/*
+ * Fill the VA-API rate-control misc parameters from the guest descriptor.
+ *
+ * Refer to vlVaHandleVAEncMiscParameterTypeRateControlHEVC() in mesa,
+ * and comment out some unused parameters.
+ *
+ * The guest-side mesa handler reconstructs the target bitrate as
+ *   target_bitrate = bits_per_second * target_percentage / 100,
+ * so the inverse mapping is applied here when filling target_percentage.
+ */
+static void h265_fill_enc_misc_param_rate_ctrl(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc,
+        VAEncMiscParameterRateControl *param)
+{
+    (void)codec;
+    (void)source;
+
+    param->bits_per_second = desc->rc.peak_bitrate;
+    if (desc->rc.rate_ctrl_method !=
+        PIPE_H2645_ENC_RATE_CONTROL_METHOD_CONSTANT &&
+        param->bits_per_second) {
+        /* target_percentage is the target bitrate expressed as a percentage
+         * of bits_per_second (libva contract), not a bitrate product. */
+        param->target_percentage = desc->rc.target_bitrate * 100.0 /
+                                   param->bits_per_second;
+    }
+    //window_size;
+    //initial_qp;
+    param->min_qp = desc->rc.min_qp;
+    //basic_unit_size;
+
+    /* rc_flags */
+    //rc_flags.bits.reset
+    param->rc_flags.bits.disable_frame_skip = !desc->rc.skip_frame_enable;
+    param->rc_flags.bits.disable_bit_stuffing = !desc->rc.fill_data_enable;
+    //rc_flags.bits.mb_rate_control
+    //rc_flags.bits.temporal_id
+    //rc_flags.bits.cfs_I_frames
+    //rc_flags.bits.enable_parallel_brc
+    //rc_flags.bits.enable_dynamic_scaling
+    //rc_flags.bits.frame_tolerance_mode
+
+    //ICQ_quality_factor;
+    param->max_qp = desc->rc.max_qp;
+    //quality_factor;
+    //target_frame_size;
+}
+
+/*
+ * Fill VAEncMiscParameterFrameRate from the guest h265 encode desc.
+ *
+ * Refer to vlVaHandleVAEncMiscParameterTypeFrameRateHEVC() in mesa,
+ * and comment out some unused parameters.
+ */
+static void h265_fill_enc_misc_param_frame_rate(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc,
+        VAEncMiscParameterFrameRate *param)
+{
+    (void)codec;
+    (void)source;
+
+    /* VA-API packs the frame rate as a fraction: numerator in the low
+     * 16 bits, denominator in the high 16 bits. */
+    param->framerate = desc->rc.frame_rate_num | (desc->rc.frame_rate_den << 16);
+    //framerate_flags
+}
+
+/*
+ * Submit one h265 frame's decode parameters and slice data to VA-API.
+ *
+ * Creates one picture-parameter buffer, one slice-parameter buffer and a
+ * slice-data buffer per input bitstream buffer, renders them all on the
+ * codec's VA context, then destroys every VA buffer it created.
+ *
+ * Returns 0 on success, -1 on allocation or render failure.
+ *
+ * NOTE(review): the vaCreateBuffer() calls are not checked; a failed
+ * creation leaves an indeterminate buffer id that is later rendered and
+ * destroyed -- confirm whether the VA driver tolerates that.
+ */
+static int h265_decode_bitstream(struct virgl_video_codec *codec,
+                                 struct virgl_video_buffer *target,
+                                 const struct virgl_h265_picture_desc *desc,
+                                 unsigned num_buffers,
+                                 const void * const *buffers,
+                                 const unsigned *sizes)
+{
+    unsigned i;
+    int err = 0;
+    VAStatus va_stat;
+    VABufferID *slice_data_buf, pic_param_buf, slice_param_buf;
+    VAPictureParameterBufferHEVC pic_param = {0};
+    VASliceParameterBufferHEVC slice_param = {0};
+
+    /* One VA buffer id per guest bitstream buffer. */
+    slice_data_buf = calloc(num_buffers, sizeof(VABufferID));
+    if (!slice_data_buf) {
+        virgl_log("alloc slice data buffer id failed\n");
+        return -1;
+    }
+
+    h265_fill_picture_param(codec, target, desc, &pic_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAPictureParameterBufferType,
+                   sizeof(pic_param), 1, &pic_param, &pic_param_buf);
+
+    h265_fill_slice_param(desc, &slice_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VASliceParameterBufferType,
+                   sizeof(slice_param), 1, &slice_param, &slice_param_buf);
+
+    for (i = 0; i < num_buffers; i++) {
+        vaCreateBuffer(va_dpy, codec->va_ctx, VASliceDataBufferType,
+                       sizes[i], 1, (void *)(buffers[i]), &slice_data_buf[i]);
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &pic_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render picture param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &slice_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render slice param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto err;
+    }
+
+    /* Keep submitting the remaining slices even if one fails; only the
+     * error status is accumulated. */
+    for (i = 0; i < num_buffers; i++) {
+        va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &slice_data_buf[i], 1);
+
+        if (VA_STATUS_SUCCESS != va_stat) {
+            virgl_log("render slice data failed, err = 0x%x\n", va_stat);
+            err = -1;
+        }
+    }
+
+err:
+    /* All VA buffers are destroyed on both the success and error paths. */
+    vaDestroyBuffer(va_dpy, pic_param_buf);
+    vaDestroyBuffer(va_dpy, slice_param_buf);
+    for (i = 0; i < num_buffers; i++)
+        vaDestroyBuffer(va_dpy, slice_data_buf[i]);
+    free(slice_data_buf);
+
+    return err;
+}
+
+/*
+ * Render the per-sequence h265 encode parameters: the sequence parameter
+ * buffer plus the rate-control and frame-rate misc parameter buffers.
+ * All three VA buffers are destroyed before returning.
+ *
+ * Returns 0 on success, -1 if any vaRenderPicture() call fails.
+ *
+ * NOTE(review): vaCreateBuffer()/vaMapBuffer() results are not checked;
+ * a map failure would leave misc_param dangling before it is written --
+ * verify against the VA-API error contract.
+ */
+static int h265_encode_render_sequence(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc)
+{
+    int err = 0;
+    VAStatus va_stat;
+    VAEncSequenceParameterBufferHEVC seq_param;
+    VAEncMiscParameterBuffer *misc_param;
+    VABufferID seq_param_buf, rc_param_buf, fr_param_buf;
+
+    memset(&seq_param, 0, sizeof(seq_param));
+    h265_fill_enc_seq_param(codec, source, desc, &seq_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncSequenceParameterBufferType,
+                   sizeof(seq_param), 1, &seq_param, &seq_param_buf);
+
+    /* Misc parameter buffers are created empty, then mapped and filled
+     * in place (VAEncMiscParameterBuffer header + payload). */
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncMiscParameterBufferType,
+                   sizeof(VAEncMiscParameterBuffer) +
+                   sizeof(VAEncMiscParameterRateControl), 1, NULL, &rc_param_buf);
+    vaMapBuffer(va_dpy, rc_param_buf, (void **)&misc_param);
+    misc_param->type = VAEncMiscParameterTypeRateControl;
+    h265_fill_enc_misc_param_rate_ctrl(codec, source, desc,
+            (VAEncMiscParameterRateControl *)misc_param->data);
+    vaUnmapBuffer(va_dpy, rc_param_buf);
+
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncMiscParameterBufferType,
+                   sizeof(VAEncMiscParameterBuffer) +
+                   sizeof(VAEncMiscParameterFrameRate), 1, NULL, &fr_param_buf);
+    vaMapBuffer(va_dpy, fr_param_buf, (void **)&misc_param);
+    misc_param->type = VAEncMiscParameterTypeFrameRate;
+    h265_fill_enc_misc_param_frame_rate(codec, source, desc,
+            (VAEncMiscParameterFrameRate *)misc_param->data);
+    vaUnmapBuffer(va_dpy, fr_param_buf);
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &seq_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h265 sequence param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto error;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &rc_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h265 rate control param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto error;
+    }
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &fr_param_buf, 1);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h265 frame rate param failed, err = 0x%x\n", va_stat);
+        err = -1;
+        goto error;
+    }
+
+error:
+    vaDestroyBuffer(va_dpy, seq_param_buf);
+    vaDestroyBuffer(va_dpy, rc_param_buf);
+    vaDestroyBuffer(va_dpy, fr_param_buf);
+
+    return err;
+}
+
+/*
+ * Render the per-picture h265 encode parameters on the codec's VA
+ * context. The temporary VA buffer is destroyed before the status of
+ * vaRenderPicture() is inspected.
+ *
+ * Returns 0 on success, -1 on render failure.
+ */
+static int h265_encode_render_picture(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc)
+{
+    VAStatus va_stat;
+    VABufferID pic_param_buf;
+    VAEncPictureParameterBufferHEVC pic_param;
+
+    memset(&pic_param, 0, sizeof(pic_param));
+    h265_fill_enc_picture_param(codec, source, desc, &pic_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncPictureParameterBufferType,
+                   sizeof(pic_param), 1, &pic_param, &pic_param_buf);
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &pic_param_buf, 1);
+    vaDestroyBuffer(va_dpy, pic_param_buf);
+
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h265 picture param failed, err = 0x%x\n", va_stat);
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Render the h265 slice encode parameters on the codec's VA context.
+ * Mirrors h265_encode_render_picture(): create, render, destroy, then
+ * report the render status.
+ *
+ * Returns 0 on success, -1 on render failure.
+ */
+static int h265_encode_render_slice(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc)
+{
+    VAStatus va_stat;
+    VABufferID slice_param_buf;
+    VAEncSliceParameterBufferHEVC slice_param;
+
+    memset(&slice_param, 0, sizeof(slice_param));
+    h265_fill_enc_slice_param(codec, source, desc, &slice_param);
+    vaCreateBuffer(va_dpy, codec->va_ctx, VAEncSliceParameterBufferType,
+                   sizeof(slice_param), 1, &slice_param, &slice_param_buf);
+
+    va_stat = vaRenderPicture(va_dpy, codec->va_ctx, &slice_param_buf, 1);
+    vaDestroyBuffer(va_dpy, slice_param_buf);
+
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("render h265 slice param failed, err = 0x%x\n", va_stat);
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Encode one h265 frame: render the sequence-level parameters on IDR
+ * frames, then the picture and slice parameters.
+ *
+ * The previous version discarded the helpers' return values, so a failed
+ * vaRenderPicture() was reported as success; propagate the errors using
+ * the 0/-1 convention already used by the callers.
+ */
+static int h265_encode_bitstream(
+        struct virgl_video_codec *codec,
+        struct virgl_video_buffer *source,
+        const struct virgl_h265_enc_picture_desc *desc)
+{
+    /* Sequence parameters are only (re)sent at IDR boundaries. */
+    if (desc->picture_type == PIPE_H2645_ENC_PICTURE_TYPE_IDR &&
+        h265_encode_render_sequence(codec, source, desc))
+        return -1;
+
+    if (h265_encode_render_picture(codec, source, desc))
+        return -1;
+
+    return h265_encode_render_slice(codec, source, desc);
+}
+
+/*
+ * Public entry point: decode a frame's worth of bitstream buffers into
+ * the target video buffer.
+ *
+ * Validates the arguments, checks that the picture description's profile
+ * matches the codec's, then dispatches to the codec-specific handler
+ * (H.264 AVC or HEVC). Unknown profiles fall through to -1.
+ *
+ * Returns 0 on success, -1 on invalid arguments, profile mismatch,
+ * unsupported profile, or decode failure.
+ */
+int virgl_video_decode_bitstream(struct virgl_video_codec *codec,
+                                 struct virgl_video_buffer *target,
+                                 const union virgl_picture_desc *desc,
+                                 unsigned num_buffers,
+                                 const void * const *buffers,
+                                 const unsigned *sizes)
+{
+
+    if (!va_dpy || !codec || !target || !desc
+        || !num_buffers || !buffers || !sizes)
+        return -1;
+
+    if (desc->base.profile != codec->profile) {
+        virgl_log("profiles not matched, picture: %d, codec: %d\n",
+                  desc->base.profile, codec->profile);
+        return -1;
+    }
+
+    switch (codec->profile) {
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_CONSTRAINED_BASELINE:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_EXTENDED:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH10:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH422:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH444:
+        return h264_decode_bitstream(codec, target, &desc->h264,
+                                     num_buffers, buffers, sizes);
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_10:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_STILL:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_12:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_444:
+        return h265_decode_bitstream(codec, target, &desc->h265,
+                                     num_buffers, buffers, sizes);
+    default:
+        break;
+    }
+
+    return -1;
+}
+
+/*
+ * Public entry point: encode the source video buffer according to the
+ * picture description.
+ *
+ * Validates the arguments, checks that the description's profile matches
+ * the codec's, then dispatches to the codec-specific encoder (H.264 AVC
+ * or HEVC). Unknown profiles fall through to -1.
+ *
+ * Returns 0 on success, -1 otherwise.
+ */
+int virgl_video_encode_bitstream(struct virgl_video_codec *codec,
+                                 struct virgl_video_buffer *source,
+                                 const union virgl_picture_desc *desc)
+{
+    if (!va_dpy || !codec || !source || !desc)
+        return -1;
+
+    if (desc->base.profile != codec->profile) {
+        virgl_log("profiles not matched, picture: %d, codec: %d\n",
+                  desc->base.profile, codec->profile);
+        return -1;
+    }
+
+    switch (codec->profile) {
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_CONSTRAINED_BASELINE:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_EXTENDED:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH10:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH422:
+    case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH444:
+        return h264_encode_bitstream(codec, source, &desc->h264_enc);
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_10:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_STILL:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_12:
+    case PIPE_VIDEO_PROFILE_HEVC_MAIN_444:
+        return h265_encode_bitstream(codec, source, &desc->h265_enc);
+    default:
+        break;
+    }
+
+    return -1;
+}
+
+/*
+ * Public entry point: finish the current frame on the codec's VA context
+ * and wait for the hardware to complete.
+ *
+ * Ends the VA picture, synchronizes on the target surface, then invokes
+ * the completion path: encode_completed() for encoders, otherwise
+ * decode_completed() (which covers the decode entrypoints).
+ *
+ * Returns 0 on success, -1 on invalid arguments or VA failure.
+ */
+int virgl_video_end_frame(struct virgl_video_codec *codec,
+                          struct virgl_video_buffer *target)
+{
+    VAStatus va_stat;
+
+    if (!va_dpy || !codec || !target)
+        return -1;
+
+    va_stat = vaEndPicture(va_dpy, codec->va_ctx);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("end picture failed, err = 0x%x\n", va_stat);
+        return -1;
+    }
+
+    /* Block until the frame is fully processed before notifying. */
+    va_stat = vaSyncSurface(va_dpy, target->va_sfc);
+    if (VA_STATUS_SUCCESS != va_stat) {
+        virgl_log("sync surface failed, err = 0x%x\n", va_stat);
+        return -1;
+    }
+
+    if (codec->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE) {
+        decode_completed(codec, target);
+    } else {
+        encode_completed(codec, target);
+    }
+
+    return 0;
+}
+
diff --git a/src/virgl_video.h b/src/virgl_video.h
new file mode 100644
index 00000000..68e38894
--- /dev/null
+++ b/src/virgl_video.h
@@ -0,0 +1,161 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Kylin Software Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * General video encoding and decoding interface.
+ *
+ * This file provides a general video interface, which mainly contains
+ * two objects:
+ *
+ * virgl_video_buffer:
+ * Buffer for storing raw YUV formatted data. In VA-API based
+ * implementations, it is usually associated with a surface.
+ *
+ * virgl_video_codec:
+ * Represents an encoder or decoder. In VA-API based implementations, it
+ * usually corresponds to a context.
+ *
+ * @author Feng Jiang <jiangfeng@kylinos.cn>
+ */
+
+#ifndef VIRGL_VIDEO_H
+#define VIRGL_VIDEO_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "pipe/p_format.h"
+#include "pipe/p_video_enums.h"
+
+/* Opaque handles; the definitions live in the implementation. */
+struct virgl_video_codec;
+struct virgl_video_buffer;
+union virgl_caps;
+union virgl_picture_desc;
+
+/* Arguments for virgl_video_create_codec(). */
+struct virgl_video_create_codec_args {
+    enum pipe_video_profile profile;
+    enum pipe_video_entrypoint entrypoint;
+    enum pipe_video_chroma_format chroma_format;
+    uint32_t level;
+    uint32_t width;                /* coded width in pixels */
+    uint32_t height;               /* coded height in pixels */
+    uint32_t max_references;
+    uint32_t flags;
+    /* Caller-owned pointer, retrievable later via
+     * virgl_video_codec_opaque_data(). */
+    void *opaque;
+};
+
+/* Arguments for virgl_video_create_buffer(). */
+struct virgl_video_create_buffer_args {
+    enum pipe_format format;       /* raw YUV pixel format */
+    uint32_t width;
+    uint32_t height;
+    bool interlaced;
+    /* Caller-owned pointer, retrievable later via
+     * virgl_video_buffer_opaque_data(). */
+    void *opaque;
+};
+
+/* flags for virgl_video_dma_buffers */
+#define VIRGL_VIDEO_DMABUF_READ_ONLY 0x0001
+#define VIRGL_VIDEO_DMABUF_WRITE_ONLY 0x0002
+#define VIRGL_VIDEO_DMABUF_READ_WRITE 0x0003
+
+/* A video buffer exported as a DRM dma-buf, at most 4 planes. */
+struct virgl_video_dma_buf {
+    struct virgl_video_buffer *buf;
+
+    uint32_t drm_format;
+    uint32_t width;
+    uint32_t height;
+    uint32_t flags;                /* VIRGL_VIDEO_DMABUF_* access flags */
+
+    uint32_t num_planes;
+    struct virgl_video_dma_buf_plane {
+        uint32_t drm_format;
+        int fd;                    /* dma-buf file descriptor */
+        uint32_t size;
+        /* NOTE(review): DRM format modifiers (DRM_FORMAT_MOD_*) are
+         * 64-bit values; an int may truncate them -- confirm intended
+         * use before widening. */
+        int modifier;
+        uint32_t offset;
+        uint32_t pitch;
+    } planes[4];
+};
+
+/*
+ * Use callback functions instead of directly exporting the video buffer
+ * through an interface like virgl_video_export_buffer() is because the
+ * underlying implementation may not be VA-API. The callback function can
+ * better shield the underlying logic differences.
+ */
+struct virgl_video_callbacks {
+    /* Callback when decoding is complete, used to download the decoded picture
+     * from the video buffer */
+    void (*decode_completed)(struct virgl_video_codec *codec,
+                             const struct virgl_video_dma_buf *dmabuf);
+
+    /* Upload the picture data to be encoded to the video buffer */
+    void (*encode_upload_picture)(struct virgl_video_codec *codec,
+                                  const struct virgl_video_dma_buf *dmabuf);
+
+    /* Callback when encoding is complete, used to download the encoded data
+     * and reference picture. coded_bufs/coded_sizes hold num_coded_bufs
+     * entries of encoded output. */
+    void (*encode_completed)(struct virgl_video_codec *codec,
+                             const struct virgl_video_dma_buf *src_buf,
+                             const struct virgl_video_dma_buf *ref_buf,
+                             unsigned num_coded_bufs,
+                             const void * const *coded_bufs,
+                             const unsigned *coded_sizes);
+};
+
+/* Library init/teardown. drm_fd selects the DRM device used by the
+ * backend; cbs must remain valid until virgl_video_destroy().
+ * Presumably returns 0 on success, negative on failure -- confirm in
+ * the implementation. */
+int virgl_video_init(int drm_fd,
+                     struct virgl_video_callbacks *cbs,
+                     unsigned int flags);
+void virgl_video_destroy(void);
+
+/* Fill the video-related fields of the virgl capability set. */
+int virgl_video_fill_caps(union virgl_caps *caps);
+
+/* Codec lifecycle and accessors. create returns NULL on failure. */
+struct virgl_video_codec *virgl_video_create_codec(
+        const struct virgl_video_create_codec_args *args);
+void virgl_video_destroy_codec(struct virgl_video_codec *codec);
+uint32_t virgl_video_codec_profile(const struct virgl_video_codec *codec);
+void *virgl_video_codec_opaque_data(struct virgl_video_codec *codec);
+
+/* Buffer lifecycle and accessors. create returns NULL on failure. */
+struct virgl_video_buffer *virgl_video_create_buffer(
+        const struct virgl_video_create_buffer_args *args);
+void virgl_video_destroy_buffer(struct virgl_video_buffer *buffer);
+uint32_t virgl_video_buffer_id(const struct virgl_video_buffer *buffer);
+void *virgl_video_buffer_opaque_data(struct virgl_video_buffer *buffer);
+
+/* Per-frame operations: begin, submit bitstream/picture, end.
+ * All return 0 on success, -1 on failure. */
+int virgl_video_begin_frame(struct virgl_video_codec *codec,
+                            struct virgl_video_buffer *target);
+int virgl_video_decode_bitstream(struct virgl_video_codec *codec,
+                                 struct virgl_video_buffer *target,
+                                 const union virgl_picture_desc *desc,
+                                 unsigned num_buffers,
+                                 const void * const *buffers,
+                                 const unsigned *sizes);
+int virgl_video_encode_bitstream(struct virgl_video_codec *codec,
+                                 struct virgl_video_buffer *source,
+                                 const union virgl_picture_desc *desc);
+int virgl_video_end_frame(struct virgl_video_codec *codec,
+                          struct virgl_video_buffer *target);
+#endif /* VIRGL_VIDEO_H */
+
diff --git a/src/virgl_video_hw.h b/src/virgl_video_hw.h
new file mode 100644
index 00000000..2f50f969
--- /dev/null
+++ b/src/virgl_video_hw.h
@@ -0,0 +1,585 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Kylin Software Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Data structure definition of video hardware layer.
+ *
+ * These structures are used for communication between host and guest, and
+ * they are 4-byte aligned.
+ *
+ * 'virgl_picture_desc' and other related structures mainly describe sequence
+ * parameters, picture parameters, slice parameters, etc., as well as some
+ * context information for encoding and decoding. The video backend needs them
+ * to reconstruct VA-API calls.
+ *
+ * @author Feng Jiang <jiangfeng@kylinos.cn>
+ */
+
+#ifndef VIRGL_VIDEO_HW_H
+#define VIRGL_VIDEO_HW_H
+
+#include <stdint.h>
+
+/* Common head shared by every codec-specific picture description in
+ * union virgl_picture_desc; identifies the profile and entrypoint. */
+struct virgl_base_picture_desc {
+    uint16_t profile; /* enum pipe_video_profile */
+    uint8_t entry_point; /* enum pipe_video_entrypoint */
+    uint8_t protected_playback;
+    uint8_t decrypt_key[256]; /* key material; key_size bytes are valid */
+    uint32_t key_size;
+
+};
+
+/* Encoder quality/preset knobs shared by the H.264/H.265 encode descs. */
+struct virgl_enc_quality_modes {
+    uint32_t level;
+    uint32_t preset_mode;
+    uint32_t pre_encode_mode;
+    uint32_t vbaq_mode;
+};
+
+/* H.264 sequence parameter set. Fields are grouped in 4-byte runs to
+ * keep the guest/host layout 4-byte aligned. */
+struct virgl_h264_sps {
+    uint8_t level_idc;
+    uint8_t chroma_format_idc;
+    uint8_t separate_colour_plane_flag;
+    uint8_t bit_depth_luma_minus8;
+
+    uint8_t bit_depth_chroma_minus8;
+    uint8_t seq_scaling_matrix_present_flag;
+    uint8_t ScalingList4x4[6][16];
+    uint8_t ScalingList8x8[6][64];
+
+    uint8_t log2_max_frame_num_minus4;
+    uint8_t pic_order_cnt_type;
+    uint8_t log2_max_pic_order_cnt_lsb_minus4;
+    uint8_t delta_pic_order_always_zero_flag;
+
+    int32_t offset_for_non_ref_pic;
+    int32_t offset_for_top_to_bottom_field;
+    int32_t offset_for_ref_frame[256];
+
+    uint8_t num_ref_frames_in_pic_order_cnt_cycle;
+    uint8_t max_num_ref_frames;
+    uint8_t frame_mbs_only_flag;
+    uint8_t mb_adaptive_frame_field_flag;
+
+    uint8_t direct_8x8_inference_flag;
+    uint8_t MinLumaBiPredSize8x8;
+    uint8_t reserved[2];
+};
+
+/* H.264 picture parameter set; embeds its SPS. */
+struct virgl_h264_pps {
+    struct virgl_h264_sps sps; /* Seq Param Set */
+
+    uint8_t entropy_coding_mode_flag;
+    uint8_t bottom_field_pic_order_in_frame_present_flag;
+    uint8_t num_slice_groups_minus1;
+    uint8_t slice_group_map_type;
+
+    uint8_t slice_group_change_rate_minus1;
+    uint8_t num_ref_idx_l0_default_active_minus1;
+    uint8_t num_ref_idx_l1_default_active_minus1;
+    uint8_t weighted_pred_flag;
+
+    uint8_t weighted_bipred_idc;
+    int8_t pic_init_qp_minus26;
+    int8_t pic_init_qs_minus26;
+    int8_t chroma_qp_index_offset;
+
+    uint8_t deblocking_filter_control_present_flag;
+    uint8_t constrained_intra_pred_flag;
+    uint8_t redundant_pic_cnt_present_flag;
+    uint8_t transform_8x8_mode_flag;
+
+    uint8_t ScalingList4x4[6][16];
+    uint8_t ScalingList8x8[6][64];
+
+    int8_t second_chroma_qp_index_offset;
+    uint8_t reserved[3];
+};
+
+/* H.264 decode picture description: PPS plus per-frame state and the
+ * 16-entry reference frame (DPB) arrays. */
+struct virgl_h264_picture_desc {
+    struct virgl_base_picture_desc base;
+
+    struct virgl_h264_pps pps; /* Picture Param Set */
+
+    uint32_t frame_num;
+
+    uint8_t field_pic_flag;
+    uint8_t bottom_field_flag;
+    uint8_t num_ref_idx_l0_active_minus1;
+    uint8_t num_ref_idx_l1_active_minus1;
+
+    uint32_t slice_count;
+    int32_t field_order_cnt[2];
+
+    uint8_t is_long_term[16];
+    uint8_t top_is_reference[16];
+    uint8_t bottom_is_reference[16];
+    /* NOTE(review): picture order counts are signed elsewhere
+     * (field_order_cnt is int32_t); confirm whether this list is
+     * intentionally unsigned. */
+    uint32_t field_order_cnt_list[16][2];
+    uint32_t frame_num_list[16];
+    uint32_t buffer_id[16];
+
+    uint8_t is_reference;
+    uint8_t num_ref_frames;
+    uint8_t reserved[2];
+};
+
+/* H.264 encoder sequence-level parameters (cropping, POC type, VUI). */
+struct virgl_h264_enc_seq_param
+{
+    uint32_t enc_constraint_set_flags;
+    uint32_t enc_frame_cropping_flag;
+    uint32_t enc_frame_crop_left_offset;
+    uint32_t enc_frame_crop_right_offset;
+    uint32_t enc_frame_crop_top_offset;
+    uint32_t enc_frame_crop_bottom_offset;
+    uint32_t pic_order_cnt_type;
+    uint32_t num_temporal_layers;
+    uint32_t vui_parameters_present_flag;
+    struct {
+        uint32_t aspect_ratio_info_present_flag: 1;
+        uint32_t timing_info_present_flag: 1;
+        uint32_t reserved:30;
+    } vui_flags;
+    uint32_t aspect_ratio_idc;
+    uint32_t sar_width;
+    uint32_t sar_height;
+    uint32_t num_units_in_tick;
+    uint32_t time_scale;
+};
+
+/* H.264 encoder rate-control parameters (one entry per temporal layer
+ * in virgl_h264_enc_picture_desc). */
+struct virgl_h264_enc_rate_control
+{
+    uint32_t target_bitrate;
+    uint32_t peak_bitrate;
+    uint32_t frame_rate_num;
+    uint32_t frame_rate_den;
+    uint32_t vbv_buffer_size;
+    uint32_t vbv_buf_lv;
+    uint32_t target_bits_picture;
+    uint32_t peak_bits_picture_integer;
+    uint32_t peak_bits_picture_fraction;
+    uint32_t fill_data_enable;
+    uint32_t skip_frame_enable;
+    uint32_t enforce_hrd;
+    uint32_t max_au_size;
+    uint32_t max_qp;
+    uint32_t min_qp;
+
+    uint8_t rate_ctrl_method; /* see enum pipe_h2645_enc_rate_control_method */
+    uint8_t reserved[3];
+};
+
+/* H.264 encoder motion-estimation tuning parameters. */
+struct virgl_h264_enc_motion_estimation
+{
+    uint32_t motion_est_quarter_pixel;
+    uint32_t enc_disable_sub_mode;
+    uint32_t lsmvert;
+    uint32_t enc_en_ime_overw_dis_subm;
+    uint32_t enc_ime_overw_dis_subm_no;
+    uint32_t enc_ime2_search_range_x;
+    uint32_t enc_ime2_search_range_y;
+};
+
+/* H.264 encoder picture-control parameters (entropy coding mode). */
+struct virgl_h264_enc_pic_control
+{
+    uint32_t enc_cabac_enable;
+    uint32_t enc_cabac_init_idc;
+};
+
+/* One slice of an H.264 encode frame, addressed in macroblocks. */
+struct virgl_h264_slice_descriptor
+{
+    uint32_t macroblock_address;
+    uint32_t num_macroblocks;
+
+    uint8_t slice_type; /* see enum pipe_h264_slice_type */
+    uint8_t reserved[3];
+};
+
+/* Full H.264 encode picture description: all parameter groups plus
+ * per-frame state, reference lists and up to 128 slice descriptors. */
+struct virgl_h264_enc_picture_desc
+{
+    struct virgl_base_picture_desc base;
+
+    struct virgl_h264_enc_seq_param seq;
+    struct virgl_h264_enc_rate_control rate_ctrl[4];
+    struct virgl_h264_enc_motion_estimation motion_est;
+    struct virgl_h264_enc_pic_control pic_ctrl;
+
+    uint32_t intra_idr_period;
+
+    uint32_t quant_i_frames;
+    uint32_t quant_p_frames;
+    uint32_t quant_b_frames;
+
+    uint32_t frame_num;
+    uint32_t frame_num_cnt;
+    uint32_t p_remain;
+    uint32_t i_remain;
+    uint32_t idr_pic_id;
+    uint32_t gop_cnt;
+    uint32_t pic_order_cnt;
+    uint32_t num_ref_idx_l0_active_minus1;
+    uint32_t num_ref_idx_l1_active_minus1;
+    uint32_t ref_idx_l0_list[32];
+    uint8_t l0_is_long_term[32];
+    uint32_t ref_idx_l1_list[32];
+    uint8_t l1_is_long_term[32];
+    uint32_t gop_size;
+    struct virgl_enc_quality_modes quality_modes;
+
+    uint32_t num_slice_descriptors;
+    struct virgl_h264_slice_descriptor slices_descriptors[128];
+
+    uint8_t picture_type; /* see enum pipe_h2645_enc_picture_type */
+    uint8_t not_referenced;
+    uint8_t is_ltr; /* long-term reference */
+    uint8_t enable_vui;
+
+    uint32_t ltr_index;
+};
+
+
+/* H.265 sequence parameter set. Fields are grouped in 4-byte runs to
+ * keep the guest/host layout 4-byte aligned. */
+struct virgl_h265_sps
+{
+    uint32_t pic_width_in_luma_samples;
+    uint32_t pic_height_in_luma_samples;
+
+    uint8_t chroma_format_idc;
+    uint8_t separate_colour_plane_flag;
+    uint8_t bit_depth_luma_minus8;
+    uint8_t bit_depth_chroma_minus8;
+
+    uint8_t log2_max_pic_order_cnt_lsb_minus4;
+    uint8_t sps_max_dec_pic_buffering_minus1;
+    uint8_t log2_min_luma_coding_block_size_minus3;
+    uint8_t log2_diff_max_min_luma_coding_block_size;
+
+    uint8_t log2_min_transform_block_size_minus2;
+    uint8_t log2_diff_max_min_transform_block_size;
+    uint8_t max_transform_hierarchy_depth_inter;
+    uint8_t max_transform_hierarchy_depth_intra;
+
+    uint8_t ScalingList4x4[6][16];
+    uint8_t ScalingList8x8[6][64];
+    uint8_t ScalingList16x16[6][64];
+    uint8_t ScalingList32x32[2][64];
+
+    uint8_t ScalingListDCCoeff16x16[6];
+    uint8_t ScalingListDCCoeff32x32[2];
+
+    uint8_t scaling_list_enabled_flag;
+    uint8_t amp_enabled_flag;
+    uint8_t sample_adaptive_offset_enabled_flag;
+    uint8_t pcm_enabled_flag;
+
+    uint8_t pcm_sample_bit_depth_luma_minus1;
+    uint8_t pcm_sample_bit_depth_chroma_minus1;
+    uint8_t log2_min_pcm_luma_coding_block_size_minus3;
+    uint8_t log2_diff_max_min_pcm_luma_coding_block_size;
+
+    uint8_t pcm_loop_filter_disabled_flag;
+    uint8_t num_short_term_ref_pic_sets;
+    uint8_t long_term_ref_pics_present_flag;
+    uint8_t num_long_term_ref_pics_sps;
+
+    uint8_t sps_temporal_mvp_enabled_flag;
+    uint8_t strong_intra_smoothing_enabled_flag;
+    uint8_t reserved[2];
+};
+
+/* H.265 picture parameter set; embeds its SPS. */
+struct virgl_h265_pps
+{
+    struct virgl_h265_sps sps;
+
+    uint8_t dependent_slice_segments_enabled_flag;
+    uint8_t output_flag_present_flag;
+    uint8_t num_extra_slice_header_bits;
+    uint8_t sign_data_hiding_enabled_flag;
+
+    uint8_t cabac_init_present_flag;
+    uint8_t num_ref_idx_l0_default_active_minus1;
+    uint8_t num_ref_idx_l1_default_active_minus1;
+    int8_t init_qp_minus26;
+
+    uint8_t constrained_intra_pred_flag;
+    uint8_t transform_skip_enabled_flag;
+    uint8_t cu_qp_delta_enabled_flag;
+    uint8_t diff_cu_qp_delta_depth;
+
+    int8_t pps_cb_qp_offset;
+    int8_t pps_cr_qp_offset;
+    uint8_t pps_slice_chroma_qp_offsets_present_flag;
+    uint8_t weighted_pred_flag;
+
+    uint8_t weighted_bipred_flag;
+    uint8_t transquant_bypass_enabled_flag;
+    uint8_t tiles_enabled_flag;
+    uint8_t entropy_coding_sync_enabled_flag;
+
+    uint16_t column_width_minus1[20];
+    uint16_t row_height_minus1[22];
+
+    uint8_t num_tile_columns_minus1;
+    uint8_t num_tile_rows_minus1;
+    uint8_t uniform_spacing_flag;
+    uint8_t loop_filter_across_tiles_enabled_flag;
+
+    uint8_t pps_loop_filter_across_slices_enabled_flag;
+    uint8_t deblocking_filter_control_present_flag;
+    uint8_t deblocking_filter_override_enabled_flag;
+    uint8_t pps_deblocking_filter_disabled_flag;
+
+    int8_t pps_beta_offset_div2;
+    int8_t pps_tc_offset_div2;
+    uint8_t lists_modification_present_flag;
+    uint8_t log2_parallel_merge_level_minus2;
+
+    uint16_t st_rps_bits;
+    uint8_t slice_segment_header_extension_present_flag;
+    uint8_t reserved;
+};
+
+/* H.265 decode picture description: PPS plus current-picture state and
+ * the 16-entry reference picture (DPB) arrays. */
+struct virgl_h265_picture_desc
+{
+    struct virgl_base_picture_desc base;
+
+    struct virgl_h265_pps pps;
+
+    int32_t CurrPicOrderCntVal;
+    uint32_t ref[16];
+    int32_t PicOrderCntVal[16];
+
+    uint32_t NumPocTotalCurr;
+    uint32_t NumDeltaPocsOfRefRpsIdx;
+    uint32_t NumShortTermPictureSliceHeaderBits;
+    uint32_t NumLongTermPictureSliceHeaderBits;
+
+    uint8_t IsLongTerm[16];
+
+    uint8_t IDRPicFlag;
+    uint8_t RAPPicFlag;
+    uint8_t CurrRpsIdx;
+    uint8_t NumPocStCurrBefore;
+
+    uint8_t NumPocStCurrAfter;
+    uint8_t NumPocLtCurr;
+    uint8_t UseRefPicList;
+    uint8_t UseStRpsBits;
+
+    uint8_t RefPicSetStCurrBefore[8];
+    uint8_t RefPicSetStCurrAfter[8];
+    uint8_t RefPicSetLtCurr[8];
+
+    uint8_t RefPicList[2][15];
+    uint8_t reserved[2];
+};
+
+/* H.265 encoder sequence-level parameters (profile/level, sizes,
+ * conformance window, VUI). */
+struct virgl_h265_enc_seq_param
+{
+    uint8_t general_profile_idc;
+    uint8_t general_level_idc;
+    uint8_t general_tier_flag;
+    uint8_t strong_intra_smoothing_enabled_flag;
+
+    uint32_t intra_period;
+    uint32_t ip_period;
+
+    uint16_t pic_width_in_luma_samples;
+    uint16_t pic_height_in_luma_samples;
+
+    uint32_t chroma_format_idc;
+    uint32_t bit_depth_luma_minus8;
+    uint32_t bit_depth_chroma_minus8;
+
+    uint8_t amp_enabled_flag;
+    uint8_t sample_adaptive_offset_enabled_flag;
+    uint8_t pcm_enabled_flag;
+    uint8_t sps_temporal_mvp_enabled_flag;
+
+    uint8_t log2_min_luma_coding_block_size_minus3;
+    uint8_t log2_diff_max_min_luma_coding_block_size;
+    uint8_t log2_min_transform_block_size_minus2;
+    uint8_t log2_diff_max_min_transform_block_size;
+
+    uint16_t conf_win_left_offset;
+    uint16_t conf_win_right_offset;
+    uint16_t conf_win_top_offset;
+    uint16_t conf_win_bottom_offset;
+
+    uint32_t vui_parameters_present_flag;
+    struct {
+        uint32_t aspect_ratio_info_present_flag: 1;
+        uint32_t timing_info_present_flag: 1;
+        uint32_t reserved:30;
+    } vui_flags;
+    uint32_t aspect_ratio_idc;
+    uint32_t sar_width;
+    uint32_t sar_height;
+    uint32_t num_units_in_tick;
+    uint32_t time_scale;
+
+    uint8_t max_transform_hierarchy_depth_inter;
+    uint8_t max_transform_hierarchy_depth_intra;
+    uint8_t conformance_window_flag;
+    uint8_t reserved;
+};
+
+/* H.265 encoder picture-level parameters. */
+struct virgl_h265_enc_pic_param
+{
+    uint8_t log2_parallel_merge_level_minus2;
+    uint8_t nal_unit_type;
+    uint8_t constrained_intra_pred_flag;
+    uint8_t pps_loop_filter_across_slices_enabled_flag;
+
+    uint8_t transform_skip_enabled_flag;
+    uint8_t reserved[3];
+};
+
+/* H.265 encoder slice-level parameters (QP offsets, deblocking). */
+struct virgl_h265_enc_slice_param
+{
+    uint8_t max_num_merge_cand;
+    int8_t slice_cb_qp_offset;
+    int8_t slice_cr_qp_offset;
+    int8_t slice_beta_offset_div2;
+
+    uint32_t slice_deblocking_filter_disabled_flag;
+
+    int8_t slice_tc_offset_div2;
+    uint8_t cabac_init_flag;
+    uint8_t slice_loop_filter_across_slices_enabled_flag;
+    uint8_t reserved;
+};
+
+/* H.265 encoder rate-control parameters. */
+struct virgl_h265_enc_rate_control
+{
+    uint32_t target_bitrate;
+    uint32_t peak_bitrate;
+    uint32_t frame_rate_num;
+    uint32_t frame_rate_den;
+    uint32_t quant_i_frames;
+    uint32_t quant_p_frames;
+    uint32_t quant_b_frames;
+    uint32_t vbv_buffer_size;
+    uint32_t vbv_buf_lv;
+    uint32_t target_bits_picture;
+    uint32_t peak_bits_picture_integer;
+    uint32_t peak_bits_picture_fraction;
+    uint32_t fill_data_enable;
+    uint32_t skip_frame_enable;
+    uint32_t enforce_hrd;
+    uint32_t max_au_size;
+    uint32_t max_qp;
+    uint32_t min_qp;
+
+    uint8_t rate_ctrl_method; /* see enum pipe_h2645_enc_rate_control_method */
+    uint8_t reserved[3];
+};
+
+/* One slice of an H.265 encode frame, addressed in CTUs. */
+struct virgl_h265_slice_descriptor
+{
+    uint32_t slice_segment_address;
+    uint32_t num_ctu_in_slice;
+
+    uint8_t slice_type; /* see enum pipe_h265_slice_type */
+    uint8_t reserved[3];
+};
+
+/* Full H.265 encode picture description: all parameter groups plus
+ * per-frame state, reference lists and up to 128 slice descriptors. */
+struct virgl_h265_enc_picture_desc
+{
+    struct virgl_base_picture_desc base;
+
+    struct virgl_h265_enc_seq_param seq;
+    struct virgl_h265_enc_pic_param pic;
+    struct virgl_h265_enc_slice_param slice;
+    struct virgl_h265_enc_rate_control rc;
+
+    uint32_t decoded_curr_pic;
+    uint32_t reference_frames[16];
+    uint32_t frame_num;
+    uint32_t pic_order_cnt;
+    uint32_t pic_order_cnt_type;
+    uint32_t num_ref_idx_l0_active_minus1;
+    uint32_t num_ref_idx_l1_active_minus1;
+    uint32_t ref_idx_l0_list[15];
+    uint32_t ref_idx_l1_list[15];
+    uint32_t num_slice_descriptors;
+    struct virgl_h265_slice_descriptor slices_descriptors[128];
+    struct virgl_enc_quality_modes quality_modes;
+
+    uint8_t picture_type; /* see enum pipe_h2645_enc_picture_type */
+    uint8_t not_referenced;
+    uint8_t reserved[2];
+};
+
+/* MPEG-4 part 2 decode picture description. */
+struct virgl_mpeg4_picture_desc
+{
+    struct virgl_base_picture_desc base;
+
+    int32_t trd[2];
+    int32_t trb[2];
+    uint16_t vop_time_increment_resolution;
+    uint8_t vop_coding_type;
+    uint8_t vop_fcode_forward;
+    uint8_t vop_fcode_backward;
+    uint8_t resync_marker_disable;
+    uint8_t interlaced;
+    uint8_t quant_type;
+    uint8_t quarter_sample;
+    uint8_t short_video_header;
+    uint8_t rounding_control;
+    uint8_t alternate_vertical_scan_flag;
+    uint8_t top_field_first;
+
+    uint8_t intra_matrix[64];
+    uint8_t non_intra_matrix[64];
+
+    uint32_t ref[2]; /* forward/backward reference buffers */
+};
+
+/* Discriminated by base.profile (see virgl_base_picture_desc). */
+union virgl_picture_desc {
+    struct virgl_base_picture_desc base;
+    struct virgl_h264_picture_desc h264;
+    struct virgl_h265_picture_desc h265;
+    struct virgl_mpeg4_picture_desc mpeg4;
+    struct virgl_h264_enc_picture_desc h264_enc;
+    struct virgl_h265_enc_picture_desc h265_enc;
+};
+
+/* Progress/result of an encode operation, reported in the feedback
+ * structure below. */
+enum virgl_video_encode_stat {
+    VIRGL_VIDEO_ENCODE_STAT_NOT_STARTED = 0,
+    VIRGL_VIDEO_ENCODE_STAT_IN_PROGRESS,
+    VIRGL_VIDEO_ENCODE_STAT_SUCCESS,
+    VIRGL_VIDEO_ENCODE_STAT_FAILURE,
+};
+
+/* Host-to-guest feedback for one encode job. */
+struct virgl_video_encode_feedback {
+    uint8_t stat; /* see enum virgl_video_encode_stat */
+    uint8_t reserved[3];
+
+    uint32_t coded_size; /* size of encoded data in bytes */
+};
+
+#endif /* VIRGL_VIDEO_HW_H */
+
diff --git a/src/virglrenderer.c b/src/virglrenderer.c
index 44982d71..c0294f6d 100644
--- a/src/virglrenderer.c
+++ b/src/virglrenderer.c
@@ -36,8 +36,11 @@
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_math.h"
+#include "vkr_allocator.h"
#include "vkr_renderer.h"
+#include "drm_renderer.h"
#include "vrend_renderer.h"
+#include "proxy/proxy_renderer.h"
#include "vrend_winsys.h"
#include "virglrenderer.h"
@@ -58,6 +61,8 @@ struct global_state {
bool winsys_initialized;
bool vrend_initialized;
bool vkr_initialized;
+ bool proxy_initialized;
+ bool external_winsys_initialized;
};
static struct global_state state;
@@ -171,19 +176,22 @@ void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
if (state.vkr_initialized)
vkr_get_capset(caps);
break;
+ case VIRGL_RENDERER_CAPSET_DRM:
+ drm_renderer_capset(caps);
+ break;
default:
break;
}
}
static void per_context_fence_retire(struct virgl_context *ctx,
- uint64_t queue_id,
- void *fence_cookie)
+ uint32_t ring_idx,
+ uint64_t fence_id)
{
state.cbs->write_context_fence(state.cookie,
ctx->ctx_id,
- queue_id,
- fence_cookie);
+ ring_idx,
+ fence_id);
}
int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
@@ -219,9 +227,15 @@ int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
ctx = vrend_renderer_context_create(ctx_id, nlen, name);
break;
case VIRGL_RENDERER_CAPSET_VENUS:
- if (!state.vkr_initialized)
+ if (state.proxy_initialized)
+ ctx = proxy_context_create(ctx_id, ctx_flags, nlen, name);
+ else if (state.vkr_initialized)
+ ctx = vkr_context_create(nlen, name);
+ else
return EINVAL;
- ctx = vkr_context_create(nlen, name);
+ break;
+ case VIRGL_RENDERER_CAPSET_DRM:
+ ctx = drm_renderer_create(nlen, name);
break;
default:
return EINVAL;
@@ -391,15 +405,16 @@ int virgl_renderer_create_fence(int client_fence_id, UNUSED uint32_t ctx_id)
int virgl_renderer_context_create_fence(uint32_t ctx_id,
uint32_t flags,
- uint64_t queue_id,
- void *fence_cookie)
+ uint32_t ring_idx,
+ uint64_t fence_id)
{
+ TRACE_FUNC();
struct virgl_context *ctx = virgl_context_lookup(ctx_id);
if (!ctx)
return -EINVAL;
assert(state.cbs->version >= 3 && state.cbs->write_context_fence);
- return ctx->submit_fence(ctx, flags, queue_id, fence_cookie);
+ return ctx->submit_fence(ctx, flags, ring_idx, fence_id);
}
void virgl_renderer_context_poll(uint32_t ctx_id)
@@ -422,7 +437,6 @@ int virgl_renderer_context_get_poll_fd(uint32_t ctx_id)
void virgl_renderer_force_ctx_0(void)
{
- TRACE_FUNC();
if (state.vrend_initialized)
vrend_renderer_force_ctx_0();
}
@@ -486,6 +500,10 @@ void virgl_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
*max_ver = 0;
*max_size = vkr_get_capset(NULL);
break;
+ case VIRGL_RENDERER_CAPSET_DRM:
+ *max_ver = 0;
+ *max_size = drm_renderer_capset(NULL);
+ break;
default:
*max_ver = 0;
*max_size = 0;
@@ -506,11 +524,12 @@ void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int nu
}
-static void ctx0_fence_retire(void *fence_cookie,
- UNUSED void *retire_data)
+static void ctx0_fence_retire(uint64_t fence_id, UNUSED void *retire_data)
{
- const uint32_t fence_id = (uint32_t)(uintptr_t)fence_cookie;
- state.cbs->write_fence(state.cookie, fence_id);
+ // ctx0 fence_id is created from uint32_t but stored internally as uint64_t,
+ // so casting back to uint32_t doesn't result in data loss.
+ assert((fence_id >> 32) == 0);
+ state.cbs->write_fence(state.cookie, (uint32_t)fence_id);
}
static virgl_renderer_gl_context create_gl_context(int scanout_idx, struct virgl_gl_ctx_param *param)
@@ -545,11 +564,33 @@ static int make_current(virgl_renderer_gl_context ctx)
return state.cbs->make_current(state.cookie, 0, ctx);
}
+static int get_drm_fd(void)
+{
+ if (state.cbs->get_drm_fd)
+ return state.cbs->get_drm_fd(state.cookie);
+
+ return -1;
+}
+
static const struct vrend_if_cbs vrend_cbs = {
ctx0_fence_retire,
create_gl_context,
destroy_gl_context,
make_current,
+ get_drm_fd,
+};
+
+static int
+proxy_renderer_cb_get_server_fd(uint32_t version)
+{
+ if (state.cbs && state.cbs->version >= 3 && state.cbs->get_server_fd)
+ return state.cbs->get_server_fd(state.cookie, version);
+ else
+ return -1;
+}
+
+static const struct proxy_renderer_cbs proxy_cbs = {
+ proxy_renderer_cb_get_server_fd,
};
void *virgl_renderer_get_cursor_data(uint32_t resource_id, uint32_t *width, uint32_t *height)
@@ -564,11 +605,29 @@ void *virgl_renderer_get_cursor_data(uint32_t resource_id, uint32_t *width, uint
height);
}
+static bool
+virgl_context_foreach_retire_fences(struct virgl_context *ctx,
+ UNUSED void* data)
+{
+ /* vrend contexts are polled explicitly by the caller */
+ if (ctx->capset_id != VIRGL_RENDERER_CAPSET_VIRGL &&
+ ctx->capset_id != VIRGL_RENDERER_CAPSET_VIRGL2)
+ {
+ assert(ctx->retire_fences);
+ ctx->retire_fences(ctx);
+ }
+ return true;
+}
+
void virgl_renderer_poll(void)
{
TRACE_FUNC();
if (state.vrend_initialized)
- vrend_renderer_check_fences();
+ vrend_renderer_poll();
+
+ struct virgl_context_foreach_args args;
+ args.callback = virgl_context_foreach_retire_fences;
+ virgl_context_foreach(&args);
}
void virgl_renderer_cleanup(UNUSED void *cookie)
@@ -583,15 +642,23 @@ void virgl_renderer_cleanup(UNUSED void *cookie)
if (state.resource_initialized)
virgl_resource_table_cleanup();
- if (state.vkr_initialized)
+ if (state.proxy_initialized)
+ proxy_renderer_fini();
+
+ if (state.vkr_initialized) {
vkr_renderer_fini();
+ /* vkr_allocator_init is called on-demand upon the first map */
+ vkr_allocator_fini();
+ }
if (state.vrend_initialized)
vrend_renderer_fini();
- if (state.winsys_initialized)
+ if (state.winsys_initialized || state.external_winsys_initialized)
vrend_winsys_cleanup();
+ drm_renderer_fini();
+
memset(&state, 0, sizeof(state));
}
@@ -612,8 +679,9 @@ int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks
return -EBUSY;
if (!state.client_initialized) {
- if (cbs && (cbs->version < 1 ||
- cbs->version > VIRGL_RENDERER_CALLBACKS_VERSION))
+ if (!cbs ||
+ cbs->version < 1 ||
+ cbs->version > VIRGL_RENDERER_CALLBACKS_VERSION)
return -1;
state.cookie = cookie;
@@ -658,6 +726,32 @@ int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks
state.winsys_initialized = true;
}
+ if (!state.winsys_initialized && !state.external_winsys_initialized &&
+ state.cbs && state.cbs->version >= 4 && state.cbs->get_egl_display) {
+ void *egl_display = NULL;
+
+ if (!cbs->create_gl_context || !cbs->destroy_gl_context ||
+ !cbs->make_current) {
+ ret = EINVAL;
+ goto fail;
+ }
+
+ egl_display = state.cbs->get_egl_display(cookie);
+
+ if (!egl_display) {
+ ret = -1;
+ goto fail;
+ }
+ ret = vrend_winsys_init_external(egl_display);
+
+ if (ret) {
+ ret = -1;
+ goto fail;
+ }
+
+ state.external_winsys_initialized = true;
+ }
+
if (!state.vrend_initialized && !(flags & VIRGL_RENDERER_NO_VIRGL)) {
uint32_t renderer_flags = 0;
@@ -672,6 +766,8 @@ int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks
renderer_flags |= VREND_USE_ASYNC_FENCE_CB;
if (flags & VIRGL_RENDERER_USE_EXTERNAL_BLOB)
renderer_flags |= VREND_USE_EXTERNAL_BLOB;
+ if (flags & VIRGL_RENDERER_USE_VIDEO)
+ renderer_flags |= VREND_USE_VIDEO;
ret = vrend_renderer_init(&vrend_cbs, renderer_flags);
if (ret)
@@ -685,6 +781,8 @@ int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks
vkr_flags |= VKR_RENDERER_THREAD_SYNC;
if (flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
vkr_flags |= VKR_RENDERER_ASYNC_FENCE_CB;
+ if (flags & VIRGL_RENDERER_RENDER_SERVER)
+ vkr_flags |= VKR_RENDERER_RENDER_SERVER;
ret = vkr_renderer_init(vkr_flags);
if (ret)
@@ -692,6 +790,23 @@ int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks
state.vkr_initialized = true;
}
+ if (!state.proxy_initialized && (flags & VIRGL_RENDERER_RENDER_SERVER)) {
+ ret = proxy_renderer_init(&proxy_cbs, flags | VIRGL_RENDERER_NO_VIRGL);
+ if (ret)
+ goto fail;
+ state.proxy_initialized = true;
+ }
+
+ if ((flags & VIRGL_RENDERER_ASYNC_FENCE_CB) &&
+ (flags & VIRGL_RENDERER_DRM)) {
+ int drm_fd = -1;
+
+ if (cbs->version >= 2 && cbs->get_drm_fd)
+ drm_fd = cbs->get_drm_fd(cookie);
+
+ drm_renderer_init(drm_fd);
+ }
+
return 0;
fail:
@@ -727,11 +842,16 @@ void virgl_renderer_reset(void)
if (state.resource_initialized)
virgl_resource_table_reset();
+ if (state.proxy_initialized)
+ proxy_renderer_reset();
+
if (state.vkr_initialized)
vkr_renderer_reset();
if (state.vrend_initialized)
vrend_renderer_reset();
+
+ drm_renderer_reset();
}
int virgl_renderer_get_poll_fd(void)
@@ -851,6 +971,10 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
if (args->res_handle == 0)
return -EINVAL;
+ /* user resource id must be unique */
+ if (virgl_resource_lookup(args->res_handle))
+ return -EINVAL;
+
if (args->size == 0)
return -EINVAL;
if (has_guest_storage) {
@@ -877,16 +1001,22 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
if (!ctx)
return -EINVAL;
- ret = ctx->get_blob(ctx, args->blob_id, args->blob_flags, &blob);
+ ret = ctx->get_blob(ctx, args->res_handle, args->blob_id, args->size, args->blob_flags, &blob);
if (ret)
return ret;
- if (blob.type != VIRGL_RESOURCE_FD_INVALID) {
+ if (blob.type == VIRGL_RESOURCE_OPAQUE_HANDLE) {
+ assert(!(args->blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_SHAREABLE));
+ res = virgl_resource_create_from_opaque_handle(ctx, args->res_handle, blob.u.opaque_handle);
+ if (!res)
+ return -ENOMEM;
+ } else if (blob.type != VIRGL_RESOURCE_FD_INVALID) {
res = virgl_resource_create_from_fd(args->res_handle,
blob.type,
blob.u.fd,
args->iovecs,
- args->num_iovs);
+ args->num_iovs,
+ &blob.opaque_fd_metadata);
if (!res) {
close(blob.u.fd);
return -ENOMEM;
@@ -905,9 +1035,6 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
res->map_info = blob.map_info;
res->map_size = args->size;
- if (ctx->get_blob_done)
- ctx->get_blob_done(ctx, args->res_handle, &blob);
-
return 0;
}
@@ -916,19 +1043,25 @@ int virgl_renderer_resource_map(uint32_t res_handle, void **out_map, uint64_t *o
TRACE_FUNC();
int ret = 0;
void *map = NULL;
+ uint64_t map_size = 0;
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!res || res->mapped)
return -EINVAL;
if (res->pipe_resource) {
- ret = vrend_renderer_resource_map(res->pipe_resource, &map, &res->map_size);
+ ret = vrend_renderer_resource_map(res->pipe_resource, &map, &map_size);
+ if (!ret)
+ res->map_size = map_size;
} else {
switch (res->fd_type) {
case VIRGL_RESOURCE_FD_DMABUF:
+ case VIRGL_RESOURCE_FD_SHM:
map = mmap(NULL, res->map_size, PROT_WRITE | PROT_READ, MAP_SHARED, res->fd, 0);
+ map_size = res->map_size;
break;
case VIRGL_RESOURCE_FD_OPAQUE:
- /* TODO support mapping opaque FD. Fallthrough for now. */
+ ret = vkr_allocator_resource_map(res, &map, &map_size);
+ break;
default:
break;
}
@@ -939,7 +1072,7 @@ int virgl_renderer_resource_map(uint32_t res_handle, void **out_map, uint64_t *o
res->mapped = map;
*out_map = map;
- *out_size = res->map_size;
+ *out_size = map_size;
return ret;
}
@@ -954,7 +1087,17 @@ int virgl_renderer_resource_unmap(uint32_t res_handle)
if (res->pipe_resource) {
ret = vrend_renderer_resource_unmap(res->pipe_resource);
} else {
- ret = munmap(res->mapped, res->map_size);
+ switch (res->fd_type) {
+ case VIRGL_RESOURCE_FD_DMABUF:
+ ret = munmap(res->mapped, res->map_size);
+ break;
+ case VIRGL_RESOURCE_FD_OPAQUE:
+ ret = vkr_allocator_resource_unmap(res);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
}
assert(!ret);
@@ -980,6 +1123,7 @@ int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint32_t *map_info
int
virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_id);
if (!res)
return EINVAL;
@@ -991,6 +1135,9 @@ virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
case VIRGL_RESOURCE_FD_OPAQUE:
*fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE;
break;
+ case VIRGL_RESOURCE_FD_SHM:
+ *fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_SHM;
+ break;
default:
return EINVAL;
}
@@ -999,6 +1146,63 @@ virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
}
int
+virgl_renderer_resource_import_blob(const struct virgl_renderer_resource_import_blob_args *args)
+{
+ TRACE_FUNC();
+ struct virgl_resource *res;
+
+ /* user resource id must be greater than 0 */
+ if (args->res_handle == 0)
+ return -EINVAL;
+
+ /* user resource id must be unique */
+ if (virgl_resource_lookup(args->res_handle))
+ return -EINVAL;
+
+ switch (args->blob_mem) {
+ case VIRGL_RENDERER_BLOB_MEM_HOST3D:
+ case VIRGL_RENDERER_BLOB_MEM_GUEST_VRAM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enum virgl_resource_fd_type fd_type = VIRGL_RESOURCE_FD_INVALID;
+ switch (args->fd_type) {
+ case VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF:
+ fd_type = VIRGL_RESOURCE_FD_DMABUF;
+ break;
+ case VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE:
+ fd_type = VIRGL_RESOURCE_FD_OPAQUE;
+ break;
+ case VIRGL_RENDERER_BLOB_FD_TYPE_SHM:
+ fd_type = VIRGL_RESOURCE_FD_SHM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (args->fd < 0)
+ return -EINVAL;
+ if (args->size == 0)
+ return -EINVAL;
+
+ res = virgl_resource_create_from_fd(args->res_handle,
+ fd_type,
+ args->fd,
+ NULL,
+ 0,
+ NULL);
+ if (!res)
+ return -ENOMEM;
+
+ res->map_info = 0;
+ res->map_size = args->size;
+
+ return 0;
+}
+
+int
virgl_renderer_export_fence(uint32_t client_fence_id, int *fd)
{
TRACE_FUNC();
diff --git a/src/virglrenderer.h b/src/virglrenderer.h
index a1c06ffd..40991d19 100644
--- a/src/virglrenderer.h
+++ b/src/virglrenderer.h
@@ -46,7 +46,7 @@ struct virgl_renderer_gl_ctx_param {
};
#ifdef VIRGL_RENDERER_UNSTABLE_APIS
-#define VIRGL_RENDERER_CALLBACKS_VERSION 3
+#define VIRGL_RENDERER_CALLBACKS_VERSION 4
#else
#define VIRGL_RENDERER_CALLBACKS_VERSION 2
#endif
@@ -55,15 +55,39 @@ struct virgl_renderer_callbacks {
int version;
void (*write_fence)(void *cookie, uint32_t fence);
- /* interact with GL implementation */
+ /*
+ * The following 3 callbacks allow virglrenderer to
+ * use winsys from caller, instead of initializing its own
+ * winsys (flag VIRGL_RENDERER_USE_EGL or VIRGL_RENDERER_USE_GLX).
+ */
+
+ /* create a GL/GLES context */
virgl_renderer_gl_context (*create_gl_context)(void *cookie, int scanout_idx, struct virgl_renderer_gl_ctx_param *param);
+ /* destroy a GL/GLES context */
void (*destroy_gl_context)(void *cookie, virgl_renderer_gl_context ctx);
+ /* make a context current */
int (*make_current)(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx);
- int (*get_drm_fd)(void *cookie); /* v2, used with flags & VIRGL_RENDERER_USE_EGL */
+ /*
+ * v2, used with flags & VIRGL_RENDERER_USE_EGL
+ * Choose the drm fd that will be used by virglrenderer
+ * for winsys initialization. Virglrenderer takes ownership of the fd
+ * that is returned and is responsible to close() it. This should not
+ * return the same fd each time it is called, if called multiple times.
+ */
+ int (*get_drm_fd)(void *cookie);
#ifdef VIRGL_RENDERER_UNSTABLE_APIS
- void (*write_context_fence)(void *cookie, uint32_t ctx_id, uint64_t queue_id, void *fence_cookie);
+ void (*write_context_fence)(void *cookie, uint32_t ctx_id, uint32_t ring_idx, uint64_t fence_id);
+
+ /* version 0: a connected socket of type SOCK_SEQPACKET */
+ int (*get_server_fd)(void *cookie, uint32_t version);
+
+ /*
+ * Get the EGLDisplay from caller. It requires create_gl_context,
+ * destroy_gl_context, make_current to be implemented by caller.
+ */
+ void *(*get_egl_display)(void *cookie);
#endif
};
@@ -105,6 +129,21 @@ struct virgl_renderer_callbacks {
*/
#define VIRGL_RENDERER_ASYNC_FENCE_CB (1 << 8)
+/* Start a render server and move GPU rendering to the render server.
+ *
+ * This is respected by the venus renderer but ignored by the virgl renderer.
+ */
+#define VIRGL_RENDERER_RENDER_SERVER (1 << 9)
+
+/*
+ * Enable drm renderer.
+ */
+#define VIRGL_RENDERER_DRM (1 << 10)
+
+/* Video encode/decode */
+#define VIRGL_RENDERER_USE_VIDEO (1 << 11)
+
+
#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
VIRGL_EXPORT int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks *cb);
@@ -273,12 +312,6 @@ VIRGL_EXPORT int virgl_renderer_get_poll_fd(void);
VIRGL_EXPORT int virgl_renderer_execute(void *execute_args, uint32_t execute_size);
-/*
- * These are unstable APIs for development only. Use these for development/testing purposes
- * only, not in production
- */
-#ifdef VIRGL_RENDERER_UNSTABLE_APIS
-
#define VIRGL_RENDERER_CONTEXT_FLAG_CAPSET_ID_MASK 0xff
VIRGL_EXPORT int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
@@ -289,6 +322,7 @@ VIRGL_EXPORT int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
#define VIRGL_RENDERER_BLOB_MEM_GUEST 0x0001
#define VIRGL_RENDERER_BLOB_MEM_HOST3D 0x0002
#define VIRGL_RENDERER_BLOB_MEM_HOST3D_GUEST 0x0003
+#define VIRGL_RENDERER_BLOB_MEM_GUEST_VRAM 0x0004
#define VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRGL_RENDERER_BLOB_FLAG_USE_SHAREABLE 0x0002
@@ -323,18 +357,37 @@ VIRGL_EXPORT int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint3
#define VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF 0x0001
#define VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE 0x0002
+#define VIRGL_RENDERER_BLOB_FD_TYPE_SHM 0x0003
VIRGL_EXPORT int
virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd);
+/*
+ * These are unstable APIs for development only. Use these for development/testing purposes
+ * only, not in production
+ */
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+
+struct virgl_renderer_resource_import_blob_args
+{
+ uint32_t res_handle;
+ uint32_t blob_mem;
+ uint32_t fd_type;
+ int fd;
+ uint64_t size;
+};
+
+VIRGL_EXPORT int
+virgl_renderer_resource_import_blob(const struct virgl_renderer_resource_import_blob_args *args);
+
VIRGL_EXPORT int
virgl_renderer_export_fence(uint32_t client_fence_id, int *fd);
#define VIRGL_RENDERER_FENCE_FLAG_MERGEABLE (1 << 0)
VIRGL_EXPORT int virgl_renderer_context_create_fence(uint32_t ctx_id,
uint32_t flags,
- uint64_t queue_id,
- void *fence_cookie);
+ uint32_t ring_idx,
+ uint64_t fence_id);
VIRGL_EXPORT void virgl_renderer_context_poll(uint32_t ctx_id); /* force fences */
VIRGL_EXPORT int virgl_renderer_context_get_poll_fd(uint32_t ctx_id);
diff --git a/src/virglrenderer_hw.h b/src/virglrenderer_hw.h
index 7fd8fcd7..e421d0a9 100644
--- a/src/virglrenderer_hw.h
+++ b/src/virglrenderer_hw.h
@@ -26,12 +26,15 @@
#include "venus_hw.h"
#include "virgl_hw.h"
+#include "drm_hw.h"
enum virgl_renderer_capset {
VIRGL_RENDERER_CAPSET_VIRGL = 1,
VIRGL_RENDERER_CAPSET_VIRGL2 = 2,
/* 3 is reserved for gfxstream */
VIRGL_RENDERER_CAPSET_VENUS = 4,
+ /* 5 is reserved for cross-domain */
+ VIRGL_RENDERER_CAPSET_DRM = 6,
};
#endif /* VIRGLRENDERER_HW_H */
diff --git a/src/vrend_blitter.c b/src/vrend_blitter.c
index 34696323..aa5e7a8c 100644
--- a/src/vrend_blitter.c
+++ b/src/vrend_blitter.c
@@ -29,17 +29,32 @@
#include "util/u_memory.h"
#include "util/u_format.h"
+#include "util/u_hash_table.h"
#include "util/u_texture.h"
#include "vrend_shader.h"
#include "vrend_renderer.h"
#include "vrend_blitter.h"
+#define XXH_INLINE_ALL
+#include "util/xxhash.h"
+
#define DEST_SWIZZLE_SNIPPET_SIZE 64
-#define BLIT_USE_GLES (1 << 0)
-#define BLIT_USE_MSAA (1 << 1)
-#define BLIT_USE_DEPTH (1 << 2)
+#define BLIT_USE_GLES (1 << 0)
+#define BLIT_USE_MSAA (1 << 1)
+#define BLIT_USE_DEPTH (1 << 2)
+#define BLIT_MANUAL_SRGB_DECODE (1 << 3)
+#define BLIT_MANUAL_SRGB_ENCODE (1 << 4)
+
+struct vec4 {
+ GLfloat x,y,z,w;
+};
+
+struct blit_coord {
+ struct vec4 pos;
+ struct vec4 tex;
+};
struct vrend_blitter_ctx {
virgl_gl_context gl_context;
@@ -48,44 +63,46 @@ struct vrend_blitter_ctx {
GLuint vaoid;
+ struct util_hash_table *blit_programs;
+
GLuint vs;
- GLuint fs_texfetch_col[PIPE_MAX_TEXTURE_TYPES];
- GLuint fs_texfetch_depth[PIPE_MAX_TEXTURE_TYPES];
- GLuint fs_texfetch_depth_msaa[PIPE_MAX_TEXTURE_TYPES];
- GLuint fs_texfetch_col_swizzle;
GLuint fb_id;
- // Parameters related to the creation of fs_texfetch_col_swizzle
- unsigned fs_texfetch_col_swizzle_nr_samples;
- bool fs_texfetch_col_swizzle_has_swizzle;
- uint8_t fs_texfetch_col_swizzle_swizzle[4];
-
unsigned dst_width;
unsigned dst_height;
GLuint vbo_id;
- GLfloat vertices[4][2][4]; /**< {pos, color} or {pos, texcoord} */
+ struct blit_coord vertices[4];
};
static struct vrend_blitter_ctx vrend_blit_ctx;
-struct vrend_blitter_point {
+struct blit_point {
int x;
int y;
};
-struct vrend_blitter_delta {
- int dx;
- int dy;
+struct blit_swizzle_and_type {
+ char *swizzle;
+ char *type;
+ bool is_array;
};
-struct swizzle_and_type {
- char *twm;
- char *ivec;
- bool is_array;
+struct blit_prog_key {
+ bool is_color: 1;
+ bool is_msaa: 1;
+ bool manual_srgb_decode: 1;
+ bool manual_srgb_encode: 1;
+ uint8_t num_samples;
+ int pipe_tex_target;
+ struct {
+ bool has_swizzle;
+ enum virgl_formats src_format;
+ uint8_t swizzle[4];
+ } texcol;
};
-static GLint build_and_check(GLenum shader_type, const char *buf)
+static GLint blit_shader_build_and_check(GLenum shader_type, const char *buf)
{
GLint param;
GLint id = glCreateShader(shader_type);
@@ -105,7 +122,7 @@ static GLint build_and_check(GLenum shader_type, const char *buf)
return id;
}
-static bool link_and_check(GLuint prog_id)
+static bool blit_shader_link_and_check(GLuint prog_id)
{
GLint lret;
@@ -128,11 +145,13 @@ static void create_dest_swizzle_snippet(const uint8_t swizzle[4],
{
static const uint8_t invalid_swizzle = 0xff;
ssize_t si = 0;
- uint8_t inverse[4] = {invalid_swizzle, invalid_swizzle, invalid_swizzle,
- invalid_swizzle};
+ uint8_t inverse[4] = {invalid_swizzle, invalid_swizzle,
+ invalid_swizzle, invalid_swizzle};
for (int i = 0; i < 4; ++i) {
- if (swizzle[i] > 3) continue;
+ if (swizzle[i] > 3)
+ continue;
+
if (inverse[swizzle[i]] == invalid_swizzle)
inverse[swizzle[i]] = i;
}
@@ -170,35 +189,41 @@ static enum tgsi_return_type tgsi_ret_for_format(enum virgl_formats format)
return TGSI_RETURN_TYPE_UNORM;
}
-static void get_swizzle(int tgsi_tex_target, unsigned flags,
- struct swizzle_and_type *retval)
+static void blit_get_swizzle(int tgsi_tex_target, unsigned flags,
+ struct blit_swizzle_and_type *retval)
{
- retval->twm = "";
- retval->ivec = "";
+ retval->swizzle = "";
+ retval->type = "";
retval->is_array = false;
switch (tgsi_tex_target) {
case TGSI_TEXTURE_1D:
- if (flags & (BLIT_USE_GLES | BLIT_USE_DEPTH)) {
- retval->twm = ".xy";
+ if ((flags & (BLIT_USE_GLES | BLIT_USE_DEPTH)) == (BLIT_USE_GLES | BLIT_USE_DEPTH)) {
+ retval->swizzle = ".xy";
break;
}
/* fallthrough */
case TGSI_TEXTURE_BUFFER:
- retval->twm = ".x";
+ retval->swizzle = ".x";
break;
case TGSI_TEXTURE_2D_MSAA:
if (flags & BLIT_USE_MSAA) {
- retval->ivec = "ivec2";
+ retval->type = "ivec2";
}
- /* fallthrough */
+ retval->swizzle = ".xy";
+ break;
case TGSI_TEXTURE_1D_ARRAY:
+ if (flags & (BLIT_USE_GLES)) {
+ retval->swizzle = ".xyz";
+ break;
+ }
+ /* fallthrough */
case TGSI_TEXTURE_2D:
case TGSI_TEXTURE_RECT:
- retval->twm = ".xy";
+ retval->swizzle = ".xy";
break;
case TGSI_TEXTURE_2D_ARRAY_MSAA:
if (flags & BLIT_USE_MSAA) {
- retval->ivec = "ivec3";
+ retval->type = "ivec3";
retval->is_array = true;
}
/* fallthrough */
@@ -209,19 +234,19 @@ static void get_swizzle(int tgsi_tex_target, unsigned flags,
case TGSI_TEXTURE_3D:
case TGSI_TEXTURE_CUBE:
case TGSI_TEXTURE_2D_ARRAY:
- retval->twm = ".xyz";
+ retval->swizzle = ".xyz";
break;
case TGSI_TEXTURE_SHADOWCUBE:
case TGSI_TEXTURE_SHADOW2D_ARRAY:
case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
case TGSI_TEXTURE_CUBE_ARRAY:
- retval->twm = "";
+ retval->swizzle = "";
break;
default:
if (flags & BLIT_USE_MSAA) {
break;
}
- retval->twm = ".xy";
+ retval->swizzle = ".xy";
break;
}
}
@@ -230,11 +255,12 @@ static GLuint blit_build_frag_tex_col(struct vrend_blitter_ctx *blit_ctx,
int tgsi_tex_target,
enum tgsi_return_type tgsi_ret,
const uint8_t swizzle[4],
- int nr_samples)
+ int nr_samples,
+ uint32_t flags)
{
char shader_buf[4096];
- struct swizzle_and_type retval;
- unsigned flags = 0;
+ struct blit_swizzle_and_type swizzle_and_type;
+ unsigned swizzle_flags = 0;
char dest_swizzle_snippet[DEST_SWIZZLE_SNIPPET_SIZE] = "texel";
const char *ext_str = "";
bool msaa = nr_samples > 0;
@@ -242,88 +268,119 @@ static GLuint blit_build_frag_tex_col(struct vrend_blitter_ctx *blit_ctx,
if (msaa && !blit_ctx->use_gles)
ext_str = "#extension GL_ARB_texture_multisample : enable\n";
else if (tgsi_tex_target == TGSI_TEXTURE_CUBE_ARRAY ||
- tgsi_tex_target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
- ext_str = "#extension GL_ARB_texture_cube_map_array : require\n";
+ tgsi_tex_target == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
+ if (blit_ctx->use_gles)
+ ext_str = "#extension GL_EXT_texture_cube_map_array : require\n";
+ else
+ ext_str = "#extension GL_ARB_texture_cube_map_array : require\n";
+ }
if (blit_ctx->use_gles)
- flags |= BLIT_USE_GLES;
+ swizzle_flags |= BLIT_USE_GLES;
if (msaa)
- flags |= BLIT_USE_MSAA;
- get_swizzle(tgsi_tex_target, flags, &retval);
+ swizzle_flags |= BLIT_USE_MSAA;
+ blit_get_swizzle(tgsi_tex_target, swizzle_flags, &swizzle_and_type);
if (swizzle)
create_dest_swizzle_snippet(swizzle, dest_swizzle_snippet);
+ bool needs_manual_srgb_decode = has_bit(flags, BLIT_MANUAL_SRGB_DECODE);
+ bool needs_manual_srgb_encode = has_bit(flags, BLIT_MANUAL_SRGB_ENCODE);
+
if (msaa)
snprintf(shader_buf, 4096, blit_ctx->use_gles ?
- (retval.is_array ? FS_TEXFETCH_COL_MSAA_ARRAY_GLES
+ (swizzle_and_type.is_array ? FS_TEXFETCH_COL_MSAA_ARRAY_GLES
: FS_TEXFETCH_COL_MSAA_GLES)
: FS_TEXFETCH_COL_MSAA_GL,
ext_str, vec4_type_for_tgsi_ret(tgsi_ret),
+ needs_manual_srgb_decode ? FS_FUNC_COL_SRGB_DECODE : "",
+ needs_manual_srgb_encode ? FS_FUNC_COL_SRGB_ENCODE : "",
+ needs_manual_srgb_decode ? "srgb_decode" : "",
+ needs_manual_srgb_encode ? "srgb_encode" : "",
vrend_shader_samplerreturnconv(tgsi_ret),
vrend_shader_samplertypeconv(blit_ctx->use_gles, tgsi_tex_target),
- nr_samples, retval.ivec, retval.twm, dest_swizzle_snippet);
+ nr_samples, swizzle_and_type.type, swizzle_and_type.swizzle, dest_swizzle_snippet);
else
snprintf(shader_buf, 4096, blit_ctx->use_gles ?
(tgsi_tex_target == TGSI_TEXTURE_1D ?
FS_TEXFETCH_COL_GLES_1D : FS_TEXFETCH_COL_GLES)
: FS_TEXFETCH_COL_GL,
ext_str, vec4_type_for_tgsi_ret(tgsi_ret),
+ needs_manual_srgb_decode ? FS_FUNC_COL_SRGB_DECODE : "",
+ needs_manual_srgb_encode ? FS_FUNC_COL_SRGB_ENCODE : "",
+ needs_manual_srgb_decode ? "srgb_decode" : "",
+ needs_manual_srgb_encode ? "srgb_encode" : "",
vrend_shader_samplerreturnconv(tgsi_ret),
vrend_shader_samplertypeconv(blit_ctx->use_gles, tgsi_tex_target),
- retval.twm, dest_swizzle_snippet);
+ swizzle_and_type.swizzle, dest_swizzle_snippet);
- VREND_DEBUG(dbg_blit, NULL, "-- Blit FS shader MSAA: %d -----------------\n"
+ VREND_DEBUG(dbg_blit, NULL, "-- Blit FS color shader MSAA: %d -----------------\n"
"%s\n---------------------------------------\n", msaa, shader_buf);
- return build_and_check(GL_FRAGMENT_SHADER, shader_buf);
+ return blit_shader_build_and_check(GL_FRAGMENT_SHADER, shader_buf);
}
static GLuint blit_build_frag_depth(struct vrend_blitter_ctx *blit_ctx, int tgsi_tex_target, bool msaa)
{
char shader_buf[4096];
- struct swizzle_and_type retval;
+ struct blit_swizzle_and_type swizzle_and_type;
unsigned flags = BLIT_USE_DEPTH;
if (msaa)
flags |= BLIT_USE_MSAA;
- get_swizzle(tgsi_tex_target, flags, &retval);
+ blit_get_swizzle(tgsi_tex_target, flags, &swizzle_and_type);
if (msaa)
snprintf(shader_buf, 4096, blit_ctx->use_gles ?
- (retval.is_array ? FS_TEXFETCH_DS_MSAA_ARRAY_GLES : FS_TEXFETCH_DS_MSAA_GLES)
+ (swizzle_and_type.is_array ? FS_TEXFETCH_DS_MSAA_ARRAY_GLES : FS_TEXFETCH_DS_MSAA_GLES)
: FS_TEXFETCH_DS_MSAA_GL,
- vrend_shader_samplertypeconv(blit_ctx->use_gles, tgsi_tex_target), retval.ivec, retval.twm);
+ vrend_shader_samplertypeconv(blit_ctx->use_gles, tgsi_tex_target), swizzle_and_type.type, swizzle_and_type.swizzle);
else
snprintf(shader_buf, 4096, blit_ctx->use_gles ? FS_TEXFETCH_DS_GLES : FS_TEXFETCH_DS_GL,
- vrend_shader_samplertypeconv(blit_ctx->use_gles, tgsi_tex_target), retval.twm);
+ vrend_shader_samplertypeconv(blit_ctx->use_gles, tgsi_tex_target), swizzle_and_type.swizzle);
+
+ VREND_DEBUG(dbg_blit, NULL, "-- Blit FS depth shader MSAA: %d -----------------\n"
+ "%s\n---------------------------------------\n", msaa, shader_buf);
- return build_and_check(GL_FRAGMENT_SHADER, shader_buf);
+ return blit_shader_build_and_check(GL_FRAGMENT_SHADER, shader_buf);
}
static GLuint blit_get_frag_tex_writedepth(struct vrend_blitter_ctx *blit_ctx, int pipe_tex_target, unsigned nr_samples)
{
- assert(pipe_tex_target < PIPE_MAX_TEXTURE_TYPES);
-
- GLuint *shader = nr_samples > 0 ? &blit_ctx->fs_texfetch_depth_msaa[pipe_tex_target]
- : &blit_ctx->fs_texfetch_depth[pipe_tex_target];
-
- if (!*shader) {
- unsigned tgsi_tex = util_pipe_tex_to_tgsi_tex(pipe_tex_target, nr_samples);
- *shader = blit_build_frag_depth(blit_ctx, tgsi_tex, nr_samples > 0);
- }
- return *shader;
+ struct blit_prog_key key = {
+ .is_color = false,
+ .is_msaa = nr_samples > 0,
+ .num_samples = nr_samples,
+ .pipe_tex_target = pipe_tex_target,
+ };
+
+ void *shader = util_hash_table_get(blit_ctx->blit_programs, &key);
+ GLuint prog_id;
+ if (shader) {
+ prog_id = (GLuint)((size_t)(shader) & 0xffffffff);
+ } else {
+ prog_id = glCreateProgram();
+ glAttachShader(prog_id, blit_ctx->vs);
+ unsigned tgsi_tex = util_pipe_tex_to_tgsi_tex(pipe_tex_target, key.num_samples);
+ GLuint fs_id = blit_build_frag_depth(blit_ctx, tgsi_tex, key.is_msaa);
+ glAttachShader(prog_id, fs_id);
+ if(!blit_shader_link_and_check(prog_id))
+ return 0;
+
+ glDeleteShader(fs_id);
+ util_hash_table_set(blit_ctx->blit_programs, &key, (void *)(uintptr_t)prog_id);
+ }
+ return prog_id;
}
static GLuint blit_get_frag_tex_col(struct vrend_blitter_ctx *blit_ctx,
- int pipe_tex_target,
- unsigned nr_samples,
- const struct vrend_format_table *src_entry,
- uint8_t swizzle[static 4])
+ int pipe_tex_target,
+ unsigned nr_samples,
+ const struct vrend_format_table *src_entry,
+ const uint8_t swizzle[static 4],
+ uint32_t flags)
{
- assert(pipe_tex_target < PIPE_MAX_TEXTURE_TYPES);
-
bool needs_swizzle = false;
for (uint i = 0; i < 4; ++i) {
if (swizzle[i] != i) {
@@ -332,38 +389,67 @@ static GLuint blit_get_frag_tex_col(struct vrend_blitter_ctx *blit_ctx,
}
}
- GLuint *shader;
- if (needs_swizzle || nr_samples > 1) {
- shader = &blit_ctx->fs_texfetch_col_swizzle;
- if (*shader &&
- (blit_ctx->fs_texfetch_col_swizzle_nr_samples != nr_samples ||
- blit_ctx->fs_texfetch_col_swizzle_has_swizzle != needs_swizzle ||
- (needs_swizzle && memcmp(blit_ctx->fs_texfetch_col_swizzle_swizzle, swizzle, 4)))) {
- glDeleteShader(*shader);
- *shader = 0;
- }
- blit_ctx->fs_texfetch_col_swizzle_has_swizzle = needs_swizzle;
- if (needs_swizzle)
- memcpy(blit_ctx->fs_texfetch_col_swizzle_swizzle, swizzle, 4);
- blit_ctx->fs_texfetch_col_swizzle_nr_samples = nr_samples;
+ struct blit_prog_key key = {
+ .is_color = true,
+ .is_msaa = nr_samples > 0,
+ .manual_srgb_decode = has_bit(flags, BLIT_MANUAL_SRGB_DECODE),
+ .manual_srgb_encode = has_bit(flags, BLIT_MANUAL_SRGB_ENCODE),
+ .num_samples = nr_samples,
+ .pipe_tex_target = pipe_tex_target
+ };
+
+ key.texcol.src_format = src_entry->format;
+ key.texcol.has_swizzle = needs_swizzle;
+ if (key.texcol.has_swizzle)
+ memcpy(key.texcol.swizzle, swizzle, 4);
+
+ GLuint prog_id = 0;
+ void *shader = util_hash_table_get(blit_ctx->blit_programs, &key);
+
+ if (shader) {
+ prog_id = (GLuint)((size_t)(shader) & 0xffffffff);
} else {
- shader = &blit_ctx->fs_texfetch_col[pipe_tex_target];
+ prog_id = glCreateProgram();
+ glAttachShader(prog_id, blit_ctx->vs);
+ unsigned tgsi_tex = util_pipe_tex_to_tgsi_tex(pipe_tex_target, key.num_samples);
+ enum tgsi_return_type tgsi_ret = tgsi_ret_for_format(src_entry->format);
+ int msaa_samples = nr_samples > 0 ? (tgsi_ret == TGSI_RETURN_TYPE_UNORM ? nr_samples : 1) : 0;
+
+ GLuint fs_id = blit_build_frag_tex_col(blit_ctx, tgsi_tex, tgsi_ret,
+ swizzle, msaa_samples, flags);
+ glAttachShader(prog_id, fs_id);
+ if(!blit_shader_link_and_check(prog_id))
+ return 0;
+
+ glDeleteShader(fs_id);
+ util_hash_table_set(blit_ctx->blit_programs, &key, (void *)(uintptr_t)prog_id);
}
- if (!*shader) {
+ return prog_id;
+}
- unsigned tgsi_tex = util_pipe_tex_to_tgsi_tex(pipe_tex_target, nr_samples);
- enum tgsi_return_type tgsi_ret = tgsi_ret_for_format(src_entry->format);
+static uint32_t program_hash_func(const void *key)
+{
+ return XXH32(key, sizeof(struct blit_prog_key), 0);
+}
- // Integer textures are resolved using just one sample
- int msaa_samples = nr_samples > 0 ? (tgsi_ret == TGSI_RETURN_TYPE_UNORM ? nr_samples : 1) : 0;
+static bool program_equal_func(const void *key1, const void *key2)
+{
+ return memcmp(key1, key2, sizeof(struct blit_prog_key)) == 0;
+}
- *shader = blit_build_frag_tex_col(blit_ctx, tgsi_tex, tgsi_ret,
- swizzle, msaa_samples);
- }
- return *shader;
+static void program_destroy_func(void *shader_id)
+{
+ GLuint id;
+#if __SIZEOF_POINTER__ == 8
+ id = ((uint64_t)(shader_id)) & 0xffffffff;
+#else
+ id = (GLuint)(shader_id);
+#endif
+ glDeleteProgram(id);
}
+
static void vrend_renderer_init_blit_ctx(struct vrend_blitter_ctx *blit_ctx)
{
struct virgl_gl_ctx_param ctx_params;
@@ -373,7 +459,10 @@ static void vrend_renderer_init_blit_ctx(struct vrend_blitter_ctx *blit_ctx)
return;
}
- blit_ctx->initialised = true;
+ vrend_blit_ctx.blit_programs = util_hash_table_create(program_hash_func,
+ program_equal_func,
+ program_destroy_func);
+
blit_ctx->use_gles = epoxy_is_desktop_gl() == 0;
ctx_params.shared = true;
for (uint32_t i = 0; i < ARRAY_SIZE(gl_versions); i++) {
@@ -385,44 +474,48 @@ static void vrend_renderer_init_blit_ctx(struct vrend_blitter_ctx *blit_ctx)
break;
}
+ if (!blit_ctx->gl_context) {
+ vrend_printf("virglrenderer: Unable to create blit context");
+ abort();
+ }
+
vrend_sync_make_current(blit_ctx->gl_context);
glGenVertexArrays(1, &blit_ctx->vaoid);
glGenFramebuffers(1, &blit_ctx->fb_id);
glGenBuffers(1, &blit_ctx->vbo_id);
- blit_ctx->vs = build_and_check(GL_VERTEX_SHADER,
+ blit_ctx->vs = blit_shader_build_and_check(GL_VERTEX_SHADER,
blit_ctx->use_gles ? VS_PASSTHROUGH_GLES : VS_PASSTHROUGH_GL);
- for (i = 0; i < 4; i++)
- blit_ctx->vertices[i][0][3] = 1; /*v.w*/
+ for (i = 0; i < 4; i++) {
+ blit_ctx->vertices[i].pos.z = 0;
+ blit_ctx->vertices[i].pos.w = 1;
+ }
+
glBindVertexArray(blit_ctx->vaoid);
glBindBuffer(GL_ARRAY_BUFFER, blit_ctx->vbo_id);
if (!blit_ctx->use_gles)
glEnable(GL_FRAMEBUFFER_SRGB);
+
+ blit_ctx->initialised = true;
}
static void blitter_set_rectangle(struct vrend_blitter_ctx *blit_ctx,
- int x1, int y1, int x2, int y2,
- float depth)
+ int x1, int y1, int x2, int y2)
{
- int i;
-
/* set vertex positions */
- blit_ctx->vertices[0][0][0] = (float)x1 / blit_ctx->dst_width * 2.0f - 1.0f; /*v0.x*/
- blit_ctx->vertices[0][0][1] = (float)y1 / blit_ctx->dst_height * 2.0f - 1.0f; /*v0.y*/
+ blit_ctx->vertices[0].pos.x = (float)x1 / blit_ctx->dst_width * 2.0f - 1.0f; /*v0.x*/
+ blit_ctx->vertices[0].pos.y = (float)y1 / blit_ctx->dst_height * 2.0f - 1.0f; /*v0.y*/
- blit_ctx->vertices[1][0][0] = (float)x2 / blit_ctx->dst_width * 2.0f - 1.0f; /*v1.x*/
- blit_ctx->vertices[1][0][1] = (float)y1 / blit_ctx->dst_height * 2.0f - 1.0f; /*v1.y*/
+ blit_ctx->vertices[1].pos.x = (float)x2 / blit_ctx->dst_width * 2.0f - 1.0f; /*v1.x*/
+ blit_ctx->vertices[1].pos.y = (float)y1 / blit_ctx->dst_height * 2.0f - 1.0f; /*v1.y*/
- blit_ctx->vertices[2][0][0] = (float)x2 / blit_ctx->dst_width * 2.0f - 1.0f; /*v2.x*/
- blit_ctx->vertices[2][0][1] = (float)y2 / blit_ctx->dst_height * 2.0f - 1.0f; /*v2.y*/
+ blit_ctx->vertices[2].pos.x = (float)x2 / blit_ctx->dst_width * 2.0f - 1.0f; /*v2.x*/
+ blit_ctx->vertices[2].pos.y = (float)y2 / blit_ctx->dst_height * 2.0f - 1.0f; /*v2.y*/
- blit_ctx->vertices[3][0][0] = (float)x1 / blit_ctx->dst_width * 2.0f - 1.0f; /*v3.x*/
- blit_ctx->vertices[3][0][1] = (float)y2 / blit_ctx->dst_height * 2.0f - 1.0f; /*v3.y*/
-
- for (i = 0; i < 4; i++)
- blit_ctx->vertices[i][0][2] = depth; /*z*/
+ blit_ctx->vertices[3].pos.x = (float)x1 / blit_ctx->dst_width * 2.0f - 1.0f; /*v3.x*/
+ blit_ctx->vertices[3].pos.y = (float)y2 / blit_ctx->dst_height * 2.0f - 1.0f; /*v3.y*/
glViewport(0, 0, blit_ctx->dst_width, blit_ctx->dst_height);
}
@@ -482,10 +575,10 @@ static void blitter_set_texcoords(struct vrend_blitter_ctx *blit_ctx,
util_map_texcoords2d_onto_cubemap((unsigned)layer % 6,
/* pointer, stride in floats */
&face_coord[0][0], 2,
- &blit_ctx->vertices[0][1][0], 8,
+ &blit_ctx->vertices[0].tex.x, 8,
FALSE);
} else {
- set_texcoords_in_vertices(coord, &blit_ctx->vertices[0][1][0], 8);
+ set_texcoords_in_vertices(coord, &blit_ctx->vertices[0].tex.x, 8);
}
switch (src_res->base.target) {
@@ -494,41 +587,33 @@ static void blitter_set_texcoords(struct vrend_blitter_ctx *blit_ctx,
float r = layer / (float)u_minify(src_res->base.depth0,
level);
for (i = 0; i < 4; i++)
- blit_ctx->vertices[i][1][2] = r; /*r*/
+ blit_ctx->vertices[i].tex.z = r; /*r*/
}
break;
case PIPE_TEXTURE_1D_ARRAY:
for (i = 0; i < 4; i++)
- blit_ctx->vertices[i][1][1] = (float) layer; /*t*/
+ blit_ctx->vertices[i].tex.y = (float) layer; /*t*/
break;
case PIPE_TEXTURE_2D_ARRAY:
for (i = 0; i < 4; i++) {
- blit_ctx->vertices[i][1][2] = (float) layer; /*r*/
- blit_ctx->vertices[i][1][3] = (float) sample; /*q*/
+ blit_ctx->vertices[i].tex.z = (float) layer; /*r*/
+ blit_ctx->vertices[i].tex.w = (float) sample; /*q*/
}
break;
case PIPE_TEXTURE_CUBE_ARRAY:
for (i = 0; i < 4; i++)
- blit_ctx->vertices[i][1][3] = (float) ((unsigned)layer / 6); /*w*/
+ blit_ctx->vertices[i].tex.w = (float) ((unsigned)layer / 6); /*w*/
break;
case PIPE_TEXTURE_2D:
for (i = 0; i < 4; i++) {
- blit_ctx->vertices[i][1][3] = (float) sample; /*r*/
+ blit_ctx->vertices[i].tex.w = (float) sample; /*r*/
}
break;
default:;
}
}
-#if 0
-static void set_dsa_keep_depth_stencil(void)
-{
- glDisable(GL_STENCIL_TEST);
- glDisable(GL_DEPTH_TEST);
- glDepthMask(GL_FALSE);
-}
-#endif
static void set_dsa_write_depth_keep_stencil(void)
{
@@ -570,8 +655,8 @@ static int calc_delta_for_bound(int v, int max)
* them within the source resource extents */
static void calc_src_deltas_for_bounds(struct vrend_resource *src_res,
const struct pipe_blit_info *info,
- struct vrend_blitter_delta *src0_delta,
- struct vrend_blitter_delta *src1_delta)
+ struct blit_point *src0_delta,
+ struct blit_point *src1_delta)
{
int max_x = u_minify(src_res->base.width0, info->src.level) - 1;
int max_y = u_minify(src_res->base.height0, info->src.level) - 1;
@@ -583,42 +668,42 @@ static void calc_src_deltas_for_bounds(struct vrend_resource *src_res,
int src0_x_excl = info->src.box.width < 0;
int src0_y_excl = info->src.box.height < 0;
- src0_delta->dx = calc_delta_for_bound(info->src.box.x, max_x + src0_x_excl);
- src0_delta->dy = calc_delta_for_bound(info->src.box.y, max_y + src0_y_excl);
+ src0_delta->x = calc_delta_for_bound(info->src.box.x, max_x + src0_x_excl);
+ src0_delta->y = calc_delta_for_bound(info->src.box.y, max_y + src0_y_excl);
- src1_delta->dx = calc_delta_for_bound(info->src.box.x + info->src.box.width,
+ src1_delta->x = calc_delta_for_bound(info->src.box.x + info->src.box.width,
max_x + !src0_x_excl);
- src1_delta->dy = calc_delta_for_bound(info->src.box.y + info->src.box.height,
+ src1_delta->y = calc_delta_for_bound(info->src.box.y + info->src.box.height,
max_y + !src0_y_excl);
}
/* Calculate dst delta values to adjust the dst points for any changes in the
* src points */
static void calc_dst_deltas_from_src(const struct pipe_blit_info *info,
- const struct vrend_blitter_delta *src0_delta,
- const struct vrend_blitter_delta *src1_delta,
- struct vrend_blitter_delta *dst0_delta,
- struct vrend_blitter_delta *dst1_delta)
+ const struct blit_point *src0_delta,
+ const struct blit_point *src1_delta,
+ struct blit_point *dst0_delta,
+ struct blit_point *dst1_delta)
{
float scale_x = (float)info->dst.box.width / (float)info->src.box.width;
float scale_y = (float)info->dst.box.height / (float)info->src.box.height;
- dst0_delta->dx = src0_delta->dx * scale_x;
- dst0_delta->dy = src0_delta->dy * scale_y;
+ dst0_delta->x = src0_delta->x * scale_x;
+ dst0_delta->y = src0_delta->y * scale_y;
- dst1_delta->dx = src1_delta->dx * scale_x;
- dst1_delta->dy = src1_delta->dy * scale_y;
+ dst1_delta->x = src1_delta->x * scale_x;
+ dst1_delta->y = src1_delta->y * scale_y;
}
static void blitter_set_points(struct vrend_blitter_ctx *blit_ctx,
const struct pipe_blit_info *info,
struct vrend_resource *src_res,
struct vrend_resource *dst_res,
- struct vrend_blitter_point *src0,
- struct vrend_blitter_point *src1)
+ struct blit_point *src0,
+ struct blit_point *src1)
{
- struct vrend_blitter_point dst0, dst1;
- struct vrend_blitter_delta src0_delta, src1_delta, dst0_delta, dst1_delta;
+ struct blit_point dst0, dst1;
+ struct blit_point src0_delta, src1_delta, dst0_delta, dst1_delta;
blit_ctx->dst_width = u_minify(dst_res->base.width0, info->dst.level);
blit_ctx->dst_height = u_minify(dst_res->base.height0, info->dst.level);
@@ -627,21 +712,21 @@ static void blitter_set_points(struct vrend_blitter_ctx *blit_ctx,
calc_src_deltas_for_bounds(src_res, info, &src0_delta, &src1_delta);
calc_dst_deltas_from_src(info, &src0_delta, &src1_delta, &dst0_delta, &dst1_delta);
- src0->x = info->src.box.x + src0_delta.dx;
- src0->y = info->src.box.y + src0_delta.dy;
- src1->x = info->src.box.x + info->src.box.width + src1_delta.dx;
- src1->y = info->src.box.y + info->src.box.height + src1_delta.dy;
+ src0->x = info->src.box.x + src0_delta.x;
+ src0->y = info->src.box.y + src0_delta.y;
+ src1->x = info->src.box.x + info->src.box.width + src1_delta.x;
+ src1->y = info->src.box.y + info->src.box.height + src1_delta.y;
- dst0.x = info->dst.box.x + dst0_delta.dx;
- dst0.y = info->dst.box.y + dst0_delta.dy;
- dst1.x = info->dst.box.x + info->dst.box.width + dst1_delta.dx;
- dst1.y = info->dst.box.y + info->dst.box.height + dst1_delta.dy;
+ dst0.x = info->dst.box.x + dst0_delta.x;
+ dst0.y = info->dst.box.y + dst0_delta.y;
+ dst1.x = info->dst.box.x + info->dst.box.width + dst1_delta.x;
+ dst1.y = info->dst.box.y + info->dst.box.height + dst1_delta.y;
VREND_DEBUG(dbg_blit, NULL, "Blitter src:[%3d, %3d] - [%3d, %3d] to dst:[%3d, %3d] - [%3d, %3d]\n",
src0->x, src0->y, src1->x, src1->y,
dst0.x, dst0.y, dst1.x, dst1.y);
- blitter_set_rectangle(blit_ctx, dst0.x, dst0.y, dst1.x, dst1.y, 0);
+ blitter_set_rectangle(blit_ctx, dst0.x, dst0.y, dst1.x, dst1.y);
}
static void vrend_set_tex_param(struct vrend_resource *src_res,
@@ -699,81 +784,90 @@ static void vrend_set_vertex_param(GLuint prog_id)
}
/* implement blitting using OpenGL. */
-void vrend_renderer_blit_gl(MAYBE_UNUSED struct vrend_context *ctx,
+void vrend_renderer_blit_gl(ASSERTED struct vrend_context *ctx,
struct vrend_resource *src_res,
struct vrend_resource *dst_res,
- GLenum blit_views[2],
- const struct pipe_blit_info *info,
- bool has_texture_srgb_decode,
- bool has_srgb_write_control,
- uint8_t swizzle[static 4])
+ const struct vrend_blit_info *info)
{
struct vrend_blitter_ctx *blit_ctx = &vrend_blit_ctx;
- GLuint buffers;
- GLuint prog_id;
- GLuint fs_id;
- bool has_depth, has_stencil;
- bool blit_stencil, blit_depth;
+
int dst_z;
- struct vrend_blitter_point src0, src1;
+ struct blit_point src0, src1;
const struct util_format_description *src_desc =
util_format_description(src_res->base.format);
const struct util_format_description *dst_desc =
util_format_description(dst_res->base.format);
- const struct vrend_format_table *orig_src_entry = vrend_get_format_table_entry(info->src.format);
-
- has_depth = util_format_has_depth(src_desc) &&
- util_format_has_depth(dst_desc);
- has_stencil = util_format_has_stencil(src_desc) &&
- util_format_has_stencil(dst_desc);
+ const struct vrend_format_table *orig_src_entry = vrend_get_format_table_entry(info->b.src.format);
- blit_depth = has_depth && (info->mask & PIPE_MASK_Z);
- blit_stencil = has_stencil && (info->mask & PIPE_MASK_S) & 0;
+ bool blit_depth = util_format_has_depth(src_desc) &&
+ util_format_has_depth(dst_desc) &&
+ (info->b.mask & PIPE_MASK_Z);
vrend_renderer_init_blit_ctx(blit_ctx);
- blitter_set_points(blit_ctx, info, src_res, dst_res, &src0, &src1);
+ blitter_set_points(blit_ctx, &info->b, src_res, dst_res, &src0, &src1);
- prog_id = glCreateProgram();
- glAttachShader(prog_id, blit_ctx->vs);
+ GLuint prog_id;
- if (blit_depth || blit_stencil) {
- fs_id = blit_get_frag_tex_writedepth(blit_ctx, src_res->base.target,
- src_res->base.nr_samples);
+ if (blit_depth) {
+ prog_id = blit_get_frag_tex_writedepth(blit_ctx, src_res->base.target,
+ src_res->base.nr_samples);
} else {
VREND_DEBUG(dbg_blit, ctx, "BLIT: applying swizzle during blit: (%d %d %d %d)\n",
- swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
- fs_id = blit_get_frag_tex_col(blit_ctx, src_res->base.target,
- src_res->base.nr_samples,
- orig_src_entry,
- swizzle);
+ info->swizzle[0], info->swizzle[1], info->swizzle[2], info->swizzle[3]);
+
+ if (info->needs_manual_srgb_decode)
+ VREND_DEBUG(dbg_blit, ctx,
+ "BLIT: applying manual srgb->linear conversion for src %s(%s)\n",
+ util_format_name(src_res->base.format),
+ util_format_name(info->b.src.format));
+
+ if (info->needs_manual_srgb_encode)
+ VREND_DEBUG(dbg_blit, ctx,
+ "BLIT: applying manual linear->srgb conversion for dst %s(%s)\n",
+ util_format_name(dst_res->base.format),
+ util_format_name(info->b.dst.format));
+
+ uint32_t flags = 0;
+ flags |= info->needs_manual_srgb_decode ? BLIT_MANUAL_SRGB_DECODE : 0;
+ flags |= info->needs_manual_srgb_encode ? BLIT_MANUAL_SRGB_ENCODE : 0;
+ prog_id = blit_get_frag_tex_col(blit_ctx, src_res->base.target,
+ src_res->base.nr_samples,
+ orig_src_entry,
+ info->swizzle,
+ flags);
}
- glAttachShader(prog_id, fs_id);
-
- if(!link_and_check(prog_id))
+ if (!prog_id) {
+ vrend_printf("Blitter: unable to create or find shader program\n");
return;
+ }
glUseProgram(prog_id);
glBindFramebuffer(GL_FRAMEBUFFER, blit_ctx->fb_id);
- vrend_fb_bind_texture_id(dst_res, blit_views[1], 0, info->dst.level, info->dst.box.z, 0);
+ vrend_fb_bind_texture_id(dst_res, info->dst_view, 0, info->b.dst.level, info->b.dst.box.z, 0);
- buffers = GL_COLOR_ATTACHMENT0;
+ GLuint buffers = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, &buffers);
- glBindTexture(src_res->target, blit_views[0]);
- vrend_set_tex_param(src_res, info, has_texture_srgb_decode);
+ glBindTexture(src_res->target, info->src_view);
+ vrend_set_tex_param(src_res, &info->b,
+ info->has_texture_srgb_decode &&
+ !info->needs_manual_srgb_decode);
vrend_set_vertex_param(prog_id);
set_dsa_write_depth_keep_stencil();
- if (info->scissor_enable) {
- glScissor(info->scissor.minx, info->scissor.miny, info->scissor.maxx - info->scissor.minx, info->scissor.maxy - info->scissor.miny);
+ if (info->b.scissor_enable) {
+ glScissor(info->b.scissor.minx, info->b.scissor.miny,
+ info->b.scissor.maxx - info->b.scissor.minx,
+ info->b.scissor.maxy - info->b.scissor.miny);
glEnable(GL_SCISSOR_TEST);
} else
glDisable(GL_SCISSOR_TEST);
- if (has_srgb_write_control) {
- if (util_format_is_srgb(info->dst.format) || util_format_is_srgb(info->src.format)) {
+ if (info->has_srgb_write_control) {
+ if (!info->needs_manual_srgb_encode &&
+ (util_format_is_srgb(info->b.dst.format) || util_format_is_srgb(info->b.src.format))) {
VREND_DEBUG(dbg_blit, ctx, "%s: Enable GL_FRAMEBUFFER_SRGB\n", __func__);
glEnable(GL_FRAMEBUFFER_SRGB);
} else {
@@ -782,22 +876,20 @@ void vrend_renderer_blit_gl(MAYBE_UNUSED struct vrend_context *ctx,
}
}
- for (dst_z = 0; dst_z < info->dst.box.depth; dst_z++) {
- float dst2src_scale = info->src.box.depth / (float)info->dst.box.depth;
- float dst_offset = ((info->src.box.depth - 1) -
- (info->dst.box.depth - 1) * dst2src_scale) * 0.5;
+ for (dst_z = 0; dst_z < info->b.dst.box.depth; dst_z++) {
+ float dst2src_scale = info->b.src.box.depth / (float)info->b.dst.box.depth;
+ float dst_offset = ((info->b.src.box.depth - 1) -
+ (info->b.dst.box.depth - 1) * dst2src_scale) * 0.5;
float src_z = (dst_z + dst_offset) * dst2src_scale;
+
uint32_t layer = (dst_res->target == GL_TEXTURE_CUBE_MAP ||
dst_res->target == GL_TEXTURE_1D_ARRAY ||
- dst_res->target == GL_TEXTURE_2D_ARRAY) ? info->dst.box.z : dst_z;
+ dst_res->target == GL_TEXTURE_2D_ARRAY) ? info->b.dst.box.z : dst_z;
- glBindFramebuffer(GL_FRAMEBUFFER, blit_ctx->fb_id);
- vrend_fb_bind_texture_id(dst_res, blit_views[1], 0, info->dst.level, layer, 0);
+ vrend_fb_bind_texture_id(dst_res, info->dst_view, 0, info->b.dst.level, layer, 0);
- buffers = GL_COLOR_ATTACHMENT0;
- glDrawBuffers(1, &buffers);
- blitter_set_texcoords(blit_ctx, src_res, info->src.level,
- info->src.box.z + src_z, 0,
+ blitter_set_texcoords(blit_ctx, src_res, info->b.src.level,
+ info->b.src.box.z + src_z, 0,
src0.x, src0.y, src1.x, src1.y);
glBufferData(GL_ARRAY_BUFFER, sizeof(blit_ctx->vertices), blit_ctx->vertices, GL_STATIC_DRAW);
@@ -805,7 +897,6 @@ void vrend_renderer_blit_gl(MAYBE_UNUSED struct vrend_context *ctx,
}
glUseProgram(0);
- glDeleteProgram(prog_id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
@@ -817,5 +908,7 @@ void vrend_blitter_fini(void)
{
vrend_blit_ctx.initialised = false;
vrend_clicbs->destroy_gl_context(vrend_blit_ctx.gl_context);
+ if (vrend_blit_ctx.blit_programs)
+ util_hash_table_destroy(vrend_blit_ctx.blit_programs);
memset(&vrend_blit_ctx, 0, sizeof(vrend_blit_ctx));
}
diff --git a/src/vrend_blitter.h b/src/vrend_blitter.h
index c4a7adbe..91e24352 100644
--- a/src/vrend_blitter.h
+++ b/src/vrend_blitter.h
@@ -24,8 +24,29 @@
#ifndef VREND_BLITTER_H
#define VREND_BLITTER_H
+#include "util/os_misc.h"
+#include "util/macros.h"
+
/* shaders for blitting */
+#define FS_HEADER_GL \
+ "#version 130\n" \
+ "// Blitter\n" \
+ "%s" \
+
+#define FS_HEADER_GLES \
+ "#version 310 es\n" \
+ "// Blitter\n" \
+ "%s" \
+ "precision mediump float;\n" \
+
+#define FS_HEADER_GLES_MS_ARRAY \
+ "#version 310 es\n" \
+ "// Blitter\n" \
+ "#extension GL_OES_texture_storage_multisample_2d_array: require\n" \
+ "%s" \
+ "precision mediump float;\n" \
+
#define HEADER_GL \
"#version 130\n" \
"// Blitter\n" \
@@ -42,6 +63,25 @@
"precision mediump float;\n" \
+#define FS_FUNC_COL_SRGB_DECODE \
+ "cvec4 srgb_decode(cvec4 col) {\n" \
+ " vec3 temp = vec3(col.rgb);\n" \
+ " bvec3 thresh = lessThanEqual(temp, vec3(0.04045));\n" \
+ " vec3 a = temp / vec3(12.92);\n" \
+ " vec3 b = pow((temp + vec3(0.055)) / vec3(1.055), vec3(2.4));\n" \
+ " return cvec4(clamp(mix(b, a, thresh), 0.0, 1.0), col.a);\n" \
+ "}\n"
+
+#define FS_FUNC_COL_SRGB_ENCODE \
+ "cvec4 srgb_encode(cvec4 col) {\n" \
+ " vec3 temp = vec3(col.rgb);\n" \
+ " bvec3 thresh = lessThanEqual(temp, vec3(0.0031308));\n" \
+ " vec3 a = temp * vec3(12.92);\n" \
+ " vec3 b = (vec3(1.055) * pow(temp, vec3(1.0 / 2.4))) - vec3(0.055);\n" \
+ " return cvec4(mix(b, a, thresh), col.a);\n" \
+ "}\n"
+
+
#define VS_PASSTHROUGH_BODY \
"in vec4 arg0;\n" \
"in vec4 arg1;\n" \
@@ -54,51 +94,59 @@
#define VS_PASSTHROUGH_GL HEADER_GL VS_PASSTHROUGH_BODY
#define VS_PASSTHROUGH_GLES HEADER_GLES VS_PASSTHROUGH_BODY
-
-#define FS_TEXFETCH_COL_BODY \
- "%s" \
- "#define cvec4 %s\n" \
- "uniform mediump %csampler%s samp;\n" \
- "in vec4 tc;\n" \
- "out cvec4 FragColor;\n" \
- "void main() {\n" \
- " cvec4 texel = texture(samp, tc%s);\n" \
- " FragColor = cvec4(%s);\n" \
+#define FS_TEXFETCH_COL_BODY \
+ "#define cvec4 %s\n" \
+ "%s\n" /* conditional decode() */ \
+ "%s\n" /* conditional encode() */ \
+ "#define decode %s\n" \
+ "#define encode %s\n" \
+ "uniform mediump %csampler%s samp;\n" \
+ "in vec4 tc;\n" \
+ "out cvec4 FragColor;\n" \
+ "void main() {\n" \
+ " cvec4 texel = decode(cvec4(texture(samp, tc%s)));\n" \
+ " FragColor = encode(cvec4(%s));\n" \
"}\n"
-#define FS_TEXFETCH_COL_GLES_1D_BODY \
- "%s" \
- "#define cvec4 %s\n" \
- "uniform mediump %csampler%s samp;\n" \
- "in vec4 tc;\n" \
- "out cvec4 FragColor;\n" \
- "void main() {\n" \
- " cvec4 texel = texture(samp, vec2(tc%s, 0.5));\n" \
- " FragColor = cvec4(%s);\n" \
+#define FS_TEXFETCH_COL_GLES_1D_BODY \
+ "#define cvec4 %s\n" \
+ "%s\n" /* conditional decode() */ \
+ "%s\n" /* conditional encode() */ \
+ "#define decode %s\n" \
+ "#define encode %s\n" \
+ "uniform mediump %csampler%s samp;\n" \
+ "in vec4 tc;\n" \
+ "out cvec4 FragColor;\n" \
+ "void main() {\n" \
+ " cvec4 texel = decode(texture(samp, vec2(tc%s, 0.5)));\n" \
+ " FragColor = encode(cvec4(%s));\n" \
"}\n"
-#define FS_TEXFETCH_COL_GL HEADER_GL FS_TEXFETCH_COL_BODY
-#define FS_TEXFETCH_COL_GLES HEADER_GLES FS_TEXFETCH_COL_BODY
-#define FS_TEXFETCH_COL_GLES_1D HEADER_GLES FS_TEXFETCH_COL_GLES_1D_BODY
-
-#define FS_TEXFETCH_COL_MSAA_BODY \
- "%s" \
- "#define cvec4 %s\n" \
- "uniform mediump %csampler%s samp;\n" \
- "in vec4 tc;\n" \
- "out cvec4 FragColor;\n" \
- "void main() {\n" \
- " const int num_samples = %d;\n" \
- " cvec4 texel = cvec4(0);\n" \
- " for (int i = 0; i < num_samples; ++i) \n" \
- " texel += texelFetch(samp, %s(tc%s), i);\n" \
- " texel = texel / cvec4(num_samples);\n" \
- " FragColor = cvec4(%s);\n" \
+#define FS_TEXFETCH_COL_GL FS_HEADER_GL FS_TEXFETCH_COL_BODY
+#define FS_TEXFETCH_COL_GLES FS_HEADER_GLES FS_TEXFETCH_COL_BODY
+#define FS_TEXFETCH_COL_GLES_1D FS_HEADER_GLES FS_TEXFETCH_COL_GLES_1D_BODY
+
+#define FS_TEXFETCH_COL_MSAA_BODY \
+ "#define cvec4 %s\n" \
+ "%s\n" /* conditional decode() */ \
+ "%s\n" /* conditional encode() */ \
+ "#define decode %s\n" \
+ "#define encode %s\n" \
+ "uniform mediump %csampler%s samp;\n" \
+ "in vec4 tc;\n" \
+ "out cvec4 FragColor;\n" \
+ "void main() {\n" \
+ " const int num_samples = %d;\n" \
+ " cvec4 texel = cvec4(0);\n" \
+ " for (int i = 0; i < num_samples; ++i) \n" \
+ " texel += decode(texelFetch(samp, %s(tc%s), i));\n" \
+ " texel = texel / cvec4(num_samples);\n" \
+ " FragColor = encode(cvec4(%s));\n" \
"}\n"
-#define FS_TEXFETCH_COL_MSAA_GL HEADER_GL FS_TEXFETCH_COL_MSAA_BODY
-#define FS_TEXFETCH_COL_MSAA_GLES HEADER_GLES FS_TEXFETCH_COL_MSAA_BODY
-#define FS_TEXFETCH_COL_MSAA_ARRAY_GLES HEADER_GLES_MS_ARRAY FS_TEXFETCH_COL_MSAA_BODY
+#define FS_TEXFETCH_COL_MSAA_GL FS_HEADER_GL FS_TEXFETCH_COL_MSAA_BODY
+#define FS_TEXFETCH_COL_MSAA_GLES FS_HEADER_GLES FS_TEXFETCH_COL_MSAA_BODY
+#define FS_TEXFETCH_COL_MSAA_ARRAY_GLES FS_HEADER_GLES_MS_ARRAY FS_TEXFETCH_COL_MSAA_BODY
#define FS_TEXFETCH_DS_BODY \
"uniform mediump sampler%s samp;\n" \
@@ -115,20 +163,29 @@
"uniform sampler%s samp;\n" \
"in vec4 tc;\n" \
"void main() {\n" \
- " gl_FragDepth = float(texelFetch(samp, %s(tc%s), int(tc.z)).x);\n" \
+ " gl_FragDepth = float(texelFetch(samp, %s(tc%s), 0).x);\n" \
"}\n"
#define FS_TEXFETCH_DS_MSAA_BODY_GLES \
"uniform mediump sampler%s samp;\n" \
"in vec4 tc;\n" \
"void main() {\n" \
- " gl_FragDepth = float(texelFetch(samp, %s(tc%s), int(tc.z)).x);\n" \
+ " gl_FragDepth = float(texelFetch(samp, %s(tc%s), 0).x);\n" \
"}\n"
+struct vrend_context;
+struct vrend_resource;
+struct vrend_blit_info;
#define FS_TEXFETCH_DS_MSAA_GL HEADER_GL FS_TEXFETCH_DS_MSAA_BODY
#define FS_TEXFETCH_DS_MSAA_GLES HEADER_GLES FS_TEXFETCH_DS_MSAA_BODY_GLES
#define FS_TEXFETCH_DS_MSAA_ARRAY_GLES HEADER_GLES_MS_ARRAY FS_TEXFETCH_DS_MSAA_BODY_GLES
+/* implement blitting using OpenGL. */
+void vrend_renderer_blit_gl(ASSERTED struct vrend_context *ctx,
+ struct vrend_resource *src_res,
+ struct vrend_resource *dst_res,
+ const struct vrend_blit_info *info);
+void vrend_blitter_fini(void);
#endif
diff --git a/src/vrend_debug.c b/src/vrend_debug.c
index a0ab7bce..9143013e 100644
--- a/src/vrend_debug.c
+++ b/src/vrend_debug.c
@@ -81,6 +81,16 @@ static const char *command_names[VIRGL_MAX_COMMANDS] = {
"PIPE_RESOURCE_SET_TYPE",
"GET_MEMORY_INFO",
"SEND_STRING_MARKER",
+ "LINK_SHADER",
+ "CREATE_VIDEO_CODEC",
+ "DESTROY_VIDEO_CODEC",
+ "CREATE_VIDEO_BUFFER",
+ "DESTROY_VIDEO_BUFFER",
+ "BEGIN_FRAME",
+ "DECODE_MACROBLOCK",
+ "DECODE_BITSTREAM",
+ "ENCODE_BITSTREAM",
+ "END_FRAME",
};
static const char *object_type_names[VIRGL_MAX_OBJECTS] = {
@@ -155,7 +165,7 @@ int vrend_get_debug_flags(const char *flagstring)
return retval;
}
-void vrend_init_debug_flags()
+void vrend_init_debug_flags(void)
{
if (!vrend_debug_flags_initalized) {
vrend_debug_flags_initalized = 1;
diff --git a/src/vrend_debug.h b/src/vrend_debug.h
index 40ff226a..7afa69b4 100644
--- a/src/vrend_debug.h
+++ b/src/vrend_debug.h
@@ -79,31 +79,30 @@ static inline void vrend_printf(const char *fmt, ...)
va_end(va);
}
-#ifndef NDEBUG
+#ifdef NDEBUG
+#define VREND_DEBUG_ENABLED (false)
+#else
+#define VREND_DEBUG_ENABLED (true)
+#endif
+
#define VREND_DEBUG(flag, ctx, ...) \
- if (vrend_debug(ctx, flag)) \
+ if (VREND_DEBUG_ENABLED && vrend_debug(ctx, flag)) \
do { \
vrend_print_context_name(ctx); \
vrend_printf(__VA_ARGS__); \
} while (0)
#define VREND_DEBUG_EXT(flag, ctx, X) \
- if (vrend_debug(ctx, flag)) \
+ if (VREND_DEBUG_ENABLED && vrend_debug(ctx, flag)) \
do { \
vrend_print_context_name(ctx); \
X; \
} while (0)
#define VREND_DEBUG_NOCTX(flag, ctx, ...) \
- if (vrend_debug(ctx, flag)) \
+ if (VREND_DEBUG_ENABLED && vrend_debug(ctx, flag)) \
do { \
vrend_printf(__VA_ARGS__); \
} while (0)
-#else
-#define VREND_DEBUG(flag, ctx, ...) (void)ctx
-#define VREND_DEBUG_EXT(flag, ctx, X) (void)ctx
-#define VREND_DEBUG_NOCTX(flag, ctx, ...) (void)ctx
-#endif
-
#endif
diff --git a/src/vrend_decode.c b/src/vrend_decode.c
index 25a9204c..8bf6de73 100644
--- a/src/vrend_decode.c
+++ b/src/vrend_decode.c
@@ -41,6 +41,10 @@
#include "vrend_tweaks.h"
#include "virgl_util.h"
+#ifdef ENABLE_VIDEO
+#include "vrend_video.h"
+#endif
+
/* decode side */
#define DECODE_MAX_TOKENS 8000
@@ -229,15 +233,7 @@ static int vrend_decode_clear_texture(struct vrend_context *ctx, const uint32_t
arr[2] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_C);
arr[3] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_D);
- vrend_clear_texture(ctx, handle, level, &box, (void *) &arr);
- return 0;
-}
-
-static float uif(unsigned int ui)
-{
- union { float f; unsigned int ui; } myuif;
- myuif.ui = ui;
- return myuif.f;
+ return vrend_clear_texture(ctx, handle, level, &box, (void *) &arr);
}
static int vrend_decode_set_viewport_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
@@ -676,6 +672,7 @@ static int vrend_decode_create_sampler_state(struct vrend_context *ctx, const ui
state.compare_func = (tmp >> 16) & 0x7;
state.seamless_cube_map = (tmp >> 19) & 0x1;
state.max_anisotropy = (float)((tmp >> 20) & 0x3f);
+ state.normalized_coords = 0;
state.lod_bias = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS));
state.min_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MIN_LOD));
@@ -1162,6 +1159,23 @@ static int vrend_decode_destroy_sub_ctx(struct vrend_context *ctx, const uint32_
return 0;
}
+static int vrend_decode_link_shader(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
+{
+ if (length != VIRGL_LINK_SHADER_SIZE)
+ return EINVAL;
+
+ uint32_t handles[PIPE_SHADER_TYPES];
+ handles[PIPE_SHADER_VERTEX] = get_buf_entry(buf, VIRGL_LINK_SHADER_VERTEX_HANDLE);
+ handles[PIPE_SHADER_FRAGMENT] = get_buf_entry(buf, VIRGL_LINK_SHADER_FRAGMENT_HANDLE);
+ handles[PIPE_SHADER_GEOMETRY] = get_buf_entry(buf, VIRGL_LINK_SHADER_GEOMETRY_HANDLE);
+ handles[PIPE_SHADER_TESS_CTRL] = get_buf_entry(buf, VIRGL_LINK_SHADER_TESS_CTRL_HANDLE);
+ handles[PIPE_SHADER_TESS_EVAL] = get_buf_entry(buf, VIRGL_LINK_SHADER_TESS_EVAL_HANDLE);
+ handles[PIPE_SHADER_COMPUTE] = get_buf_entry(buf, VIRGL_LINK_SHADER_COMPUTE_HANDLE);
+
+ vrend_link_program_hook(ctx, handles);
+ return 0;
+}
+
static int vrend_decode_bind_shader(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t handle, type;
@@ -1266,8 +1280,9 @@ static int vrend_decode_set_shader_images(struct vrend_context *ctx, const uint3
if (num_images < 1) {
return 0;
}
+
if (start_slot > PIPE_MAX_SHADER_IMAGES ||
- start_slot > PIPE_MAX_SHADER_IMAGES - num_images)
+ start_slot + num_images > PIPE_MAX_SHADER_IMAGES)
return EINVAL;
for (uint32_t i = 0; i < num_images; i++) {
@@ -1415,14 +1430,30 @@ static int vrend_decode_copy_transfer3d(struct vrend_context *ctx, const uint32_
memset(&info, 0, sizeof(info));
info.box = &box;
- vrend_decode_transfer_common(buf, &dst_handle, &info);
- info.offset = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
- info.synchronized = (get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SYNCHRONIZED) != 0);
- src_handle = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
+ // synchronized is set either to 1 or 0. This means that we can use other bits
+ // to identify the direction of copy transfer
+ uint32_t flags = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_FLAGS);
+ bool read_from_host = (flags & VIRGL_COPY_TRANSFER3D_FLAGS_READ_FROM_HOST) != 0;
+ info.synchronized = (flags & VIRGL_COPY_TRANSFER3D_FLAGS_SYNCHRONIZED) != 0;
+
+ if (!read_from_host) {
+ // this means that guest would like to make transfer to host
+ // it can also mean that guest is using legacy copy transfer path
+ vrend_decode_transfer_common(buf, &dst_handle, &info);
+ info.offset = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
+ src_handle = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
+
+ return vrend_renderer_copy_transfer3d(ctx, dst_handle, src_handle,
+ &info);
+ } else {
+ vrend_decode_transfer_common(buf, &src_handle, &info);
+ info.offset = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
+ dst_handle = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
- return vrend_renderer_copy_transfer3d(ctx, dst_handle, src_handle,
- &info);
+ return vrend_renderer_copy_transfer3d_from_host(ctx, dst_handle, src_handle,
+ &info);
+ }
}
static int vrend_decode_pipe_resource_create(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
@@ -1479,11 +1510,11 @@ static int vrend_decode_pipe_resource_set_type(struct vrend_context *ctx, const
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
uint32_t ctx_id);
-static void vrend_decode_ctx_fence_retire(void *fence_cookie,
+static void vrend_decode_ctx_fence_retire(uint64_t fence_id,
void *retire_data)
{
struct vrend_decode_ctx *dctx = retire_data;
- dctx->base.fence_retire(&dctx->base, 0, fence_cookie);
+ dctx->base.fence_retire(&dctx->base, 0, fence_id);
}
struct virgl_context *vrend_renderer_context_create(uint32_t handle,
@@ -1543,12 +1574,15 @@ static int vrend_decode_ctx_transfer_3d(struct virgl_context *ctx,
{
TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
- return vrend_renderer_transfer_iov(dctx->grctx, res->res_id, info,
- transfer_mode);
+ int ret = vrend_renderer_transfer_iov(dctx->grctx, res->res_id, info,
+ transfer_mode);
+ return vrend_check_no_error(dctx->grctx) || ret ? ret : EINVAL;
}
static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
+ UNUSED uint32_t res_id,
uint64_t blob_id,
+ UNUSED uint64_t blob_size,
UNUSED uint32_t blob_flags,
struct virgl_context_blob *blob)
{
@@ -1598,6 +1632,187 @@ static int vrend_decode_send_string_marker(struct vrend_context *ctx, const uint
return 0;
}
+#ifdef ENABLE_VIDEO
+/* video codec related functions */
+
+static int vrend_decode_create_video_codec(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_CREATE_VIDEO_CODEC_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t handle = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_HANDLE);
+ uint32_t profile = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_PROFILE);
+ uint32_t entrypoint = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_ENTRYPOINT);
+ uint32_t chroma_fmt = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_CHROMA_FMT);
+ uint32_t level = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_LEVEL);
+ uint32_t width = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_WIDTH);
+ uint32_t height = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_HEIGHT);
+ uint32_t max_ref = 2; /* The max number of ref frames is 2 by default */
+
+ if (length >= VIRGL_CREATE_VIDEO_CODEC_MAX_REF)
+ max_ref = get_buf_entry(buf, VIRGL_CREATE_VIDEO_CODEC_MAX_REF);
+
+ vrend_video_create_codec(vctx, handle, profile, entrypoint,
+ chroma_fmt, level, width, height, max_ref, 0);
+
+ return 0;
+}
+
+static int vrend_decode_destroy_video_codec(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_DESTROY_VIDEO_CODEC_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t handle = get_buf_entry(buf, VIRGL_DESTROY_VIDEO_CODEC_HANDLE);
+ vrend_video_destroy_codec(vctx, handle);
+
+ return 0;
+}
+
+static int vrend_decode_create_video_buffer(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ uint32_t i, num_res;
+ uint32_t res_handles[VREND_VIDEO_BUFFER_PLANE_NUM];
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_CREATE_VIDEO_BUFFER_MIN_SIZE)
+ return EINVAL;
+
+ num_res = length - VIRGL_CREATE_VIDEO_BUFFER_RES_BASE + 1;
+ if (num_res > VREND_VIDEO_BUFFER_PLANE_NUM)
+ num_res = VREND_VIDEO_BUFFER_PLANE_NUM;
+
+ uint32_t handle = get_buf_entry(buf, VIRGL_CREATE_VIDEO_BUFFER_HANDLE);
+ uint32_t format = get_buf_entry(buf, VIRGL_CREATE_VIDEO_BUFFER_FORMAT);
+ uint32_t width = get_buf_entry(buf, VIRGL_CREATE_VIDEO_BUFFER_WIDTH);
+ uint32_t height = get_buf_entry(buf, VIRGL_CREATE_VIDEO_BUFFER_HEIGHT);
+
+ memset(res_handles, 0, sizeof(res_handles));
+ for (i = 0; i < num_res; i++)
+ res_handles[i] = get_buf_entry(buf,
+ VIRGL_CREATE_VIDEO_BUFFER_RES_BASE + i);
+
+ vrend_video_create_buffer(vctx, handle, format, width, height,
+ res_handles, num_res);
+
+ return 0;
+}
+
+static int vrend_decode_destroy_video_buffer(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_DESTROY_VIDEO_BUFFER_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t handle = get_buf_entry(buf, VIRGL_DESTROY_VIDEO_BUFFER_HANDLE);
+ vrend_video_destroy_buffer(vctx, handle);
+
+ return 0;
+}
+
+static int vrend_decode_begin_frame(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_BEGIN_FRAME_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t cdc_handle = get_buf_entry(buf, VIRGL_BEGIN_FRAME_CDC_HANDLE);
+ uint32_t tgt_handle = get_buf_entry(buf, VIRGL_BEGIN_FRAME_TGT_HANDLE);
+ vrend_video_begin_frame(vctx, cdc_handle, tgt_handle);
+
+ return 0;
+}
+
+static int vrend_decode_decode_bitstream(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_DECODE_BS_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t cdc_handle = get_buf_entry(buf, VIRGL_DECODE_BS_CDC_HANDLE);
+ uint32_t tgt_handle = get_buf_entry(buf, VIRGL_DECODE_BS_TGT_HANDLE);
+ uint32_t dsc_handle = get_buf_entry(buf, VIRGL_DECODE_BS_DSC_HANDLE);
+ uint32_t buf_handle = get_buf_entry(buf, VIRGL_DECODE_BS_BUF_HANDLE);
+ uint32_t buf_size = get_buf_entry(buf, VIRGL_DECODE_BS_BUF_SIZE);
+
+ vrend_video_decode_bitstream(vctx, cdc_handle, tgt_handle,
+ dsc_handle, 1, &buf_handle, &buf_size);
+
+ return 0;
+}
+
+static int vrend_decode_encode_bitstream(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_ENCODE_BS_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t cdc_handle = get_buf_entry(buf, VIRGL_ENCODE_BS_CDC_HANDLE);
+ uint32_t src_handle = get_buf_entry(buf, VIRGL_ENCODE_BS_SRC_HANDLE);
+ uint32_t dest_handle = get_buf_entry(buf, VIRGL_ENCODE_BS_DEST_HANDLE);
+ uint32_t desc_handle = get_buf_entry(buf, VIRGL_ENCODE_BS_DESC_HANDLE);
+ uint32_t feed_handle = get_buf_entry(buf, VIRGL_ENCODE_BS_FEED_HANDLE);
+
+ vrend_video_encode_bitstream(vctx, cdc_handle, src_handle, dest_handle,
+ desc_handle, feed_handle);
+
+ return 0;
+}
+
+static int vrend_decode_end_frame(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ struct vrend_video_context *vctx = vrend_context_get_video_ctx(ctx);
+
+ if (length < VIRGL_END_FRAME_MIN_SIZE)
+ return EINVAL;
+
+ uint32_t cdc_handle = get_buf_entry(buf, VIRGL_END_FRAME_CDC_HANDLE);
+ uint32_t tgt_handle = get_buf_entry(buf, VIRGL_END_FRAME_TGT_HANDLE);
+
+ vrend_video_end_frame(vctx, cdc_handle, tgt_handle);
+
+ return 0;
+}
+
+#else
+
+static int vrend_unsupported(struct vrend_context *ctx,
+ const uint32_t *buf,
+ uint32_t length)
+{
+ (void)ctx;
+ (void)buf;
+ (void)length;
+ return EINVAL;
+}
+
+#endif /* ENABLE_VIDEO */
+
+
typedef int (*vrend_decode_callback)(struct vrend_context *ctx, const uint32_t *buf, uint32_t length);
static int vrend_decode_dummy(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
@@ -1661,6 +1876,28 @@ static const vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
[VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE] = vrend_decode_pipe_resource_set_type,
[VIRGL_CCMD_GET_MEMORY_INFO] = vrend_decode_get_memory_info,
[VIRGL_CCMD_SEND_STRING_MARKER] = vrend_decode_send_string_marker,
+ [VIRGL_CCMD_LINK_SHADER] = vrend_decode_link_shader,
+#ifdef ENABLE_VIDEO
+ [VIRGL_CCMD_CREATE_VIDEO_CODEC] = vrend_decode_create_video_codec,
+ [VIRGL_CCMD_DESTROY_VIDEO_CODEC] = vrend_decode_destroy_video_codec,
+ [VIRGL_CCMD_CREATE_VIDEO_BUFFER] = vrend_decode_create_video_buffer,
+ [VIRGL_CCMD_DESTROY_VIDEO_BUFFER] = vrend_decode_destroy_video_buffer,
+ [VIRGL_CCMD_BEGIN_FRAME] = vrend_decode_begin_frame,
+ [VIRGL_CCMD_DECODE_MACROBLOCK] = vrend_decode_dummy,
+ [VIRGL_CCMD_DECODE_BITSTREAM] = vrend_decode_decode_bitstream,
+ [VIRGL_CCMD_ENCODE_BITSTREAM] = vrend_decode_encode_bitstream,
+ [VIRGL_CCMD_END_FRAME] = vrend_decode_end_frame,
+#else
+ [VIRGL_CCMD_CREATE_VIDEO_CODEC] = vrend_unsupported,
+ [VIRGL_CCMD_DESTROY_VIDEO_CODEC] = vrend_unsupported,
+ [VIRGL_CCMD_CREATE_VIDEO_BUFFER] = vrend_unsupported,
+ [VIRGL_CCMD_DESTROY_VIDEO_BUFFER] = vrend_unsupported,
+ [VIRGL_CCMD_BEGIN_FRAME] = vrend_unsupported,
+ [VIRGL_CCMD_DECODE_MACROBLOCK] = vrend_unsupported,
+ [VIRGL_CCMD_DECODE_BITSTREAM] = vrend_unsupported,
+ [VIRGL_CCMD_ENCODE_BITSTREAM] = vrend_unsupported,
+ [VIRGL_CCMD_END_FRAME] = vrend_unsupported,
+#endif
};
static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
@@ -1681,10 +1918,7 @@ static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
uint32_t buf_offset = 0;
while (buf_offset < buf_total) {
-#ifndef NDEBUG
const uint32_t cur_offset = buf_offset;
-#endif
-
const uint32_t *buf = &typed_buf[buf_offset];
uint32_t len = *buf >> 16;
uint32_t cmd = *buf & 0xff;
@@ -1707,7 +1941,11 @@ static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
TRACE_SCOPE_SLOW(vrend_get_comand_name(cmd));
ret = decode_table[cmd](gdctx->grctx, buf, len);
+ if (!vrend_check_no_error(gdctx->grctx) && !ret)
+ ret = EINVAL;
if (ret) {
+ vrend_printf("context %d failed to dispatch %s: %d\n",
+ gdctx->base.ctx_id, vrend_get_comand_name(cmd), ret);
if (ret == EINVAL)
vrend_report_buffer_error(gdctx->grctx, *buf);
return ret;
@@ -1728,15 +1966,15 @@ static void vrend_decode_ctx_retire_fences(UNUSED struct virgl_context *ctx)
static int vrend_decode_ctx_submit_fence(struct virgl_context *ctx,
uint32_t flags,
- uint64_t queue_id,
- void *fence_cookie)
+ uint32_t ring_idx,
+ uint64_t fence_id)
{
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
- if (queue_id)
+ if (ring_idx)
return -EINVAL;
- return vrend_renderer_create_fence(dctx->grctx, flags, fence_cookie);
+ return vrend_renderer_create_fence(dctx->grctx, flags, fence_id);
}
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
@@ -1753,7 +1991,6 @@ static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
ctx->detach_resource = vrend_decode_ctx_detach_resource;
ctx->transfer_3d = vrend_decode_ctx_transfer_3d;
ctx->get_blob = vrend_decode_ctx_get_blob;
- ctx->get_blob_done = NULL;
ctx->submit_cmd = vrend_decode_ctx_submit_cmd;
ctx->get_fencing_fd = vrend_decode_ctx_get_fencing_fd;
diff --git a/src/vrend_formats.c b/src/vrend_formats.c
index 47ed34ec..fde607c2 100644
--- a/src/vrend_formats.c
+++ b/src/vrend_formats.c
@@ -31,6 +31,7 @@
#define NO_SWIZZLE { SWIZZLE_INVALID, SWIZZLE_INVALID, SWIZZLE_INVALID, SWIZZLE_INVALID }
#define RRR1_SWIZZLE { PIPE_SWIZZLE_RED, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_ONE }
#define RGB1_SWIZZLE { PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN, PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_ONE }
+#define OOOR_SWIZZLE { PIPE_SWIZZLE_ZERO, PIPE_SWIZZLE_ZERO, PIPE_SWIZZLE_ZERO, PIPE_SWIZZLE_RED }
#define BGR1_SWIZZLE { PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_GREEN, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_ONE }
#define BGRA_SWIZZLE { PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_GREEN, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_ALPHA }
@@ -131,11 +132,11 @@ static struct vrend_format_table float_base_formats[] = {
};
static struct vrend_format_table float_la_formats[] = {
- { VIRGL_FORMAT_A16_FLOAT, GL_ALPHA16F_ARB, GL_ALPHA, GL_HALF_FLOAT, NO_SWIZZLE },
+ { VIRGL_FORMAT_A16_FLOAT, GL_R16F, GL_RED, GL_HALF_FLOAT, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L16_FLOAT, GL_R16F, GL_RED, GL_HALF_FLOAT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L16A16_FLOAT, GL_LUMINANCE_ALPHA16F_ARB, GL_LUMINANCE_ALPHA, GL_HALF_FLOAT, NO_SWIZZLE },
- { VIRGL_FORMAT_A32_FLOAT, GL_ALPHA32F_ARB, GL_ALPHA, GL_FLOAT, NO_SWIZZLE },
+ { VIRGL_FORMAT_A32_FLOAT, GL_R32F, GL_RED, GL_FLOAT, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L32_FLOAT, GL_R32F, GL_RED, GL_FLOAT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L32A32_FLOAT, GL_LUMINANCE_ALPHA32F_ARB, GL_LUMINANCE_ALPHA, GL_FLOAT, NO_SWIZZLE },
};
@@ -171,26 +172,26 @@ static struct vrend_format_table float_3comp_formats[] = {
static struct vrend_format_table integer_la_formats[] = {
- { VIRGL_FORMAT_A8_UINT, GL_ALPHA8UI_EXT, GL_ALPHA_INTEGER, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ { VIRGL_FORMAT_A8_UINT, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L8_UINT, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L8A8_UINT, GL_LUMINANCE_ALPHA8UI_EXT, GL_LUMINANCE_ALPHA_INTEGER_EXT, GL_UNSIGNED_BYTE, NO_SWIZZLE },
- { VIRGL_FORMAT_A8_SINT, GL_ALPHA8I_EXT, GL_ALPHA_INTEGER, GL_BYTE, NO_SWIZZLE },
+ { VIRGL_FORMAT_A8_SINT, GL_R8I, GL_RED_INTEGER, GL_BYTE, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L8_SINT, GL_R8I, GL_RED_INTEGER, GL_BYTE, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L8A8_SINT, GL_LUMINANCE_ALPHA8I_EXT, GL_LUMINANCE_ALPHA_INTEGER_EXT, GL_BYTE, NO_SWIZZLE },
- { VIRGL_FORMAT_A16_UINT, GL_ALPHA16UI_EXT, GL_ALPHA_INTEGER, GL_UNSIGNED_SHORT, NO_SWIZZLE },
+ { VIRGL_FORMAT_A16_UINT, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L16_UINT, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L16A16_UINT, GL_LUMINANCE_ALPHA16UI_EXT, GL_LUMINANCE_ALPHA_INTEGER_EXT, GL_UNSIGNED_SHORT, NO_SWIZZLE },
- { VIRGL_FORMAT_A16_SINT, GL_ALPHA16I_EXT, GL_ALPHA_INTEGER, GL_SHORT, NO_SWIZZLE },
+ { VIRGL_FORMAT_A16_SINT, GL_R16I, GL_RED_INTEGER, GL_SHORT, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L16_SINT, GL_R16I, GL_RED_INTEGER, GL_SHORT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L16A16_SINT, GL_LUMINANCE_ALPHA16I_EXT, GL_LUMINANCE_ALPHA_INTEGER_EXT, GL_SHORT, NO_SWIZZLE },
- { VIRGL_FORMAT_A32_UINT, GL_ALPHA32UI_EXT, GL_ALPHA_INTEGER, GL_UNSIGNED_INT, NO_SWIZZLE },
+ { VIRGL_FORMAT_A32_UINT, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L32_UINT, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L32A32_UINT, GL_LUMINANCE_ALPHA32UI_EXT, GL_LUMINANCE_ALPHA_INTEGER_EXT, GL_UNSIGNED_INT, NO_SWIZZLE },
- { VIRGL_FORMAT_A32_SINT, GL_ALPHA32I_EXT, GL_ALPHA_INTEGER, GL_INT, NO_SWIZZLE },
+ { VIRGL_FORMAT_A32_SINT, GL_R32I, GL_RED_INTEGER, GL_INT, OOOR_SWIZZLE },
{ VIRGL_FORMAT_L32_SINT, GL_R32I, GL_RED_INTEGER, GL_INT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L32A32_SINT, GL_LUMINANCE_ALPHA32I_EXT, GL_LUMINANCE_ALPHA_INTEGER_EXT, GL_INT, NO_SWIZZLE },
@@ -291,12 +292,8 @@ static struct vrend_format_table srgb_formats[] = {
{ VIRGL_FORMAT_L8_SRGB, GL_SR8_EXT, GL_RED, GL_UNSIGNED_BYTE, RRR1_SWIZZLE },
{ VIRGL_FORMAT_R8_SRGB, GL_SR8_EXT, GL_RED, GL_UNSIGNED_BYTE, NO_SWIZZLE },
-};
-static struct vrend_format_table gl_srgb_formats[] =
-{
- { VIRGL_FORMAT_B8G8R8X8_SRGB, GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
- { VIRGL_FORMAT_B8G8R8A8_SRGB, GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ { VIRGL_FORMAT_R8G8_SRGB, GL_SRG8_EXT, GL_RG, GL_UNSIGNED_BYTE, NO_SWIZZLE },
};
static struct vrend_format_table bit10_formats[] = {
@@ -326,6 +323,8 @@ static struct vrend_format_table bptc_formats[] = {
static struct vrend_format_table gl_bgra_formats[] = {
{ VIRGL_FORMAT_B8G8R8X8_UNORM, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
{ VIRGL_FORMAT_B8G8R8A8_UNORM, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
+ { VIRGL_FORMAT_B8G8R8X8_SRGB, GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
+ { VIRGL_FORMAT_B8G8R8A8_SRGB, GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
};
static struct vrend_format_table gles_bgra_formats[] = {
@@ -427,53 +426,19 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
glBindTexture(GL_TEXTURE_2D, tex_id);
glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
- /* we can't probe compressed formats, as we'd need valid payloads to
- * glCompressedTexImage2D. Let's just check for extensions instead.
- */
- if (table[i].format < VIRGL_FORMAT_MAX) {
- const struct util_format_description *desc = util_format_description(table[i].format);
- switch (desc->layout) {
- case UTIL_FORMAT_LAYOUT_S3TC:
- if (epoxy_has_gl_extension("GL_S3_s3tc") ||
- epoxy_has_gl_extension("GL_EXT_texture_compression_s3tc"))
- vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
- continue;
-
- case UTIL_FORMAT_LAYOUT_RGTC:
- if (epoxy_has_gl_extension("GL_ARB_texture_compression_rgtc") ||
- epoxy_has_gl_extension("GL_EXT_texture_compression_rgtc") )
- vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
- continue;
-
- case UTIL_FORMAT_LAYOUT_ETC:
- if ((table[i].format == VIRGL_FORMAT_ETC1_RGB8 &&
- epoxy_has_gl_extension("GL_OES_compressed_ETC1_RGB8_texture")) ||
- (table[i].format != VIRGL_FORMAT_ETC1_RGB8 && gles_ver >= 30))
- vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
- continue;
-
- case UTIL_FORMAT_LAYOUT_BPTC:
- if (epoxy_has_gl_extension("GL_ARB_texture_compression_bptc") ||
- epoxy_has_gl_extension("GL_EXT_texture_compression_bptc"))
- vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
- continue;
-
- case UTIL_FORMAT_LAYOUT_ASTC:
- if(epoxy_has_gl_extension("GL_KHR_texture_compression_astc_ldr"))
- vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
- continue;
- default:
- ;/* do logic below */
- }
- }
-
/* The error state should be clear here */
status = glGetError();
assert(status == GL_NO_ERROR);
glTexImage2D(GL_TEXTURE_2D, 0, table[i].internalformat, 32, 32, 0, table[i].glformat, table[i].gltype, NULL);
status = glGetError();
- if (status == GL_INVALID_VALUE || status == GL_INVALID_ENUM || status == GL_INVALID_OPERATION) {
+ /* Currently possible errors are:
+ * * GL_INVALID_VALUE
+ * * GL_INVALID_ENUM
+ * * GL_INVALID_OPERATION
+ * * GL_OUT_OF_MEMORY
+ */
+ if (status != GL_NO_ERROR) {
struct vrend_format_table *entry = NULL;
uint8_t swizzle[4];
binding = VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_RENDER_TARGET;
@@ -483,11 +448,13 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
entry = &rg_base_formats[0];
swizzle[0] = swizzle[1] = swizzle[2] = PIPE_SWIZZLE_ZERO;
swizzle[3] = PIPE_SWIZZLE_RED;
+ flags |= VIRGL_TEXTURE_NEED_SWIZZLE;
break;
case VIRGL_FORMAT_A16_UNORM:
entry = &rg_base_formats[2];
swizzle[0] = swizzle[1] = swizzle[2] = PIPE_SWIZZLE_ZERO;
swizzle[3] = PIPE_SWIZZLE_RED;
+ flags |= VIRGL_TEXTURE_NEED_SWIZZLE;
break;
default:
break;
@@ -501,6 +468,14 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
continue;
}
+ if (is_desktop_gl) {
+ glTexImage2D(GL_TEXTURE_RECTANGLE_NV, 0, table[i].internalformat, 32, 32, 0, table[i].glformat, table[i].gltype, NULL);
+ status = glGetError();
+ if (status == GL_NO_ERROR) {
+ flags |= VIRGL_TEXTURE_CAN_TARGET_RECTANGLE;
+ }
+ }
+
if (table[i].format < VIRGL_FORMAT_MAX && util_format_is_depth_or_stencil(table[i].format)) {
GLenum attachment;
@@ -523,14 +498,19 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
binding = VIRGL_BIND_SAMPLER_VIEW;
- if (status == GL_FRAMEBUFFER_COMPLETE) {
+ if (status == GL_FRAMEBUFFER_COMPLETE)
binding |= is_depth ? VIRGL_BIND_DEPTH_STENCIL : VIRGL_BIND_RENDER_TARGET;
- if (is_desktop_gl ||
- (is_depth && depth_stencil_formats_can_readback(table[i].format)) ||
- color_format_can_readback(&table[i], gles_ver))
- flags |= VIRGL_TEXTURE_CAN_READBACK;
- }
+ /* On OpenGL all textures can be read back using glGetTexImage, but on GLES
+ we have to be able to bind textures to framebuffers, and use glReadPixels
+ to get the data. And apart from a few formats where support is required
+ (by the GLES version), we have to query the driver to identify additional
+ formats that are supported as destination formats by glReadPixels. */
+ if (is_desktop_gl ||
+ (status == GL_FRAMEBUFFER_COMPLETE &&
+ (is_depth ? depth_stencil_formats_can_readback(table[i].format) :
+ color_format_can_readback(&table[i], gles_ver))))
+ flags |= VIRGL_TEXTURE_CAN_READBACK;
glDeleteTextures(1, &tex_id);
glDeleteFramebuffers(1, &fb_id);
@@ -542,7 +522,17 @@ static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
}
}
+static void vrend_add_compressed_formats(struct vrend_format_table *table, int num_entries)
+{
+ int flags = epoxy_is_desktop_gl() ? VIRGL_TEXTURE_CAN_READBACK : 0;
+ for (int i = 0; i < num_entries; i++) {
+ vrend_insert_format(&table[i], VIRGL_BIND_SAMPLER_VIEW, flags);
+ }
+}
+
+
#define add_formats(x) vrend_add_formats((x), ARRAY_SIZE((x)))
+#define add_compressed_formats(x) vrend_add_compressed_formats((x), ARRAY_SIZE((x)))
void vrend_build_format_list_common(void)
{
@@ -572,10 +562,20 @@ void vrend_build_format_list_common(void)
add_formats(snorm_la_formats);
/* compressed */
- add_formats(etc2_formats);
- add_formats(rgtc_formats);
- add_formats(dxtn_formats);
- add_formats(dxtn_srgb_formats);
+ if (epoxy_has_gl_extension("GL_S3_s3tc") ||
+ epoxy_has_gl_extension("GL_EXT_texture_compression_s3tc") ||
+ epoxy_has_gl_extension("GL_ANGLE_texture_compression_dxt")) {
+ add_compressed_formats(dxtn_formats);
+ add_compressed_formats(dxtn_srgb_formats);
+ }
+
+ if (epoxy_has_gl_extension("GL_ARB_texture_compression_rgtc") ||
+ epoxy_has_gl_extension("GL_EXT_texture_compression_rgtc") )
+ add_compressed_formats(rgtc_formats);
+
+ if (epoxy_has_gl_extension("GL_ARB_texture_compression_bptc") ||
+ epoxy_has_gl_extension("GL_EXT_texture_compression_bptc"))
+ add_compressed_formats(bptc_formats);
add_formats(srgb_formats);
@@ -583,8 +583,6 @@ void vrend_build_format_list_common(void)
add_formats(packed_float_formats);
add_formats(exponent_float_formats);
-
- add_formats(bptc_formats);
}
@@ -595,7 +593,6 @@ void vrend_build_format_list_gl(void)
*/
add_formats(gl_base_rgba_formats);
add_formats(gl_bgra_formats);
- add_formats(gl_srgb_formats);
}
void vrend_build_format_list_gles(void)
@@ -615,7 +612,14 @@ void vrend_build_format_list_gles(void)
*/
add_formats(gles_z32_format);
add_formats(gles_bit10_formats);
- add_formats(astc_formats);
+
+ if (epoxy_has_gl_extension("GL_KHR_texture_compression_astc_ldr"))
+ add_compressed_formats(astc_formats);
+
+ if (epoxy_gl_version() >= 30) {
+ add_compressed_formats(etc2_formats);
+ }
+
}
/* glTexStorage may not support all that is supported by glTexImage,
@@ -639,7 +643,35 @@ void vrend_check_texture_storage(struct vrend_format_table *table)
}
}
-bool vrend_check_framebuffer_mixed_color_attachements()
+void vrend_check_texture_multisample(struct vrend_format_table *table,
+ bool enable_storage)
+{
+ bool is_desktop_gl = epoxy_is_desktop_gl();
+ for (int i = 0; i < VIRGL_FORMAT_MAX_EXTENDED; i++) {
+ bool function_available =
+ (table[i].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE) ? enable_storage : is_desktop_gl;
+
+ if (table[i].internalformat != 0 &&
+ !(table[i].flags & VIRGL_TEXTURE_CAN_MULTISAMPLE) &&
+ function_available) {
+ GLuint tex_id;
+ glGenTextures(1, &tex_id);
+ glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, tex_id);
+ if (table[i].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE) {
+ glTexStorage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 2,
+ table[i].internalformat, 32, 32, GL_TRUE);
+ } else {
+ glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 2,
+ table[i].internalformat, 32, 32, GL_TRUE);
+ }
+ if (glGetError() == GL_NO_ERROR)
+ table[i].flags |= VIRGL_TEXTURE_CAN_MULTISAMPLE;
+ glDeleteTextures(1, &tex_id);
+ }
+ }
+}
+
+bool vrend_check_framebuffer_mixed_color_attachements(void)
{
GLuint tex_id[2];
GLuint fb_id;
@@ -836,20 +868,21 @@ static boolean format_compressed_compressed_copy_compatible(enum virgl_formats s
(src == VIRGL_FORMAT_ASTC_10x8 && dst == VIRGL_FORMAT_ASTC_10x8_SRGB) ||
(src == VIRGL_FORMAT_ASTC_10x10 && dst == VIRGL_FORMAT_ASTC_10x10_SRGB) ||
(src == VIRGL_FORMAT_ASTC_12x10 && dst == VIRGL_FORMAT_ASTC_12x10_SRGB) ||
- (src == VIRGL_FORMAT_ASTC_12x12 && dst == VIRGL_FORMAT_ASTC_12x12_SRGB))
+ (src == VIRGL_FORMAT_ASTC_12x12 && dst == VIRGL_FORMAT_ASTC_12x12_SRGB) ||
+ (src == VIRGL_FORMAT_ETC2_R11_UNORM && dst == VIRGL_FORMAT_ETC2_R11_SNORM) ||
+ (src == VIRGL_FORMAT_ETC2_RG11_UNORM && dst == VIRGL_FORMAT_ETC2_RG11_SNORM) ||
+ (src == VIRGL_FORMAT_ETC2_RGBA8 && dst == VIRGL_FORMAT_ETC2_SRGBA8) ||
+ (src == VIRGL_FORMAT_ETC2_RGB8A1 && dst == VIRGL_FORMAT_ETC2_SRGB8A1) ||
+ (src == VIRGL_FORMAT_ETC2_RGB8 && dst == VIRGL_FORMAT_ETC2_SRGB8))
return true;
}
if ((src == VIRGL_FORMAT_RGTC1_UNORM && dst == VIRGL_FORMAT_RGTC1_SNORM) ||
(src == VIRGL_FORMAT_RGTC2_UNORM && dst == VIRGL_FORMAT_RGTC2_SNORM) ||
(src == VIRGL_FORMAT_BPTC_RGBA_UNORM && dst == VIRGL_FORMAT_BPTC_SRGBA) ||
- (src == VIRGL_FORMAT_BPTC_RGB_FLOAT && dst == VIRGL_FORMAT_BPTC_RGB_UFLOAT) ||
- (src == VIRGL_FORMAT_ETC2_R11_UNORM && dst == VIRGL_FORMAT_ETC2_R11_SNORM) ||
- (src == VIRGL_FORMAT_ETC2_RG11_UNORM && dst == VIRGL_FORMAT_ETC2_RG11_SNORM) ||
- (src == VIRGL_FORMAT_ETC2_RGBA8 && dst == VIRGL_FORMAT_ETC2_SRGBA8) ||
- (src == VIRGL_FORMAT_ETC2_RGB8A1 && dst == VIRGL_FORMAT_ETC2_SRGB8A1) ||
- (src == VIRGL_FORMAT_ETC2_RGB8 && dst == VIRGL_FORMAT_ETC2_SRGB8))
- return true;
+ (src == VIRGL_FORMAT_BPTC_RGB_FLOAT && dst == VIRGL_FORMAT_BPTC_RGB_UFLOAT))
+ return true;
+
return false;
}
diff --git a/src/vrend_object.c b/src/vrend_object.c
index 7025cd9e..084e4ff5 100644
--- a/src/vrend_object.c
+++ b/src/vrend_object.c
@@ -61,7 +61,7 @@ static void free_object(void *value)
struct util_hash_table *vrend_object_init_ctx_table(void)
{
struct util_hash_table *ctx_hash;
- ctx_hash = util_hash_table_create(hash_func_u32, compare_func, free_object);
+ ctx_hash = util_hash_table_create(hash_func_u32, equal_func, free_object);
return ctx_hash;
}
@@ -82,7 +82,7 @@ struct util_hash_table *
vrend_ctx_resource_init_table(void)
{
return util_hash_table_create(hash_func_u32,
- compare_func,
+ equal_func,
vrend_ctx_resource_destroy_func);
}
diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
index 4b5f881f..4b7ee369 100644
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -26,19 +26,18 @@
#endif
#include <unistd.h>
+#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>
#include "pipe/p_shader_tokens.h"
-#include "pipe/p_context.h"
#include "pipe/p_defines.h"
-#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_dual_blend.h"
-#include "os/os_thread.h"
+#include "util/u_thread.h"
#include "util/u_format.h"
#include "tgsi/tgsi_parse.h"
@@ -46,8 +45,10 @@
#include "vrend_shader.h"
#include "vrend_renderer.h"
+#include "vrend_blitter.h"
#include "vrend_debug.h"
#include "vrend_winsys.h"
+#include "vrend_blitter.h"
#include "virgl_util.h"
@@ -63,6 +64,10 @@
#include <epoxy/glx.h>
#endif
+#ifdef ENABLE_VIDEO
+#include <vrend_video.h>
+#endif
+
/*
* VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both
* virgl_caps_v1 and are exactly the same.
@@ -87,7 +92,7 @@ struct vrend_fence {
*/
struct vrend_context *ctx;
uint32_t flags;
- void *fence_cookie;
+ uint64_t fence_id;
union {
GLsync glsyncobj;
@@ -106,8 +111,8 @@ struct vrend_query {
GLuint index;
GLuint gltype;
struct vrend_context *ctx;
+ int sub_ctx_id;
struct vrend_resource *res;
- uint64_t current_total;
bool fake_samples_passed;
};
@@ -140,7 +145,7 @@ enum features_id
feat_depth_clamp,
feat_draw_instance,
feat_dual_src_blend,
- feat_egl_image_external,
+ feat_egl_image,
feat_egl_image_storage,
feat_enhanced_layouts,
feat_fb_no_attach,
@@ -167,6 +172,7 @@ enum features_id
feat_multi_draw_indirect,
feat_nv_conditional_render,
feat_nv_prim_restart,
+ feat_shader_noperspective_interpolation,
feat_nvx_gpu_memory_info,
feat_polygon_offset_clamp,
feat_occlusion_query,
@@ -191,6 +197,7 @@ enum features_id
feat_texture_gather,
feat_texture_multisample,
feat_texture_query_lod,
+ feat_texture_shadow_lod,
feat_texture_srgb_decode,
feat_texture_storage,
feat_texture_view,
@@ -243,7 +250,7 @@ static const struct {
FEAT(dual_src_blend, 33, UNAVAIL, "GL_ARB_blend_func_extended", "GL_EXT_blend_func_extended" ),
FEAT(depth_clamp, 32, UNAVAIL, "GL_ARB_depth_clamp", "GL_EXT_depth_clamp", "GL_NV_depth_clamp"),
FEAT(enhanced_layouts, 44, UNAVAIL, "GL_ARB_enhanced_layouts"),
- FEAT(egl_image_external, UNAVAIL, UNAVAIL, "GL_OES_EGL_image_external"),
+ FEAT(egl_image, UNAVAIL, UNAVAIL, "GL_OES_EGL_image"),
FEAT(egl_image_storage, UNAVAIL, UNAVAIL, "GL_EXT_EGL_image_storage"),
FEAT(fb_no_attach, 43, 31, "GL_ARB_framebuffer_no_attachments" ),
FEAT(framebuffer_fetch, UNAVAIL, UNAVAIL, "GL_EXT_shader_framebuffer_fetch" ),
@@ -269,6 +276,7 @@ static const struct {
FEAT(multi_draw_indirect, 43, UNAVAIL, "GL_ARB_multi_draw_indirect", "GL_EXT_multi_draw_indirect" ),
FEAT(nv_conditional_render, UNAVAIL, UNAVAIL, "GL_NV_conditional_render" ),
FEAT(nv_prim_restart, UNAVAIL, UNAVAIL, "GL_NV_primitive_restart" ),
+ FEAT(shader_noperspective_interpolation, 31, UNAVAIL, "GL_NV_shader_noperspective_interpolation", "GL_EXT_gpu_shader4"),
FEAT(nvx_gpu_memory_info, UNAVAIL, UNAVAIL, "GL_NVX_gpu_memory_info" ),
FEAT(polygon_offset_clamp, 46, UNAVAIL, "GL_ARB_polygon_offset_clamp", "GL_EXT_polygon_offset_clamp"),
FEAT(occlusion_query, 15, UNAVAIL, "GL_ARB_occlusion_query"),
@@ -293,6 +301,7 @@ static const struct {
FEAT(texture_gather, 40, 31, "GL_ARB_texture_gather" ),
FEAT(texture_multisample, 32, 31, "GL_ARB_texture_multisample" ),
FEAT(texture_query_lod, 40, UNAVAIL, "GL_ARB_texture_query_lod", "GL_EXT_texture_query_lod"),
+ FEAT(texture_shadow_lod, UNAVAIL, UNAVAIL, "GL_EXT_texture_shadow_lod"),
FEAT(texture_srgb_decode, UNAVAIL, UNAVAIL, "GL_EXT_texture_sRGB_decode" ),
FEAT(texture_storage, 42, 30, "GL_ARB_texture_storage" ),
FEAT(texture_view, 43, UNAVAIL, "GL_ARB_texture_view", "GL_OES_texture_view", "GL_EXT_texture_view" ),
@@ -313,56 +322,66 @@ struct global_renderer_state {
struct vrend_context *current_ctx;
struct vrend_context *current_hw_ctx;
- /* fence_mutex should be locked before using the query list
- * if async fence callback are enabled
- */
struct list_head waiting_query_list;
struct list_head fence_list;
struct list_head fence_wait_list;
struct vrend_fence *fence_waiting;
- struct vrend_context *current_sync_thread_ctx;
int gl_major_ver;
int gl_minor_ver;
- pipe_mutex fence_mutex;
- pipe_thread sync_thread;
+ mtx_t fence_mutex;
+ thrd_t sync_thread;
virgl_gl_context sync_context;
- pipe_condvar fence_cond;
+ cnd_t fence_cond;
+
+ /* only used with async fence callback */
+ atomic_bool has_waiting_queries;
+ bool polling;
+ mtx_t poll_mutex;
+ cnd_t poll_cond;
float tess_factors[6];
int eventfd;
uint32_t max_draw_buffers;
+ uint32_t max_texture_buffer_size;
uint32_t max_texture_2d_size;
uint32_t max_texture_3d_size;
uint32_t max_texture_cube_size;
+ uint32_t max_shader_patch_varyings;
/* inferred GL caching type */
uint32_t inferred_gl_caching_type;
uint64_t features[feat_last / 64 + 1];
- uint32_t finishing : 1;
- uint32_t use_gles : 1;
- uint32_t use_core_profile : 1;
- uint32_t use_external_blob : 1;
- uint32_t use_integer : 1;
+ bool finishing : 1;
+ bool use_gles : 1;
+ bool use_core_profile : 1;
+ bool use_external_blob : 1;
+ bool use_integer : 1;
/* these appeared broken on at least one driver */
- uint32_t use_explicit_locations : 1;
+ bool use_explicit_locations : 1;
/* threaded sync */
- uint32_t stop_sync_thread : 1;
+ bool stop_sync_thread : 1;
/* async fence callback */
bool use_async_fence_cb : 1;
- /* Needed on GLES to inject a TCS */
- uint32_t bgra_srgb_emulation_loaded : 1;
#ifdef HAVE_EPOXY_EGL_H
- uint32_t use_egl_fence : 1;
+ bool use_egl_fence : 1;
#endif
};
+struct sysval_uniform_block {
+ GLfloat clipp[VIRGL_NUM_CLIP_PLANES][4];
+ GLuint stipple_pattern[VREND_POLYGON_STIPPLE_SIZE][4];
+ GLfloat winsys_adjust_y;
+ GLfloat alpha_ref_val;
+ GLfloat clip_plane_enabled;
+};
+
static struct global_renderer_state vrend_state;
static inline bool has_feature(enum features_id feature_id)
@@ -395,7 +414,11 @@ static inline void clear_feature(enum features_id feature_id)
struct vrend_linked_shader_program {
struct list_head head;
struct list_head sl[PIPE_SHADER_TYPES];
- GLuint id;
+ bool is_pipeline;
+ union {
+ GLuint program;
+ GLuint pipeline;
+ } id;
bool dual_src_linked;
struct vrend_shader *ss[PIPE_SHADER_TYPES];
@@ -412,14 +435,10 @@ struct vrend_linked_shader_program {
GLuint *attrib_locs;
uint32_t shadow_samp_mask[PIPE_SHADER_TYPES];
- GLuint vs_ws_adjust_loc;
- float viewport_neg_val;
-
- GLint fs_stipple_loc;
-
- GLint fs_alpha_ref_val_loc;
-
- GLuint clip_locs[8];
+ GLuint separate_virgl_block_id[PIPE_SHADER_TYPES];
+ GLint virgl_block_bind;
+ uint32_t sysvalue_data_cookie;
+ GLint ubo_sysval_buffer_id;
uint32_t images_used_mask[PIPE_SHADER_TYPES];
GLint *img_locs[PIPE_SHADER_TYPES];
@@ -441,8 +460,11 @@ struct vrend_shader {
struct vrend_strarray glsl_strings;
GLuint id;
+ GLuint program_id; /* only used for separable shaders */
+ GLuint last_pipeline_id;
uint32_t uid;
bool is_compiled;
+ bool is_linked; /* only used for separable shaders */
struct vrend_shader_key key;
struct list_head programs;
};
@@ -450,8 +472,7 @@ struct vrend_shader {
struct vrend_shader_selector {
struct pipe_reference reference;
- unsigned num_shaders;
- unsigned type;
+ enum pipe_shader_type type;
struct vrend_shader_info sinfo;
struct vrend_shader *current;
@@ -502,9 +523,9 @@ struct vrend_sampler_view {
GLenum target;
GLuint val0, val1;
GLint gl_swizzle[4];
- GLenum depth_texture_mode;
GLuint srgb_decode;
GLuint levels;
+ bool emulated_rect;
struct vrend_resource *texture;
};
@@ -512,6 +533,7 @@ struct vrend_image_view {
GLuint id;
GLenum access;
GLenum format;
+ uint32_t vformat;
union {
struct {
unsigned first_layer:16; /**< first layer to use for array textures */
@@ -551,6 +573,8 @@ struct vrend_vertex_element_array {
GLuint id;
uint32_t signed_int_bitmask;
uint32_t unsigned_int_bitmask;
+ uint32_t zyxw_bitmask;
+ struct vrend_sub_context *owning_sub;
};
struct vrend_constants {
@@ -650,7 +674,7 @@ struct vrend_sub_context {
int32_t n_samplers[PIPE_SHADER_TYPES];
uint32_t fb_id;
- int nr_cbufs, old_nr_cbufs;
+ int nr_cbufs;
struct vrend_surface *zsurf;
struct vrend_surface *surf[PIPE_MAX_COLOR_BUFS];
@@ -672,7 +696,7 @@ struct vrend_sub_context {
bool viewport_is_negative;
/* this is set if the contents of the FBO look upside down when viewed
with 0,0 as the bottom corner */
- bool inverted_fbo_content;
+ bool fbo_origin_upper_left;
GLuint blit_fb_ids[2];
@@ -685,7 +709,6 @@ struct vrend_sub_context {
bool stencil_test_enabled;
bool framebuffer_srgb_enabled;
- GLuint program_id;
int last_shader_idx;
GLint draw_indirect_buffer;
@@ -713,12 +736,16 @@ struct vrend_sub_context {
uint32_t abo_used_mask;
struct vrend_context_tweaks tweaks;
uint8_t swizzle_output_rgb_to_bgr;
- uint8_t convert_linear_to_srgb_on_write;
+ uint8_t needs_manual_srgb_encode_bitmask;
int fake_occlusion_query_samples_passed_multiplier;
int prim_mode;
bool drawing;
struct vrend_context *parent;
+ struct sysval_uniform_block sysvalue_data;
+ uint32_t sysvalue_data_cookie;
+ uint32_t current_program_id;
+ uint32_t current_pipeline_id;
};
struct vrend_untyped_resource {
@@ -732,6 +759,10 @@ struct vrend_context {
struct list_head sub_ctxs;
struct list_head vrend_resources;
+#ifdef ENABLE_VIDEO
+ struct vrend_video_context *video;
+#endif
+
struct vrend_sub_context *sub;
struct vrend_sub_context *sub0;
@@ -739,9 +770,6 @@ struct vrend_context {
/* has this ctx gotten an error? */
bool in_error;
bool ctx_switch_pending;
- bool pstip_inited;
-
- GLuint pstipple_tex_id;
enum virgl_ctx_errors last_error;
@@ -769,8 +797,6 @@ struct vrend_context {
struct list_head untyped_resources;
struct virgl_resource *untyped_resource_cache;
- struct list_head active_nontimer_query_list;
-
struct vrend_shader_cfg shader_cfg;
unsigned debug_flags;
@@ -779,7 +805,6 @@ struct vrend_context {
void *fence_retire_data;
};
-static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle);
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause);
static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx);
static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx);
@@ -827,6 +852,11 @@ static inline bool vrend_format_can_readback(enum virgl_formats format)
return tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_READBACK;
}
+static inline bool vrend_format_can_multisample(enum virgl_formats format)
+{
+ return tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_MULTISAMPLE;
+}
+
static inline bool vrend_format_can_render(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VIRGL_BIND_RENDER_TARGET;
@@ -882,25 +912,7 @@ bool vrend_format_is_bgra(enum virgl_formats format) {
format == VIRGL_FORMAT_B8G8R8A8_SRGB);
}
-static bool vrend_resource_is_emulated_bgra(struct vrend_resource *res)
-{
- /* On all hosts, BGR* resources are swizzled on upload and stored with RGB*
- * internal format. On GLES hosts, we must perform that swizzle ourselves.
- * However, for externally-stored resources such as EGL images and
- * GBM-allocated dma-bufs, the pixel data is expected to be stored with BGR*
- * byte-ordering. Emulation is added during texture sampling, blitting, and
- * rendering to correct the red/blue color inversion caused by the mismatch
- * between storage expectation and the RGB* internal format given to the host
- * GL[ES] API.
- */
- if (vrend_format_is_bgra(res->base.format) &&
- (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE) || res->egl_image ||
- has_bit(res->storage_bits, VREND_STORAGE_GBM_BUFFER) || res->gbm_bo))
- return true;
- return false;
-}
-
-static bool vrend_resource_has_24bpp_internal_format(struct vrend_resource *res)
+static bool vrend_resource_has_24bpp_internal_format(const struct vrend_resource *res)
{
/* Some shared resources imported to guest mesa as EGL images occupy 24bpp instead of more common 32bpp. */
return (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE) &&
@@ -908,6 +920,48 @@ static bool vrend_resource_has_24bpp_internal_format(struct vrend_resource *res)
res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM));
}
+static bool vrend_resource_supports_view(const struct vrend_resource *res,
+ UNUSED enum virgl_formats view_format)
+{
+ /* Texture views on eglimage-backed bgr* resources are not supported and
+ * lead to unexpected format interpretation since internally allocated
+ * bgr* resources use GL_RGBA8 internal format, while eglimage-backed
+ * resources use BGRA8, but GL lacks an equivalent internalformat enum.
+ *
+ * For views that don't require colorspace conversion, we can add swizzles
+ * instead. For views that do require colorspace conversion, manual srgb
+ * decode/encode is required. */
+ return !(vrend_format_is_bgra(res->base.format) &&
+ has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE)) &&
+ !vrend_resource_has_24bpp_internal_format(res);
+}
+
+static inline bool
+vrend_resource_needs_redblue_swizzle(struct vrend_resource *res,
+ enum virgl_formats view_format)
+{
+ return !vrend_resource_supports_view(res, view_format) &&
+ vrend_format_is_bgra(res->base.format) ^ vrend_format_is_bgra(view_format);
+}
+
+static inline bool
+vrend_resource_needs_srgb_decode(struct vrend_resource *res,
+ enum virgl_formats view_format)
+{
+ return !vrend_resource_supports_view(res, view_format) &&
+ util_format_is_srgb(res->base.format) &&
+ !util_format_is_srgb(view_format);
+}
+
+static inline bool
+vrend_resource_needs_srgb_encode(struct vrend_resource *res,
+ enum virgl_formats view_format)
+{
+ return !vrend_resource_supports_view(res, view_format) &&
+ !util_format_is_srgb(res->base.format) &&
+ util_format_is_srgb(view_format);
+}
+
static bool vrend_blit_needs_swizzle(enum virgl_formats src,
enum virgl_formats dst)
{
@@ -918,7 +972,7 @@ static bool vrend_blit_needs_swizzle(enum virgl_formats src,
return false;
}
-static inline const char *pipe_shader_to_prefix(int shader_type)
+static inline const char *pipe_shader_to_prefix(enum pipe_shader_type shader_type)
{
switch (shader_type) {
case PIPE_SHADER_VERTEX: return "vs";
@@ -970,6 +1024,9 @@ static const char *vrend_ctx_error_strings[] = {
[VIRGL_ERROR_CTX_ILLEGAL_FORMAT] = "Illegal format ID",
[VIRGL_ERROR_CTX_ILLEGAL_SAMPLER_VIEW_TARGET] = "Illegat target for sampler view",
[VIRGL_ERROR_CTX_TRANSFER_IOV_BOUNDS] = "IOV data size exceeds resource capacity",
+ [VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND]= "Dual source blend not supported",
+ [VIRGL_ERROR_CTX_UNSUPPORTED_FUNCTION] = "Unsupported host function called",
+ [VIRGL_ERROR_CTX_ILLEGAL_PROGRAM_PIPELINE] = "Illegal shader program pipeline",
};
void vrend_report_context_error_internal(const char *fname, struct vrend_context *ctx,
@@ -1015,7 +1072,6 @@ static void __report_core_warn(const char *fname, struct vrend_context *ctx,
#define GLES_WARN_POINT_SIZE 4
#define GLES_WARN_SEAMLESS_CUBE_MAP 5
#define GLES_WARN_LOD_BIAS 6
-#define GLES_WARN_TEXTURE_RECT 7
#define GLES_WARN_OFFSET_LINE 8
#define GLES_WARN_OFFSET_POINT 9
//#define GLES_WARN_ free slot 10
@@ -1027,7 +1083,7 @@ static void __report_core_warn(const char *fname, struct vrend_context *ctx,
#define GLES_WARN_TIMESTAMP 16
#define GLES_WARN_IMPLICIT_MSAA_SURFACE 17
-MAYBE_UNUSED
+ASSERTED
static const char *vrend_gles_warn_strings[] = {
[GLES_WARN_NONE] = "None",
[GLES_WARN_STIPPLE] = "Stipple",
@@ -1036,7 +1092,6 @@ static const char *vrend_gles_warn_strings[] = {
[GLES_WARN_POINT_SIZE] = "Point Size",
[GLES_WARN_SEAMLESS_CUBE_MAP] = "Seamless Cube Map",
[GLES_WARN_LOD_BIAS] = "Lod Bias",
- [GLES_WARN_TEXTURE_RECT] = "Texture Rect",
[GLES_WARN_OFFSET_LINE] = "Offset Line",
[GLES_WARN_OFFSET_POINT] = "Offset Point",
[GLES_WARN_FLATSHADE_FIRST] = "Flatshade First",
@@ -1048,17 +1103,17 @@ static const char *vrend_gles_warn_strings[] = {
[GLES_WARN_IMPLICIT_MSAA_SURFACE] = "Implicit MSAA Surface",
};
-static void __report_gles_warn(MAYBE_UNUSED const char *fname,
- MAYBE_UNUSED struct vrend_context *ctx,
- MAYBE_UNUSED enum virgl_ctx_errors error)
+static void __report_gles_warn(ASSERTED const char *fname,
+ ASSERTED struct vrend_context *ctx,
+ ASSERTED enum virgl_ctx_errors error)
{
VREND_DEBUG(dbg_gles, ctx, "%s: GLES violation - %s\n", fname, vrend_gles_warn_strings[error]);
}
#define report_gles_warn(ctx, error) __report_gles_warn(__func__, ctx, error)
-static void __report_gles_missing_func(MAYBE_UNUSED const char *fname,
- MAYBE_UNUSED struct vrend_context *ctx,
- MAYBE_UNUSED const char *missf)
+static void __report_gles_missing_func(ASSERTED const char *fname,
+ ASSERTED struct vrend_context *ctx,
+ ASSERTED const char *missf)
{
VREND_DEBUG(dbg_gles, ctx, "%s: GLES function %s is missing\n", fname, missf);
}
@@ -1145,6 +1200,9 @@ vrend_so_target_reference(struct vrend_so_target **ptr, struct vrend_so_target *
static void vrend_shader_dump(struct vrend_shader *shader)
{
const char *prefix = pipe_shader_to_prefix(shader->sel->type);
+ if (shader->sel->tmp_buf)
+ vrend_printf("%s: %d TGSI:\n%s\n", prefix, shader->id, shader->sel->tmp_buf);
+
vrend_printf("%s: %d GLSL:\n", prefix, shader->id);
strarray_dump_with_line_numbers(&shader->glsl_strings);
vrend_printf("\n");
@@ -1158,6 +1216,8 @@ static void vrend_shader_destroy(struct vrend_shader *shader)
vrend_destroy_program(ent);
}
+ if (shader->sel->sinfo.separable_program)
+ glDeleteProgram(shader->program_id);
glDeleteShader(shader->id);
strarray_free(&shader->glsl_strings, true);
free(shader);
@@ -1219,6 +1279,14 @@ static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx,
vrend_shader_dump(shader);
return false;
}
+
+ if (shader->sel->sinfo.separable_program) {
+ shader->program_id = glCreateProgram();
+ shader->last_pipeline_id = 0xffffffff;
+ glProgramParameteri(shader->program_id, GL_PROGRAM_SEPARABLE, GL_TRUE);
+ glAttachShader(shader->program_id, shader->id);
+ }
+
shader->is_compiled = true;
return true;
}
@@ -1265,25 +1333,36 @@ static bool vrend_is_timer_query(GLenum gltype)
gltype == GL_TIME_ELAPSED;
}
-static void vrend_use_program(struct vrend_sub_context *sub_ctx, GLuint program_id)
+static inline void use_program(struct vrend_sub_context *sub_ctx, uint32_t id)
{
- if (sub_ctx->program_id != program_id) {
- glUseProgram(program_id);
- sub_ctx->program_id = program_id;
- }
+ if (sub_ctx->current_program_id != id) {
+ sub_ctx->current_program_id = id;
+ glUseProgram(id);
+ }
}
-static void vrend_init_pstipple_texture(struct vrend_context *ctx)
+static inline void bind_pipeline(struct vrend_sub_context *sub_ctx, uint32_t id)
{
- glGenTextures(1, &ctx->pstipple_tex_id);
- glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 32, 32, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ if (sub_ctx->current_pipeline_id != id) {
+ sub_ctx->current_pipeline_id = id;
+ glBindProgramPipeline(id);
+ }
+}
- ctx->pstip_inited = true;
+static void vrend_use_program(struct vrend_sub_context *sub_ctx,
+ struct vrend_linked_shader_program *program)
+{
+ GLuint id = !program ? 0 :
+ program->is_pipeline ? program->id.pipeline :
+ program->id.program;
+ if (program && program->is_pipeline) {
+ use_program(sub_ctx, 0);
+ bind_pipeline(sub_ctx, id);
+ } else {
+ if (has_feature(feat_separate_shader_objects))
+ bind_pipeline(sub_ctx, 0);
+ use_program(sub_ctx, id);
+ }
}
static void vrend_depth_test_enable(struct vrend_context *ctx, bool depth_test_enable)
@@ -1323,7 +1402,7 @@ static void vrend_stencil_test_enable(struct vrend_sub_context *sub_ctx, bool st
}
}
-MAYBE_UNUSED
+ASSERTED
static void dump_stream_out(struct pipe_stream_output_info *so)
{
unsigned i;
@@ -1371,7 +1450,7 @@ static char *get_skip_str(int *skip_val)
return start_skip;
}
-static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_sub_context *sub_ctx,
+static void set_stream_out_varyings(ASSERTED struct vrend_sub_context *sub_ctx,
int prog_id,
struct vrend_shader_info *sinfo)
{
@@ -1431,8 +1510,28 @@ static void set_stream_out_varyings(MAYBE_UNUSED struct vrend_sub_context *sub_c
free(varyings[i]);
}
+static inline int
+vrend_get_uniform_location(struct vrend_linked_shader_program *sprog,
+ char *name, int shader_type)
+{
+ assert(!sprog->is_pipeline || sprog->ss[shader_type]->sel->sinfo.separable_program);
+
+ GLint id = sprog->is_pipeline ?
+ sprog->ss[shader_type]->program_id :
+ sprog->id.program;
+
+ return glGetUniformLocation(id, name);
+}
+
+static inline void
+vrend_set_active_pipeline_stage(struct vrend_linked_shader_program *sprog, int shader_type)
+{
+ if (sprog->is_pipeline && sprog->ss[shader_type])
+ glActiveShaderProgram(sprog->id.pipeline, sprog->ss[shader_type]->program_id);
+}
+
static int bind_sampler_locs(struct vrend_linked_shader_program *sprog,
- int shader_type, int next_sampler_id)
+ enum pipe_shader_type shader_type, int next_sampler_id)
{
const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo;
@@ -1457,13 +1556,17 @@ static int bind_sampler_locs(struct vrend_linked_shader_program *sprog,
} else
snprintf(name, 32, "%ssamp%d", prefix, i);
- glUniform1i(glGetUniformLocation(sprog->id, name), next_sampler_id++);
+ vrend_set_active_pipeline_stage(sprog, shader_type);
+ glUniform1i(vrend_get_uniform_location(sprog, name, shader_type),
+ next_sampler_id++);
if (sinfo->shadow_samp_mask & (1 << i)) {
snprintf(name, 32, "%sshadmask%d", prefix, i);
- sprog->shadow_samp_mask_locs[shader_type][sampler_index] = glGetUniformLocation(sprog->id, name);
+ sprog->shadow_samp_mask_locs[shader_type][sampler_index] =
+ vrend_get_uniform_location(sprog, name, shader_type);
snprintf(name, 32, "%sshadadd%d", prefix, i);
- sprog->shadow_samp_add_locs[shader_type][sampler_index] = glGetUniformLocation(sprog->id, name);
+ sprog->shadow_samp_add_locs[shader_type][sampler_index] =
+ vrend_get_uniform_location(sprog, name, shader_type);
}
sampler_index++;
}
@@ -1478,22 +1581,46 @@ static int bind_sampler_locs(struct vrend_linked_shader_program *sprog,
}
static void bind_const_locs(struct vrend_linked_shader_program *sprog,
- int shader_type)
+ enum pipe_shader_type shader_type)
{
if (sprog->ss[shader_type]->sel->sinfo.num_consts) {
char name[32];
snprintf(name, 32, "%sconst0", pipe_shader_to_prefix(shader_type));
- sprog->const_location[shader_type] = glGetUniformLocation(sprog->id, name);
+ sprog->const_location[shader_type] = vrend_get_uniform_location(sprog, name,
+ shader_type);
} else
- sprog->const_location[shader_type] = -1;
+ sprog->const_location[shader_type] = -1;
}
-static int bind_ubo_locs(struct vrend_linked_shader_program *sprog,
- int shader_type, int next_ubo_id)
+static inline GLuint
+vrend_get_uniform_block_index(struct vrend_linked_shader_program *sprog,
+ char *name, int shader_type)
+{
+ assert(!sprog->is_pipeline || sprog->ss[shader_type]->sel->sinfo.separable_program);
+
+ GLuint id = sprog->is_pipeline ?
+ sprog->ss[shader_type]->program_id :
+ sprog->id.program;
+
+ return glGetUniformBlockIndex(id, name);
+}
+
+static inline void
+vrend_uniform_block_binding(struct vrend_linked_shader_program *sprog,
+ int shader_type, int loc, int value)
{
- if (!has_feature(feat_ubo))
- return next_ubo_id;
+ assert(!sprog->is_pipeline || sprog->ss[shader_type]->sel->sinfo.separable_program);
+ GLint id = sprog->is_pipeline ?
+ sprog->ss[shader_type]->program_id :
+ sprog->id.program;
+
+ glUniformBlockBinding(id, loc, value);
+}
+
+static int bind_ubo_locs(struct vrend_linked_shader_program *sprog,
+ enum pipe_shader_type shader_type, int next_ubo_id)
+{
const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo;
if (sinfo->ubo_used_mask) {
const char *prefix = pipe_shader_to_prefix(shader_type);
@@ -1507,8 +1634,8 @@ static int bind_ubo_locs(struct vrend_linked_shader_program *sprog,
else
snprintf(name, 32, "%subo%d", prefix, ubo_idx);
- GLuint loc = glGetUniformBlockIndex(sprog->id, name);
- glUniformBlockBinding(sprog->id, loc, next_ubo_id++);
+ GLuint loc = vrend_get_uniform_block_index(sprog, name, shader_type);
+ vrend_uniform_block_binding(sprog, shader_type, loc, next_ubo_id++);
}
}
@@ -1517,8 +1644,77 @@ static int bind_ubo_locs(struct vrend_linked_shader_program *sprog,
return next_ubo_id;
}
+static void bind_virgl_block_loc(struct vrend_linked_shader_program *sprog,
+ enum pipe_shader_type shader_type,
+ int virgl_block_ubo_id)
+{
+ sprog->separate_virgl_block_id[shader_type] =
+ vrend_get_uniform_block_index(sprog, "VirglBlock", shader_type);
+
+ if (sprog->separate_virgl_block_id[shader_type] != GL_INVALID_INDEX) {
+ bool created_virgl_block_buffer = false;
+
+ if (sprog->virgl_block_bind == -1) {
+ sprog->virgl_block_bind = virgl_block_ubo_id;
+ if (sprog->ubo_sysval_buffer_id == -1) {
+ glGenBuffers(1, (GLuint *) &sprog->ubo_sysval_buffer_id);
+ created_virgl_block_buffer = true;
+ }
+ }
+
+ vrend_set_active_pipeline_stage(sprog, shader_type);
+ vrend_uniform_block_binding(sprog, shader_type,
+ sprog->separate_virgl_block_id[shader_type],
+ sprog->virgl_block_bind);
+
+ GLint virgl_block_size;
+ int prog_id = sprog->is_pipeline ? sprog->ss[shader_type]->program_id :
+ sprog->id.program;
+ glGetActiveUniformBlockiv(prog_id, sprog->separate_virgl_block_id[shader_type],
+ GL_UNIFORM_BLOCK_DATA_SIZE, &virgl_block_size);
+ assert((size_t) virgl_block_size >= sizeof(struct sysval_uniform_block));
+
+ if (created_virgl_block_buffer) {
+ glBindBuffer(GL_UNIFORM_BUFFER, sprog->ubo_sysval_buffer_id);
+ glBufferData(GL_UNIFORM_BUFFER, virgl_block_size, NULL, GL_DYNAMIC_DRAW);
+ glBindBuffer(GL_UNIFORM_BUFFER, 0);
+ }
+ }
+}
+
+static void rebind_ubo_and_sampler_locs(struct vrend_linked_shader_program *sprog,
+ enum pipe_shader_type last_shader)
+{
+ int next_sampler_id = 0;
+ int next_ubo_id = 0;
+
+ for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX;
+ shader_type <= last_shader;
+ shader_type++) {
+ if (!sprog->ss[shader_type])
+ continue;
+
+ next_sampler_id = bind_sampler_locs(sprog, shader_type, next_sampler_id);
+ next_ubo_id = bind_ubo_locs(sprog, shader_type, next_ubo_id);
+
+ if (sprog->is_pipeline)
+ sprog->ss[shader_type]->last_pipeline_id = sprog->id.pipeline;
+ }
+
+ /* Now `next_ubo_id` is the last ubo id, which is used for the VirglBlock. */
+ sprog->virgl_block_bind = -1;
+ for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX;
+ shader_type <= last_shader;
+ shader_type++) {
+ if (!sprog->ss[shader_type])
+ continue;
+
+ bind_virgl_block_loc(sprog, shader_type, next_ubo_id);
+ }
+}
+
static void bind_ssbo_locs(struct vrend_linked_shader_program *sprog,
- int shader_type)
+ enum pipe_shader_type shader_type)
{
if (!has_feature(feat_ssbo))
return;
@@ -1526,7 +1722,7 @@ static void bind_ssbo_locs(struct vrend_linked_shader_program *sprog,
}
static void bind_image_locs(struct vrend_linked_shader_program *sprog,
- int shader_type)
+ enum pipe_shader_type shader_type)
{
int i;
char name[32];
@@ -1553,7 +1749,8 @@ static void bind_image_locs(struct vrend_linked_shader_program *sprog,
struct vrend_array *img_array = &sinfo->image_arrays[i];
for (int j = 0; j < img_array->array_size; j++) {
snprintf(name, 32, "%simg%d[%d]", prefix, img_array->first, j);
- sprog->img_locs[shader_type][img_array->first + j] = glGetUniformLocation(sprog->id, name);
+ sprog->img_locs[shader_type][img_array->first + j] =
+ vrend_get_uniform_location(sprog, name, shader_type);
if (sprog->img_locs[shader_type][img_array->first + j] == -1)
vrend_printf( "failed to get uniform loc for image %s\n", name);
}
@@ -1562,7 +1759,8 @@ static void bind_image_locs(struct vrend_linked_shader_program *sprog,
for (i = 0; i < nsamp; i++) {
if (mask & (1 << i)) {
snprintf(name, 32, "%simg%d", prefix, i);
- sprog->img_locs[shader_type][i] = glGetUniformLocation(sprog->id, name);
+ sprog->img_locs[shader_type][i] =
+ vrend_get_uniform_location(sprog, name, shader_type);
if (sprog->img_locs[shader_type][i] == -1)
vrend_printf( "failed to get uniform loc for image %s\n", name);
} else {
@@ -1573,22 +1771,90 @@ static void bind_image_locs(struct vrend_linked_shader_program *sprog,
sprog->images_used_mask[shader_type] = mask;
}
+static bool vrend_link(GLuint id)
+{
+ GLint lret;
+ glLinkProgram(id);
+ glGetProgramiv(id, GL_LINK_STATUS, &lret);
+ if (lret == GL_FALSE) {
+ char infolog[65536];
+ int len;
+ glGetProgramInfoLog(id, 65536, &len, infolog);
+ vrend_printf("Error linking program:\n%s\n", infolog);
+ return false;
+ }
+ return true;
+}
+
+static bool vrend_link_separable_shader(struct vrend_sub_context *sub_ctx,
+ struct vrend_shader *shader, int type)
+{
+ int i;
+ char name[64];
+
+ if (type == PIPE_SHADER_VERTEX || type == PIPE_SHADER_GEOMETRY ||
+ type == PIPE_SHADER_TESS_EVAL)
+ set_stream_out_varyings(sub_ctx, shader->program_id, &shader->sel->sinfo);
+
+ if (type == PIPE_SHADER_FRAGMENT && shader->sel->sinfo.num_outputs > 1) {
+ bool dual_src_linked = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
+ if (dual_src_linked) {
+ if (has_feature(feat_dual_src_blend)) {
+ if (!vrend_state.use_gles) {
+ glBindFragDataLocationIndexed(shader->program_id, 0, 0, "fsout_c0");
+ glBindFragDataLocationIndexed(shader->program_id, 0, 1, "fsout_c1");
+ } else {
+ glBindFragDataLocationIndexedEXT(shader->program_id, 0, 0, "fsout_c0");
+ glBindFragDataLocationIndexedEXT(shader->program_id, 0, 1, "fsout_c1");
+ }
+ } else {
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
+ }
+ } else if (!vrend_state.use_gles && has_feature(feat_dual_src_blend)) {
+ /* On GLES without dual source blending we emit the layout directly in the shader
+ * so there is no need to define the binding here */
+ for (int i = 0; i < shader->sel->sinfo.num_outputs; ++i) {
+ if (shader->sel->sinfo.fs_output_layout[i] >= 0) {
+ char buf[64];
+ snprintf(buf, sizeof(buf), "fsout_c%d",
+ shader->sel->sinfo.fs_output_layout[i]);
+ glBindFragDataLocationIndexed(shader->program_id,
+ shader->sel->sinfo.fs_output_layout[i],
+ 0, buf);
+ }
+ }
+ }
+ }
+
+ if (type == PIPE_SHADER_VERTEX && has_feature(feat_gles31_vertex_attrib_binding)) {
+ uint32_t mask = shader->sel->sinfo.attrib_input_mask;
+ while (mask) {
+ i = u_bit_scan(&mask);
+ snprintf(name, 32, "in_%d", i);
+ glBindAttribLocation(shader->program_id, i, name);
+ }
+ }
+
+ shader->is_linked = vrend_link(shader->program_id);
+
+ if (!shader->is_linked) {
+ /* dump shaders */
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
+ vrend_shader_dump(shader);
+ }
+
+ return shader->is_linked;
+}
+
static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_context *ctx,
struct vrend_shader *cs)
{
struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
GLuint prog_id;
- GLint lret;
prog_id = glCreateProgram();
glAttachShader(prog_id, cs->id);
- glLinkProgram(prog_id);
- glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
- if (lret == GL_FALSE) {
- char infolog[65536];
- int len;
- glGetProgramInfoLog(prog_id, 65536, &len, infolog);
- vrend_printf("got error linking\n%s\n", infolog);
+ if (!vrend_link(prog_id)) {
/* dump shaders */
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_shader_dump(cs);
@@ -1599,10 +1865,10 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
sprog->ss[PIPE_SHADER_COMPUTE] = cs;
list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs);
- sprog->id = prog_id;
+ sprog->id.program = prog_id;
list_addtail(&sprog->head, &ctx->sub->cs_programs);
- vrend_use_program(ctx->sub, prog_id);
+ vrend_use_program(ctx->sub, sprog);
bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE, 0);
bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE, 0);
@@ -1612,54 +1878,92 @@ static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_co
return sprog;
}
+static inline bool
+vrend_link_stage(struct vrend_shader *stage) {
+ if (!stage->is_linked)
+ stage->is_linked = vrend_link(stage->program_id);
+ return stage->is_linked;
+}
+
static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_context *sub_ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs,
struct vrend_shader *tcs,
- struct vrend_shader *tes)
+ struct vrend_shader *tes,
+ bool separable)
{
struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
char name[64];
int i;
- GLuint prog_id;
- GLint lret;
- int last_shader;
+ GLuint prog_id = 0;
+ GLuint pipeline_id = 0;
+ GLuint vs_id, fs_id, gs_id, tes_id = 0;
+ enum pipe_shader_type last_shader;
if (!sprog)
return NULL;
- prog_id = glCreateProgram();
- glAttachShader(prog_id, vs->id);
- if (tcs && tcs->id > 0)
- glAttachShader(prog_id, tcs->id);
- if (tes && tes->id > 0)
- glAttachShader(prog_id, tes->id);
+ if (separable) {
+ glGenProgramPipelines(1, &pipeline_id);
+
+ vs_id = vs->program_id;
+ fs_id = fs->program_id;
+ if (gs)
+ gs_id = gs->program_id;
+ if (tes)
+ tes_id = tes->program_id;
+ } else { /* inseparable programs */
+ prog_id = glCreateProgram();
+ glAttachShader(prog_id, vs->id);
+ if (tcs && tcs->id > 0)
+ glAttachShader(prog_id, tcs->id);
+ if (tes && tes->id > 0)
+ glAttachShader(prog_id, tes->id);
+ if (gs && gs->id > 0)
+ glAttachShader(prog_id, gs->id);
+ glAttachShader(prog_id, fs->id);
+
+ /* For the non-separable codepath (the usual path), all these shader stages are
+ * contained inside a single program. */
+ vs_id = prog_id;
+ fs_id = prog_id;
+ if (gs)
+ gs_id = prog_id;
+ if (tes)
+ tes_id = prog_id;
+ }
if (gs) {
- if (gs->id > 0)
- glAttachShader(prog_id, gs->id);
- set_stream_out_varyings(sub_ctx, prog_id, &gs->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, gs_id, &gs->sel->sinfo);
} else if (tes)
- set_stream_out_varyings(sub_ctx, prog_id, &tes->sel->sinfo);
+ set_stream_out_varyings(sub_ctx, tes_id, &tes->sel->sinfo);
else
- set_stream_out_varyings(sub_ctx, prog_id, &vs->sel->sinfo);
- glAttachShader(prog_id, fs->id);
+ set_stream_out_varyings(sub_ctx, vs_id, &vs->sel->sinfo);
if (fs->sel->sinfo.num_outputs > 1) {
- if (util_blend_state_is_dual(&sub_ctx->blend_state, 0)) {
+ sprog->dual_src_linked = util_blend_state_is_dual(&sub_ctx->blend_state, 0);
+ if (sprog->dual_src_linked) {
if (has_feature(feat_dual_src_blend)) {
- glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
- glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
+ if (!vrend_state.use_gles) {
+ glBindFragDataLocationIndexed(fs_id, 0, 0, "fsout_c0");
+ glBindFragDataLocationIndexed(fs_id, 0, 1, "fsout_c1");
+ } else {
+ glBindFragDataLocationIndexedEXT(fs_id, 0, 0, "fsout_c0");
+ glBindFragDataLocationIndexedEXT(fs_id, 0, 1, "fsout_c1");
+ }
} else {
vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0);
}
- sprog->dual_src_linked = true;
- } else {
- if (has_feature(feat_dual_src_blend)) {
- glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
- glBindFragDataLocationIndexed(prog_id, 1, 0, "fsout_c1");
+ } else if (!vrend_state.use_gles && has_feature(feat_dual_src_blend)) {
+ /* On GLES without dual source blending we emit the layout directly in the shader
+ * so there is no need to define the binding here */
+ for (int i = 0; i < fs->sel->sinfo.num_outputs; ++i) {
+ if (fs->sel->sinfo.fs_output_layout[i] >= 0) {
+ char buf[64];
+ snprintf(buf, sizeof(buf), "fsout_c%d", fs->sel->sinfo.fs_output_layout[i]);
+ glBindFragDataLocationIndexed(fs_id, fs->sel->sinfo.fs_output_layout[i], 0, buf);
+ }
}
- sprog->dual_src_linked = false;
}
} else
sprog->dual_src_linked = false;
@@ -1669,29 +1973,58 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_c
while (mask) {
i = u_bit_scan(&mask);
snprintf(name, 32, "in_%d", i);
- glBindAttribLocation(prog_id, i, name);
+ glBindAttribLocation(vs_id, i, name);
}
}
- glLinkProgram(prog_id);
+ bool link_success;
+ if (separable) { /* separable programs */
+ link_success = vrend_link_stage(vs);
+ link_success &= vrend_link_stage(fs);
+ if (gs) link_success &= vrend_link_stage(gs);
+ if (tcs) link_success &= vrend_link_stage(tcs);
+ if (tes) link_success &= vrend_link_stage(tes);
+ } else { /* non-separable programs */
+ link_success = vrend_link(prog_id);
+ }
+
+ if (!link_success) {
+ if (separable) {
+ glDeleteProgramPipelines(1, &pipeline_id);
+ } else {
+ glDeleteProgram(prog_id);
+ }
+
+ free(sprog);
- glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
- if (lret == GL_FALSE) {
- char infolog[65536];
- int len;
- glGetProgramInfoLog(prog_id, 65536, &len, infolog);
- vrend_printf("got error linking\n%s\n", infolog);
/* dump shaders */
vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_shader_dump(vs);
+ if (tcs)
+ vrend_shader_dump(tcs);
+ if (tes)
+ vrend_shader_dump(tes);
if (gs)
vrend_shader_dump(gs);
vrend_shader_dump(fs);
- glDeleteProgram(prog_id);
- free(sprog);
return NULL;
}
+ if (separable) {
+ glUseProgramStages(pipeline_id, GL_VERTEX_SHADER_BIT, vs->program_id);
+ if (tcs) glUseProgramStages(pipeline_id, GL_TESS_CONTROL_SHADER_BIT, tcs->program_id);
+ if (tes) glUseProgramStages(pipeline_id, GL_TESS_EVALUATION_SHADER_BIT, tes->program_id);
+ if (gs) glUseProgramStages(pipeline_id, GL_GEOMETRY_SHADER_BIT, gs->program_id);
+ glUseProgramStages(pipeline_id, GL_FRAGMENT_SHADER_BIT, fs->program_id);
+
+ glValidateProgramPipeline(pipeline_id);
+ GLint validation_status;
+ glGetProgramPipelineiv(pipeline_id, GL_VALIDATE_STATUS, &validation_status);
+ if (!validation_status) {
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_PROGRAM_PIPELINE, 0);
+ }
+ }
+
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
sprog->vs_fs_key = (((uint64_t)fs->id) << 32) | (vs->id & ~VREND_PROGRAM_NQUEUE_MASK) |
@@ -1711,33 +2044,31 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_c
list_add(&sprog->sl[PIPE_SHADER_TESS_EVAL], &tes->programs);
last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
- sprog->id = prog_id;
+
+ sprog->is_pipeline = separable;
+ if (sprog->is_pipeline)
+ sprog->id.pipeline = pipeline_id;
+ else
+ sprog->id.program = prog_id;
list_addtail(&sprog->head, &sub_ctx->gl_programs[vs->id & VREND_PROGRAM_NQUEUE_MASK]);
- if (fs->key.pstipple_tex)
- sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
- else
- sprog->fs_stipple_loc = -1;
- if (vrend_shader_needs_alpha_func(&fs->key))
- sprog->fs_alpha_ref_val_loc = glGetUniformLocation(prog_id, "alpha_ref_val");
- else
- sprog->fs_alpha_ref_val_loc = -1;
- sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust_y");
+ sprog->virgl_block_bind = -1;
+ sprog->ubo_sysval_buffer_id = -1;
- vrend_use_program(sub_ctx, prog_id);
+ vrend_use_program(sub_ctx, sprog);
- int next_ubo_id = 0, next_sampler_id = 0;
- for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= last_shader; shader_type++) {
+ for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX;
+ shader_type <= last_shader;
+ shader_type++) {
if (!sprog->ss[shader_type])
continue;
- next_sampler_id = bind_sampler_locs(sprog, shader_type, next_sampler_id);
bind_const_locs(sprog, shader_type);
- next_ubo_id = bind_ubo_locs(sprog, shader_type, next_ubo_id);
bind_image_locs(sprog, shader_type);
bind_ssbo_locs(sprog, shader_type);
}
+ rebind_ubo_and_sampler_locs(sprog, last_shader);
if (!has_feature(feat_gles31_vertex_attrib_binding)) {
if (vs->sel->sinfo.num_inputs) {
@@ -1745,19 +2076,13 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_c
if (sprog->attrib_locs) {
for (i = 0; i < vs->sel->sinfo.num_inputs; i++) {
snprintf(name, 32, "in_%d", i);
- sprog->attrib_locs[i] = glGetAttribLocation(prog_id, name);
+ sprog->attrib_locs[i] = glGetAttribLocation(vs_id, name);
}
}
} else
sprog->attrib_locs = NULL;
}
- if (vs->var_sinfo.num_ucp) {
- for (i = 0; i < vs->var_sinfo.num_ucp; i++) {
- snprintf(name, 32, "clipp[%d]", i);
- sprog->clip_locs[i] = glGetUniformLocation(prog_id, name);
- }
- }
return sprog;
}
@@ -1818,7 +2143,15 @@ static void vrend_destroy_program(struct vrend_linked_shader_program *ent)
if (ent->ref_context && ent->ref_context->prog == ent)
ent->ref_context->prog = NULL;
- glDeleteProgram(ent->id);
+ if (ent->ubo_sysval_buffer_id != -1) {
+ glDeleteBuffers(1, (GLuint *) &ent->ubo_sysval_buffer_id);
+ }
+
+ if (ent->is_pipeline)
+ glDeleteProgramPipelines(1, &ent->id.pipeline);
+ else
+ glDeleteProgram(ent->id.program);
+
list_del(&ent->head);
for (i = PIPE_SHADER_VERTEX; i <= PIPE_SHADER_COMPUTE; i++) {
@@ -1913,14 +2246,19 @@ int vrend_create_surface(struct vrend_context *ctx,
int first_layer = surf->val1 & 0xffff;
int last_layer = (surf->val1 >> 16) & 0xffff;
- if ((first_layer != last_layer &&
- (first_layer != 0 || (last_layer != (int)util_max_layer(&res->base, surf->val0)))) ||
- surf->format != res->base.format) {
+ bool needs_view = first_layer != last_layer &&
+ (first_layer != 0 || (last_layer != (int)util_max_layer(&res->base, surf->val0)));
+ if (!needs_view && surf->format != res->base.format)
+ needs_view = true;
+
+ if (needs_view && vrend_resource_supports_view(res, surf->format)) {
GLenum target = res->target;
GLenum internalformat = tex_conv_table[format].internalformat;
- if (vrend_resource_has_24bpp_internal_format(res))
- internalformat = GL_RGB8;
+ if (target == GL_TEXTURE_CUBE_MAP && first_layer == last_layer) {
+ first_layer = 0;
+ last_layer = 5;
+ }
VREND_DEBUG(dbg_tex, ctx, "Create texture view from %s for %s\n",
util_format_name(res->base.format),
@@ -1928,13 +2266,17 @@ int vrend_create_surface(struct vrend_context *ctx,
glGenTextures(1, &surf->id);
if (vrend_state.use_gles) {
- if (target == GL_TEXTURE_RECTANGLE_NV ||
- target == GL_TEXTURE_1D)
+ if (target == GL_TEXTURE_1D)
target = GL_TEXTURE_2D;
else if (target == GL_TEXTURE_1D_ARRAY)
target = GL_TEXTURE_2D_ARRAY;
}
+ if (target == GL_TEXTURE_RECTANGLE_NV &&
+ !(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TARGET_RECTANGLE)) {
+ target = GL_TEXTURE_2D;
+ }
+
glTextureView(surf->id, target, res->id, internalformat,
0, res->base.last_level + 1,
first_layer, last_layer - first_layer + 1);
@@ -2004,6 +2346,9 @@ static void vrend_destroy_vertex_elements_object(void *obj_ptr)
{
struct vrend_vertex_element_array *v = obj_ptr;
+ if (v == v->owning_sub->ve)
+ v->owning_sub->ve = NULL;
+
if (has_feature(feat_gles31_vertex_attrib_binding)) {
glDeleteVertexArrays(1, &v->id);
}
@@ -2038,14 +2383,14 @@ static GLuint convert_wrap(int wrap)
}
}
-static inline GLenum convert_mag_filter(unsigned int filter)
+static inline GLenum convert_mag_filter(enum pipe_tex_filter filter)
{
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST;
return GL_LINEAR;
}
-static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_filter)
+static inline GLenum convert_min_filter(enum pipe_tex_filter filter, enum pipe_tex_mipfilter mip_filter)
{
if (mip_filter == PIPE_TEX_MIPFILTER_NONE)
return convert_mag_filter(filter);
@@ -2115,7 +2460,9 @@ int vrend_create_sampler_state(struct vrend_context *ctx,
}
apply_sampler_border_color(state->ids[i], templ->border_color.ui);
- glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT, i == 0 ? GL_SKIP_DECODE_EXT : GL_DECODE_EXT);
+ if (has_feature(feat_texture_srgb_decode))
+ glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT,
+ i == 0 ? GL_SKIP_DECODE_EXT : GL_DECODE_EXT);
}
}
ret_handle = vrend_renderer_object_insert(ctx, state, handle,
@@ -2129,7 +2476,7 @@ int vrend_create_sampler_state(struct vrend_context *ctx,
return 0;
}
-static inline GLenum to_gl_swizzle(int swizzle)
+static inline GLenum to_gl_swizzle(enum pipe_swizzle swizzle)
{
switch (swizzle) {
case PIPE_SWIZZLE_RED: return GL_RED;
@@ -2144,6 +2491,21 @@ static inline GLenum to_gl_swizzle(int swizzle)
}
}
+static inline enum pipe_swizzle to_pipe_swizzle(GLenum swizzle)
+{
+ switch (swizzle) {
+ case GL_RED: return PIPE_SWIZZLE_RED;
+ case GL_GREEN: return PIPE_SWIZZLE_GREEN;
+ case GL_BLUE: return PIPE_SWIZZLE_BLUE;
+ case GL_ALPHA: return PIPE_SWIZZLE_ALPHA;
+ case GL_ZERO: return PIPE_SWIZZLE_ZERO;
+ case GL_ONE: return PIPE_SWIZZLE_ONE;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
int vrend_create_sampler_view(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle, uint32_t format,
@@ -2183,15 +2545,20 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
view->target = tgsitargettogltarget(pipe_target, res->base.nr_samples);
- /* Work around TEXTURE_RECTANGLE and TEXTURE_1D missing on GLES */
+ /* Work around TEXTURE_1D missing on GLES */
if (vrend_state.use_gles) {
- if (view->target == GL_TEXTURE_RECTANGLE_NV ||
- view->target == GL_TEXTURE_1D)
+ if (view->target == GL_TEXTURE_1D)
view->target = GL_TEXTURE_2D;
else if (view->target == GL_TEXTURE_1D_ARRAY)
view->target = GL_TEXTURE_2D_ARRAY;
}
+ if (view->target == GL_TEXTURE_RECTANGLE_NV &&
+ !(tex_conv_table[view->format].flags & VIRGL_TEXTURE_CAN_TARGET_RECTANGLE)) {
+ view->emulated_rect = true;
+ view->target = GL_TEXTURE_2D;
+ }
+
view->val0 = val0;
view->val1 = val1;
@@ -2235,15 +2602,7 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]];
}
- if (vrend_resource_is_emulated_bgra(view->texture)) {
- uint8_t temp = swizzle[0];
- swizzle[0] = swizzle[2];
- swizzle[2] = temp;
- VREND_DEBUG(dbg_bgra, ctx, "swizzling sampler channels on %s resource: (%d %d %d %d)\n",
- util_format_name(view->texture->base.format),
- swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
- }
- for (unsigned i = 0; i < 4; ++i)
+ for (enum pipe_swizzle i = 0; i < 4; ++i)
view->gl_swizzle[i] = to_gl_swizzle(swizzle[i]);
if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
@@ -2285,6 +2644,24 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
int max_level = (view->val1 >> 8) & 0xff;
view->levels = (max_level - base_level) + 1;
+ /* texture views for eglimage-backed bgr* resources are usually not
+ * supported since they cause unintended red/blue channel-swapping.
+ * Since we have control over the swizzle parameters of the sampler, we
+ * can just compensate in this case by swapping the red/blue channels
+ * back, and still benefit from automatic srgb decoding.
+ * If the red/blue swap is intended, we just let it happen and don't
+ * need to explicit change to the sampler's swizzle parameters. */
+ if (!vrend_resource_supports_view(view->texture, view->format) &&
+ vrend_format_is_bgra(view->format)) {
+ VREND_DEBUG(dbg_tex, ctx, "texture view with red/blue swizzle created for EGL-backed texture sampler"
+ " (format: %s; view: %s)\n",
+ util_format_name(view->texture->base.format),
+ util_format_name(view->format));
+ GLint temp = view->gl_swizzle[0];
+ view->gl_swizzle[0] = view->gl_swizzle[2];
+ view->gl_swizzle[2] = temp;
+ }
+
glTextureView(view->id, view->target, view->texture->id, internalformat,
base_level, view->levels,
base_layer, max_layer - base_layer + 1);
@@ -2294,10 +2671,7 @@ int vrend_create_sampler_view(struct vrend_context *ctx,
if (util_format_is_depth_or_stencil(view->format)) {
if (vrend_state.use_core_profile == false) {
/* setting depth texture mode is deprecated in core profile */
- if (view->depth_texture_mode != GL_RED) {
- glTexParameteri(view->target, GL_DEPTH_TEXTURE_MODE, GL_RED);
- view->depth_texture_mode = GL_RED;
- }
+ glTexParameteri(view->target, GL_DEPTH_TEXTURE_MODE, GL_RED);
}
if (has_feature(feat_stencil_texturing)) {
const struct util_format_description *desc = util_format_description(view->format);
@@ -2375,9 +2749,9 @@ static void vrend_framebuffer_texture_2d(struct vrend_resource *res,
}
static
-void debug_texture(MAYBE_UNUSED const char *f, const struct vrend_resource *gt)
+void debug_texture(ASSERTED const char *f, const struct vrend_resource *gt)
{
- MAYBE_UNUSED const struct pipe_resource *pr = &gt->base;
+ ASSERTED const struct pipe_resource *pr = &gt->base;
#define PRINT_TARGET(X) case X: vrend_printf( #X); break
VREND_DEBUG_EXT(dbg_tex, NULL,
vrend_printf("%s: ", f);
@@ -2565,31 +2939,31 @@ static void vrend_hw_emit_framebuffer_state(struct vrend_sub_context *sub_ctx)
}
sub_ctx->swizzle_output_rgb_to_bgr = 0;
- sub_ctx->convert_linear_to_srgb_on_write = 0;
+ sub_ctx->needs_manual_srgb_encode_bitmask = 0;
for (int i = 0; i < sub_ctx->nr_cbufs; i++) {
- if (sub_ctx->surf[i]) {
- struct vrend_surface *surf = sub_ctx->surf[i];
- if (vrend_resource_is_emulated_bgra(surf->texture)) {
- VREND_DEBUG(dbg_bgra, sub_ctx->parent, "swizzling output for 0x%x (surface format is %s; resource format is %s)\n",
- i, util_format_name(surf->format), util_format_name(surf->texture->base.format));
- sub_ctx->swizzle_output_rgb_to_bgr |= 1 << i;
- }
+ struct vrend_surface *surf = sub_ctx->surf[i];
+ if (!surf)
+ continue;
- /* [R8G8B8|B8G8R8]X8_UNORM formatted resources imported to mesa as EGL images occupy 24bpp instead of
- * more common 32bpp (with an ignored alpha channel). GL_RGB8 internal format must be specified when
- * interacting with these textures in the host driver. Unfortunately, GL_SRGB8 is not guaranteed to
- * be color-renderable on either GL or GLES, and is typically not supported. Thus, rendering to such
- * surfaces by using an SRGB texture view will have no colorspace conversion effects.
- * To work around this, manual colorspace conversion is used instead in the fragment shader and
- * during glClearColor() setting.
- */
- if (vrend_resource_has_24bpp_internal_format(surf->texture) && util_format_is_srgb(surf->format)) {
- VREND_DEBUG(dbg_tex, sub_ctx->parent,
- "manually converting linear->srgb for EGL-backed framebuffer color attachment 0x%x"
- " (surface format is %s; resource format is %s)\n",
- i, util_format_name(surf->format), util_format_name(surf->texture->base.format));
- sub_ctx->convert_linear_to_srgb_on_write |= 1 << i;
- }
+ /* glTextureView() is not applied to eglimage-backed surfaces, because it
+ * causes unintended format interpretation errors. But a swizzle may still
+ * be necessary, e.g. for rgb* views on bgr* resources. Ensure this
+ * happens by adding a shader swizzle to the final write of such surfaces.
+ */
+ if (vrend_resource_needs_redblue_swizzle(surf->texture, surf->format))
+ sub_ctx->swizzle_output_rgb_to_bgr |= 1 << i;
+
+ /* glTextureView() on eglimage-backed bgr* textures for is not supported.
+ * To work around this for colorspace conversion, views are avoided
+ * manual colorspace conversion is instead injected in the fragment
+ * shader writing to such surfaces and during glClearColor(). */
+ if (util_format_is_srgb(surf->format) &&
+ !vrend_resource_supports_view(surf->texture, surf->format)) {
+ VREND_DEBUG(dbg_tex, sub_ctx->parent,
+ "manually converting linear->srgb for EGL-backed framebuffer color attachment 0x%x"
+ " (surface format is %s; resource format is %s)\n",
+ i, util_format_name(surf->format), util_format_name(surf->texture->base.format));
+ sub_ctx->needs_manual_srgb_encode_bitmask |= 1 << i;
}
}
@@ -2605,7 +2979,7 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
int old_num;
GLenum status;
GLint new_height = -1;
- bool new_ibf = false;
+ bool new_fbo_origin_upper_left = false;
struct vrend_sub_context *sub_ctx = ctx->sub;
@@ -2627,7 +3001,6 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
old_num = sub_ctx->nr_cbufs;
sub_ctx->nr_cbufs = nr_cbufs;
- sub_ctx->old_nr_cbufs = old_num;
for (i = 0; i < (int)nr_cbufs; i++) {
if (surf_handle[i] != 0) {
@@ -2655,10 +3028,10 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
/* find a buffer to set fb_height from */
if (sub_ctx->nr_cbufs == 0 && !sub_ctx->zsurf) {
new_height = 0;
- new_ibf = false;
+ new_fbo_origin_upper_left = false;
} else if (sub_ctx->nr_cbufs == 0) {
new_height = u_minify(sub_ctx->zsurf->texture->base.height0, sub_ctx->zsurf->val0);
- new_ibf = sub_ctx->zsurf->texture->y_0_top ? true : false;
+ new_fbo_origin_upper_left = sub_ctx->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
@@ -2673,13 +3046,14 @@ void vrend_set_framebuffer_state(struct vrend_context *ctx,
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
- new_ibf = surf->texture->y_0_top ? true : false;
+ new_fbo_origin_upper_left = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
- if (sub_ctx->fb_height != (uint32_t)new_height || sub_ctx->inverted_fbo_content != new_ibf) {
+ if (sub_ctx->fb_height != (uint32_t)new_height ||
+ sub_ctx->fbo_origin_upper_left != new_fbo_origin_upper_left) {
sub_ctx->fb_height = new_height;
- sub_ctx->inverted_fbo_content = new_ibf;
+ sub_ctx->fbo_origin_upper_left = new_fbo_origin_upper_left;
sub_ctx->viewport_state_dirty = (1 << 0);
}
}
@@ -2771,8 +3145,12 @@ void vrend_set_viewport_states(struct vrend_context *ctx,
}
if (idx == 0) {
- if (ctx->sub->viewport_is_negative != viewport_is_negative)
+ if (ctx->sub->viewport_is_negative != viewport_is_negative) {
ctx->sub->viewport_is_negative = viewport_is_negative;
+ ctx->sub->sysvalue_data.winsys_adjust_y =
+ viewport_is_negative ? -1.f : 1.f;
+ ctx->sub->sysvalue_data_cookie++;
+ }
}
}
}
@@ -2867,12 +3245,13 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
v->elements[i].type = type;
if (desc->channel[0].normalized)
v->elements[i].norm = GL_TRUE;
- if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z)
- v->elements[i].nr_chan = GL_BGRA;
- else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
+ if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
v->elements[i].nr_chan = 3;
else
v->elements[i].nr_chan = desc->nr_channels;
+
+ if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z)
+ v->zyxw_bitmask |= 1 << i;
}
if (has_feature(feat_gles31_vertex_attrib_binding)) {
@@ -2880,15 +3259,16 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
glBindVertexArray(v->id);
for (i = 0; i < num_elements; i++) {
struct vrend_vertex_element *ve = &v->elements[i];
+ GLint size = !vrend_state.use_gles && (v->zyxw_bitmask & (1 << i)) ? GL_BGRA : ve->nr_chan;
if (util_format_is_pure_integer(ve->base.src_format)) {
UPDATE_INT_SIGN_MASK(ve->base.src_format, i,
v->signed_int_bitmask,
v->unsigned_int_bitmask);
- glVertexAttribIFormat(i, ve->nr_chan, ve->type, ve->base.src_offset);
+ glVertexAttribIFormat(i, size, ve->type, ve->base.src_offset);
}
else
- glVertexAttribFormat(i, ve->nr_chan, ve->type, ve->norm, ve->base.src_offset);
+ glVertexAttribFormat(i, size, ve->type, ve->norm, ve->base.src_offset);
glVertexAttribBinding(i, ve->base.vertex_buffer_index);
glVertexBindingDivisor(i, ve->base.instance_divisor);
glEnableVertexAttribArray(i);
@@ -2900,6 +3280,7 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
FREE(v);
return ENOMEM;
}
+ v->owning_sub = ctx->sub;
return 0;
}
@@ -2955,9 +3336,6 @@ void vrend_set_uniform_buffer(struct vrend_context *ctx,
{
struct vrend_resource *res;
- if (!has_feature(feat_ubo))
- return;
-
struct pipe_constant_buffer *cbs = &ctx->sub->cbs[shader][index];
const uint32_t mask = 1u << index;
@@ -3065,6 +3443,48 @@ void vrend_set_num_vbo(struct vrend_context *ctx,
vrend_set_num_vbo_sub(ctx->sub, num_vbo);
}
+static GLenum vrend_get_arb_format(enum virgl_formats format)
+{
+ switch (format) {
+ case VIRGL_FORMAT_A8_UNORM: return GL_R8;
+ case VIRGL_FORMAT_A8_SINT: return GL_R8I;
+ case VIRGL_FORMAT_A8_UINT: return GL_R8UI;
+ case VIRGL_FORMAT_L8_UNORM: return GL_R8;
+ case VIRGL_FORMAT_L8_SINT: return GL_R8I;
+ case VIRGL_FORMAT_L8_UINT: return GL_R8UI;
+ case VIRGL_FORMAT_L16_UNORM: return GL_R16F;
+ case VIRGL_FORMAT_L16_SINT: return GL_R16I;
+ case VIRGL_FORMAT_L16_UINT: return GL_R16UI;
+ case VIRGL_FORMAT_L16_FLOAT: return GL_R16F;
+ case VIRGL_FORMAT_L32_SINT: return GL_R32F;
+ case VIRGL_FORMAT_L32_UINT: return GL_R32I;
+ case VIRGL_FORMAT_L32_FLOAT: return GL_R32UI;
+ case VIRGL_FORMAT_L8A8_UNORM: return GL_RG8;
+ case VIRGL_FORMAT_L8A8_SINT: return GL_RG8I;
+ case VIRGL_FORMAT_L8A8_UINT: return GL_RG8UI;
+ case VIRGL_FORMAT_L16A16_UNORM: return GL_RG16;
+ case VIRGL_FORMAT_L16A16_SINT: return GL_RG16I;
+ case VIRGL_FORMAT_L16A16_UINT: return GL_RG16UI;
+ case VIRGL_FORMAT_L16A16_FLOAT: return GL_RG16F;
+ case VIRGL_FORMAT_L32A32_FLOAT: return GL_RG32F;
+ case VIRGL_FORMAT_L32A32_SINT: return GL_RG32I;
+ case VIRGL_FORMAT_L32A32_UINT: return GL_RG32UI;
+ case VIRGL_FORMAT_I8_UNORM: return GL_R8;
+ case VIRGL_FORMAT_I8_SINT: return GL_R8I;
+ case VIRGL_FORMAT_I8_UINT: return GL_R8UI;
+ case VIRGL_FORMAT_I16_UNORM: return GL_R16;
+ case VIRGL_FORMAT_I16_SINT: return GL_R16I;
+ case VIRGL_FORMAT_I16_UINT: return GL_R16UI;
+ case VIRGL_FORMAT_I16_FLOAT: return GL_R16F;
+ case VIRGL_FORMAT_I32_FLOAT: return GL_R32F;
+ case VIRGL_FORMAT_I32_SINT: return GL_R32I;
+ case VIRGL_FORMAT_I32_UINT: return GL_R32UI;
+ default:
+ vrend_printf("Texture format %s unsupported for texture buffers\n", util_format_name(format));
+ return GL_R8;
+ }
+}
+
void vrend_set_single_sampler_view(struct vrend_context *ctx,
uint32_t shader_type,
uint32_t index,
@@ -3098,10 +3518,7 @@ void vrend_set_single_sampler_view(struct vrend_context *ctx,
if (util_format_is_depth_or_stencil(view->format)) {
if (vrend_state.use_core_profile == false) {
/* setting depth texture mode is deprecated in core profile */
- if (view->depth_texture_mode != GL_RED) {
- glTexParameteri(view->texture->target, GL_DEPTH_TEXTURE_MODE, GL_RED);
- view->depth_texture_mode = GL_RED;
- }
+ glTexParameteri(view->texture->target, GL_DEPTH_TEXTURE_MODE, GL_RED);
}
if (has_feature(feat_stencil_texturing)) {
const struct util_format_description *desc = util_format_description(view->format);
@@ -3155,11 +3572,19 @@ void vrend_set_single_sampler_view(struct vrend_context *ctx,
glBindTexture(GL_TEXTURE_BUFFER, view->texture->tbo_tex_id);
internalformat = tex_conv_table[view->format].internalformat;
+
+ if (internalformat == GL_NONE ||
+ (vrend_state.use_gles && internalformat == GL_ALPHA8)) {
+ internalformat = vrend_get_arb_format(view->format);
+ }
+
if (has_feature(feat_texture_buffer_range)) {
unsigned offset = view->val0;
unsigned size = view->val1 - view->val0 + 1;
int blsize = util_format_get_blocksize(view->format);
+ if (offset + size > vrend_state.max_texture_buffer_size)
+ size = vrend_state.max_texture_buffer_size - offset;
offset *= blsize;
size *= blsize;
glTexBufferRange(GL_TEXTURE_BUFFER, internalformat, view->texture->id, offset, size);
@@ -3205,6 +3630,7 @@ void vrend_set_single_image_view(struct vrend_context *ctx,
return;
}
iview->texture = res;
+ iview->vformat = format;
iview->format = tex_conv_table[format].internalformat;
iview->access = access;
iview->u.buf.offset = layer_offset;
@@ -3354,10 +3780,10 @@ static inline void vrend_sync_shader_io(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
struct vrend_shader_key *key)
{
- unsigned type = sel->type;
+ enum pipe_shader_type type = sel->type;
- int prev_type = (type != PIPE_SHADER_VERTEX) ?
- PIPE_SHADER_VERTEX : -1;
+ enum pipe_shader_type prev_type =
+ (type != PIPE_SHADER_VERTEX) ? PIPE_SHADER_VERTEX : PIPE_SHADER_INVALID;
/* Gallium sends and binds the shaders in the reverse order, so if an
* old shader is still bound we should ignore the "previous" (as in
@@ -3384,22 +3810,33 @@ static inline void vrend_sync_shader_io(struct vrend_sub_context *sub_ctx,
}
}
- struct vrend_shader_selector *prev = sub_ctx->shaders[prev_type];
- if (prev_type != -1 && prev) {
- key->input = prev->sinfo.out;
- key->force_invariant_inputs = prev->sinfo.invariant_outputs;
- memcpy(key->prev_stage_generic_and_patch_outputs_layout,
- prev->sinfo.generic_outputs_layout,
- prev->sinfo.out.num_generic_and_patch * sizeof (struct vrend_layout_info));
+ struct vrend_shader_selector *prev = prev_type != PIPE_SHADER_INVALID ? sub_ctx->shaders[prev_type] : NULL;
+
+ if (prev) {
+ if (!prev->sinfo.separable_program || !sel->sinfo.separable_program) {
+ key->require_input_arrays = prev->sinfo.has_output_arrays;
+ key->in_generic_expected_mask = prev->sinfo.out_generic_emitted_mask;
+ key->in_texcoord_expected_mask = prev->sinfo.out_texcoord_emitted_mask;
+ key->in_patch_expected_mask = prev->sinfo.out_patch_emitted_mask;
+ key->in_arrays = prev->sinfo.output_arrays;
+
+ memcpy(key->force_invariant_inputs, prev->sinfo.invariant_outputs, 4 * sizeof(uint32_t));
+ }
+
+ key->num_in_clip = sub_ctx->shaders[prev_type]->current->var_sinfo.num_out_clip;
+ key->num_in_cull = sub_ctx->shaders[prev_type]->current->var_sinfo.num_out_cull;
+
+ if (vrend_state.use_gles && type == PIPE_SHADER_FRAGMENT)
+ key->fs.available_color_in_bits = sub_ctx->shaders[prev_type]->current->var_sinfo.legacy_color_bits;
}
- int next_type = -1;
+ enum pipe_shader_type next_type = PIPE_SHADER_INVALID;
if (type == PIPE_SHADER_FRAGMENT) {
- key->fs.invert_origin = !sub_ctx->inverted_fbo_content;
+ key->fs.lower_left_origin = !sub_ctx->fbo_origin_upper_left;
key->fs.swizzle_output_rgb_to_bgr = sub_ctx->swizzle_output_rgb_to_bgr;
- key->fs.convert_linear_to_srgb_on_write = sub_ctx->convert_linear_to_srgb_on_write;
+ key->fs.needs_manual_srgb_encode_bitmask = sub_ctx->needs_manual_srgb_encode_bitmask;
if (vrend_state.use_gles && can_emulate_logicop(sub_ctx->blend_state.logicop_func)) {
key->fs.logicop_enabled = sub_ctx->blend_state.logicop_enable;
key->fs.logicop_func = sub_ctx->blend_state.logicop_func;
@@ -3407,34 +3844,29 @@ static inline void vrend_sync_shader_io(struct vrend_sub_context *sub_ctx,
int fs_prim_mode = sub_ctx->prim_mode; // inherit draw-call's mode
// Only use coord_replace if frag shader receives GL_POINTS
- switch (prev_type) {
+ if (prev) {
+ switch (prev->type) {
case PIPE_SHADER_TESS_EVAL:
- if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
+ if (prev->sinfo.tes_point_mode)
fs_prim_mode = PIPE_PRIM_POINTS;
break;
case PIPE_SHADER_GEOMETRY:
- fs_prim_mode = sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
+ fs_prim_mode = prev->sinfo.gs_out_prim;
break;
+ default:
+ break;
+ }
}
+
key->fs.prim_is_points = (fs_prim_mode == PIPE_PRIM_POINTS);
key->fs.coord_replace = sub_ctx->rs_state.point_quad_rasterization
&& key->fs.prim_is_points
? sub_ctx->rs_state.sprite_coord_enable
: 0x0;
- if (prev_type != -1 && sub_ctx->shaders[prev_type]) {
- key->num_clip = sub_ctx->shaders[prev_type]->current->var_sinfo.num_clip;
- key->num_cull = sub_ctx->shaders[prev_type]->current->var_sinfo.num_cull;
- }
-
} else {
- if (sub_ctx->shaders[PIPE_SHADER_FRAGMENT]) {
- struct vrend_shader *fs =
- sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current;
- key->compiled_fs_uid = fs->uid;
- key->fs_info = &fs->var_sinfo.fs_info;
+ if (sub_ctx->shaders[PIPE_SHADER_FRAGMENT])
next_type = PIPE_SHADER_FRAGMENT;
- }
}
switch (type) {
@@ -3460,42 +3892,126 @@ static inline void vrend_sync_shader_io(struct vrend_sub_context *sub_ctx,
break;
}
- if (next_type != -1 && sub_ctx->shaders[next_type]) {
- key->output = sub_ctx->shaders[next_type]->sinfo.in;
+ if (next_type != PIPE_SHADER_INVALID && sub_ctx->shaders[next_type]) {
+ if (!sub_ctx->shaders[next_type]->sinfo.separable_program ||
+ !sel->sinfo.separable_program) {
+ struct vrend_shader_selector *next = sub_ctx->shaders[next_type];
- /* FS gets the clip/cull info in the key from this shader, so
- * we can avoid re-translating this shader by not updating the
- * info in the key */
- if (next_type != PIPE_SHADER_FRAGMENT) {
- key->num_clip = sub_ctx->shaders[next_type]->current->var_sinfo.num_clip;
- key->num_cull = sub_ctx->shaders[next_type]->current->var_sinfo.num_cull;
- }
+ key->use_pervertex_in = next->sinfo.use_pervertex_in;
+ key->require_output_arrays = next->sinfo.has_input_arrays;
+ key->out_generic_expected_mask = next->sinfo.in_generic_emitted_mask;
+ key->out_texcoord_expected_mask = next->sinfo.in_texcoord_emitted_mask;
- if (type == PIPE_SHADER_VERTEX && next_type == PIPE_SHADER_FRAGMENT) {
- if (sub_ctx->shaders[type]) {
- uint32_t fog_input = sub_ctx->shaders[next_type]->sinfo.fog_input_mask;
- uint32_t fog_output = sub_ctx->shaders[type]->sinfo.fog_output_mask;
+ /* FS gets the clip/cull info in the key from this shader, so
+ * we can avoid re-translating this shader by not updating the
+ * info in the key */
+ if (next_type != PIPE_SHADER_FRAGMENT) {
+ key->num_out_clip = sub_ctx->shaders[next_type]->current->var_sinfo.num_in_clip;
+ key->num_out_cull = sub_ctx->shaders[next_type]->current->var_sinfo.num_in_cull;
+ }
- //We only want to issue the fixup for inputs not fed by the outputs of the
- //previous stage
- key->vs.fog_fixup_mask = (fog_input ^ fog_output) & fog_input;
+ if (next_type == PIPE_SHADER_FRAGMENT) {
+ struct vrend_shader *fs =
+ sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current;
+ key->fs_info = fs->var_sinfo.fs_info;
+ if (type == PIPE_SHADER_VERTEX && sub_ctx->shaders[type]) {
+ uint32_t fog_input = sub_ctx->shaders[next_type]->sinfo.fog_input_mask;
+ uint32_t fog_output = sub_ctx->shaders[type]->sinfo.fog_output_mask;
+
+ // We only want to issue the fixup for inputs not fed by
+ // the outputs of the previous stage
+ key->vs.fog_fixup_mask = (fog_input ^ fog_output) & fog_input;
+ }
}
}
}
}
+static bool vrend_get_swizzle(struct vrend_sampler_view *view,
+ GLint swizzle[4])
+{
+ static const GLint OOOR[] = {GL_ZERO, GL_ZERO, GL_ZERO, GL_RED};
+ static const GLint RRR1[] = {GL_RED, GL_RED, GL_RED, GL_ONE};
+ static const GLint RRRG[] = {GL_RED, GL_RED, GL_RED, GL_GREEN};
+ static const GLint RRRR[] = {GL_RED, GL_RED, GL_RED, GL_RED};
+
+ switch (view->format) {
+ case VIRGL_FORMAT_A8_UNORM:
+ case VIRGL_FORMAT_A8_SINT:
+ case VIRGL_FORMAT_A8_UINT:
+ case VIRGL_FORMAT_A16_UNORM:
+ case VIRGL_FORMAT_A16_SINT:
+ case VIRGL_FORMAT_A16_UINT:
+ case VIRGL_FORMAT_A16_FLOAT:
+ case VIRGL_FORMAT_A32_SINT:
+ case VIRGL_FORMAT_A32_UINT:
+ case VIRGL_FORMAT_A32_FLOAT:
+ memcpy(swizzle, OOOR, 4 * sizeof(GLuint));
+ return true;
+ case VIRGL_FORMAT_L8_UNORM:
+ case VIRGL_FORMAT_L8_SINT:
+ case VIRGL_FORMAT_L8_UINT:
+ case VIRGL_FORMAT_L16_UNORM:
+ case VIRGL_FORMAT_L16_SINT:
+ case VIRGL_FORMAT_L16_UINT:
+ case VIRGL_FORMAT_L16_FLOAT:
+ case VIRGL_FORMAT_L32_SINT:
+ case VIRGL_FORMAT_L32_UINT:
+ case VIRGL_FORMAT_L32_FLOAT:
+ memcpy(swizzle, RRR1, 4 * sizeof(GLuint));
+ return true;
+ case VIRGL_FORMAT_L8A8_UNORM:
+ case VIRGL_FORMAT_L8A8_SINT:
+ case VIRGL_FORMAT_L8A8_UINT:
+ case VIRGL_FORMAT_L16A16_UNORM:
+ case VIRGL_FORMAT_L16A16_SINT:
+ case VIRGL_FORMAT_L16A16_UINT:
+ case VIRGL_FORMAT_L16A16_FLOAT:
+ case VIRGL_FORMAT_L32A32_FLOAT:
+ case VIRGL_FORMAT_L32A32_SINT:
+ case VIRGL_FORMAT_L32A32_UINT:
+ memcpy(swizzle, RRRG, 4 * sizeof(GLuint));
+ return true;
+ case VIRGL_FORMAT_I8_UNORM:
+ case VIRGL_FORMAT_I8_SINT:
+ case VIRGL_FORMAT_I8_UINT:
+ case VIRGL_FORMAT_I16_UNORM:
+ case VIRGL_FORMAT_I16_SINT:
+ case VIRGL_FORMAT_I16_UINT:
+ case VIRGL_FORMAT_I16_FLOAT:
+ case VIRGL_FORMAT_I32_FLOAT:
+ case VIRGL_FORMAT_I32_SINT:
+ case VIRGL_FORMAT_I32_UINT:
+ memcpy(swizzle, RRRR, 4 * sizeof(GLuint));
+ return true;
+ default:
+ if (tex_conv_table[view->format].flags & VIRGL_TEXTURE_NEED_SWIZZLE) {
+ swizzle[0] = tex_conv_table[view->format].swizzle[0];
+ swizzle[1] = tex_conv_table[view->format].swizzle[1];
+ swizzle[2] = tex_conv_table[view->format].swizzle[2];
+ swizzle[3] = tex_conv_table[view->format].swizzle[3];
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
+
+
static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx,
struct vrend_shader_selector *sel,
struct vrend_shader_key *key)
{
- unsigned type = sel->type;
+ enum pipe_shader_type type = sel->type;
if (vrend_state.use_core_profile) {
int i;
bool add_alpha_test = true;
- // Only use integer info when drawing to avoid stale info.
- if (vrend_state.use_integer && sub_ctx->drawing &&
+ /* Only use integer info when drawing to avoid stale info.
+ * Since we can get here from link_shaders before actually drawing anything,
+ * we may have no vertex element array */
+ if (vrend_state.use_integer && sub_ctx->drawing && sub_ctx->ve &&
type == PIPE_SHADER_VERTEX) {
key->vs.attrib_signed_int_bitmask = sub_ctx->ve->signed_int_bitmask;
key->vs.attrib_unsigned_int_bitmask = sub_ctx->ve->unsigned_int_bitmask;
@@ -3507,12 +4023,15 @@ static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx,
if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format))
key->fs.cbufs_are_a8_bitmask |= (1 << i);
if (util_format_is_pure_integer(sub_ctx->surf[i]->format)) {
- add_alpha_test = false;
- UPDATE_INT_SIGN_MASK(sub_ctx->surf[i]->format, i,
- key->fs.cbufs_signed_int_bitmask,
- key->fs.cbufs_unsigned_int_bitmask);
+ add_alpha_test = false;
+ UPDATE_INT_SIGN_MASK(sub_ctx->surf[i]->format, i,
+ key->fs.cbufs_signed_int_bitmask,
+ key->fs.cbufs_unsigned_int_bitmask);
+ }
+ /* Currently we only use this information if logicop_enable is set */
+ if (sub_ctx->blend_state.logicop_enable) {
+ key->fs.surface_component_bits[i] = util_format_get_component_bits(sub_ctx->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
}
- key->fs.surface_component_bits[i] = util_format_get_component_bits(sub_ctx->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0);
}
if (add_alpha_test) {
key->add_alpha_test = sub_ctx->dsa_state.alpha.enabled;
@@ -3520,19 +4039,46 @@ static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx,
}
}
- key->pstipple_tex = sub_ctx->rs_state.poly_stipple_enable;
+ key->pstipple_enabled = sub_ctx->rs_state.poly_stipple_enable;
key->color_two_side = sub_ctx->rs_state.light_twoside;
- key->clip_plane_enable = sub_ctx->rs_state.clip_plane_enable;
key->flatshade = sub_ctx->rs_state.flatshade ? true : false;
}
- key->gs_present = !!sub_ctx->shaders[PIPE_SHADER_GEOMETRY];
- key->tcs_present = !!sub_ctx->shaders[PIPE_SHADER_TESS_CTRL];
- key->tes_present = !!sub_ctx->shaders[PIPE_SHADER_TESS_EVAL];
+ if (vrend_state.use_gles && sub_ctx->ve && type == PIPE_SHADER_VERTEX) {
+ key->vs.attrib_zyxw_bitmask = sub_ctx->ve->zyxw_bitmask;
+ }
+
+ key->gs_present = !!sub_ctx->shaders[PIPE_SHADER_GEOMETRY] || type == PIPE_SHADER_GEOMETRY;
+ key->tcs_present = !!sub_ctx->shaders[PIPE_SHADER_TESS_CTRL] || type == PIPE_SHADER_TESS_CTRL;
+ key->tes_present = !!sub_ctx->shaders[PIPE_SHADER_TESS_EVAL] || type == PIPE_SHADER_TESS_EVAL;
if (type != PIPE_SHADER_COMPUTE)
vrend_sync_shader_io(sub_ctx, sel, key);
+
+ if (type == PIPE_SHADER_GEOMETRY)
+ key->gs.emit_clip_distance = sub_ctx->rs_state.clip_plane_enable != 0;
+
+ for (int i = 0; i < sub_ctx->views[type].num_views; i++) {
+ struct vrend_sampler_view *view = sub_ctx->views[type].views[i];
+ if (!view)
+ continue;
+
+ if (view->emulated_rect) {
+ vrend_shader_sampler_views_mask_set(key->sampler_views_emulated_rect_mask, i);
+ }
+
+ if (view->texture->target == GL_TEXTURE_BUFFER) {
+ GLint swizzle[4];
+ if (vrend_get_swizzle(view, swizzle)) {
+ vrend_shader_sampler_views_mask_set(key->sampler_views_lower_swizzle_mask, i);
+ key->tex_swizzle[i] = to_pipe_swizzle(swizzle[0]) |
+ to_pipe_swizzle(swizzle[1]) << 3 |
+ to_pipe_swizzle(swizzle[2]) << 6 |
+ to_pipe_swizzle(swizzle[3]) << 9;
+ }
+ }
+ }
}
static int vrend_shader_create(struct vrend_context *ctx,
@@ -3554,7 +4100,7 @@ static int vrend_shader_create(struct vrend_context *ctx,
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, shader->sel->type);
return -1;
}
- } else if (!ctx->shader_cfg.use_gles && shader->sel->type != TGSI_PROCESSOR_TESS_CTRL) {
+ } else if (!ctx->shader_cfg.use_gles && shader->sel->type != PIPE_SHADER_TESS_CTRL) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, shader->sel->type);
return -1;
}
@@ -3574,12 +4120,13 @@ static int vrend_shader_select(struct vrend_sub_context *sub_ctx,
memset(&key, 0, sizeof(key));
vrend_fill_shader_key(sub_ctx, sel, &key);
- if (sel->current && !memcmp(&sel->current->key, &key, sizeof(key)))
- return 0;
+ if (sel->current) {
+ if (!memcmp(&sel->current->key, &key, sizeof(key)))
+ return 0;
- if (sel->num_shaders > 1) {
struct vrend_shader *p = sel->current;
struct vrend_shader *c = p->next_variant;
+
while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
p = c;
c = c->next_variant;
@@ -3599,10 +4146,10 @@ static int vrend_shader_select(struct vrend_sub_context *sub_ctx,
r = vrend_shader_create(sub_ctx->parent, shader, &key);
if (r) {
sel->current = NULL;
+ strarray_free(&shader->glsl_strings, true);
FREE(shader);
return r;
}
- sel->num_shaders++;
}
if (dirty)
*dirty = true;
@@ -3614,7 +4161,7 @@ static int vrend_shader_select(struct vrend_sub_context *sub_ctx,
static void *vrend_create_shader_state(const struct pipe_stream_output_info *so_info,
uint32_t req_local_mem,
- unsigned pipe_shader_type)
+ enum pipe_shader_type pipe_shader_type)
{
struct vrend_shader_selector *sel = CALLOC_STRUCT(vrend_shader_selector);
@@ -3633,15 +4180,13 @@ static int vrend_finish_shader(struct vrend_context *ctx,
struct vrend_shader_selector *sel,
const struct tgsi_token *tokens)
{
- int r;
-
sel->tokens = tgsi_dup_tokens(tokens);
- r = vrend_shader_select(ctx->sub, sel, NULL);
- if (r) {
- return EINVAL;
- }
- return 0;
+ if (!ctx->shader_cfg.use_gles && sel->type != PIPE_SHADER_COMPUTE)
+ sel->sinfo.separable_program =
+ vrend_shader_query_separable_program(sel->tokens, &ctx->shader_cfg);
+
+ return vrend_shader_select(ctx->sub, sel, NULL) ? EINVAL : 0;
}
int vrend_create_shader(struct vrend_context *ctx,
@@ -3649,11 +4194,10 @@ int vrend_create_shader(struct vrend_context *ctx,
const struct pipe_stream_output_info *so_info,
uint32_t req_local_mem,
const char *shd_text, uint32_t offlen, uint32_t num_tokens,
- uint32_t type, uint32_t pkt_length)
+ enum pipe_shader_type type, uint32_t pkt_length)
{
struct vrend_shader_selector *sel = NULL;
int ret_handle;
- bool new_shader = true, long_shader = false;
bool finished = false;
int ret;
@@ -3673,11 +4217,13 @@ int vrend_create_shader(struct vrend_context *ctx,
!has_feature(feat_compute_shader))
return EINVAL;
- if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT)
- new_shader = false;
- else if (((offlen + 3) / 4) > pkt_length)
- long_shader = true;
-
+ /* offlen & VIRGL_OBJ_SHADER_OFFSET_CONT declares whether we have a new shader or
+ * a shader continuation.
+ *
+ * offlen & ~VIRGL_OBJ_SHADER_OFFSET_CONT
+ * is the total shader length for a new shader (new_shader == true), or
+ * the continuation offset for a shader continuation (new_shader == false). */
+ bool new_shader = !(offlen & VIRGL_OBJ_SHADER_OFFSET_CONT);
struct vrend_sub_context *sub_ctx = ctx->sub;
/* if we have an in progress one - don't allow a new shader
@@ -3689,24 +4235,30 @@ int vrend_create_shader(struct vrend_context *ctx,
return EINVAL;
}
+ const uint32_t pkt_length_bytes = pkt_length * 4;
+
if (new_shader) {
+ const uint32_t expected_token_count = (offlen + 3) / 4; /* round up count */
+ if (expected_token_count < pkt_length)
+ return EINVAL;
+
sel = vrend_create_shader_state(so_info, req_local_mem, type);
- if (sel == NULL)
- return ENOMEM;
-
- sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */
- sel->tmp_buf = malloc(sel->buf_len);
- if (!sel->tmp_buf) {
- ret = ENOMEM;
- goto error;
- }
+ if (sel == NULL)
+ return ENOMEM;
- memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
- if (long_shader) {
- sel->buf_offset = pkt_length * 4;
- sub_ctx->long_shader_in_progress_handle[type] = handle;
- } else
- finished = true;
+ sel->buf_len = expected_token_count * 4;
+ sel->tmp_buf = malloc(sel->buf_len);
+ if (!sel->tmp_buf) {
+ ret = ENOMEM;
+ goto error;
+ }
+
+ memcpy(sel->tmp_buf, shd_text, pkt_length_bytes);
+ if (expected_token_count > pkt_length) {
+ sel->buf_offset = pkt_length_bytes;
+ sub_ctx->long_shader_in_progress_handle[type] = handle;
+ } else
+ finished = true;
} else {
sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel) {
@@ -3724,23 +4276,23 @@ int vrend_create_shader(struct vrend_context *ctx,
}
/*make sure no overflow */
- if (pkt_length * 4 < pkt_length ||
- pkt_length * 4 + sel->buf_offset < pkt_length * 4 ||
- pkt_length * 4 + sel->buf_offset < sel->buf_offset) {
+ if (pkt_length_bytes < pkt_length ||
+ pkt_length_bytes + sel->buf_offset < pkt_length_bytes ||
+ pkt_length_bytes + sel->buf_offset < sel->buf_offset) {
ret = EINVAL;
goto error;
}
- if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) {
- vrend_printf( "Got too large shader continuation %d vs %d\n",
- pkt_length * 4 + sel->buf_offset, sel->buf_len);
+ if ((pkt_length_bytes + sel->buf_offset) > sel->buf_len) {
+ vrend_printf("Got too large shader continuation %d vs %d\n",
+ pkt_length_bytes + sel->buf_offset, sel->buf_len);
ret = EINVAL;
goto error;
}
- memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length * 4);
+ memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length_bytes);
- sel->buf_offset += pkt_length * 4;
+ sel->buf_offset += pkt_length_bytes;
if (sel->buf_offset >= sel->buf_len) {
finished = true;
shd_text = sel->tmp_buf;
@@ -3751,7 +4303,7 @@ int vrend_create_shader(struct vrend_context *ctx,
struct tgsi_token *tokens;
/* check for null termination */
- uint32_t last_chunk_offset = sel->buf_offset ? sel->buf_offset : pkt_length * 4;
+ uint32_t last_chunk_offset = sel->buf_offset ? sel->buf_offset : pkt_length_bytes;
if (last_chunk_offset < 4 || !memchr(shd_text + last_chunk_offset - 4, '\0', 4)) {
ret = EINVAL;
goto error;
@@ -3773,11 +4325,9 @@ int vrend_create_shader(struct vrend_context *ctx,
free(tokens);
ret = EINVAL;
goto error;
- } else {
- if (!vrend_debug(ctx, dbg_shader_tgsi)) {
- free(sel->tmp_buf);
- sel->tmp_buf = NULL;
- }
+ } else if (!VREND_DEBUG_ENABLED) {
+ free(sel->tmp_buf);
+ sel->tmp_buf = NULL;
}
free(tokens);
sub_ctx->long_shader_in_progress_handle[type] = 0;
@@ -3803,7 +4353,7 @@ error:
}
void vrend_bind_shader(struct vrend_context *ctx,
- uint32_t handle, uint32_t type)
+ uint32_t handle, enum pipe_shader_type type)
{
struct vrend_shader_selector *sel;
@@ -3840,7 +4390,7 @@ void vrend_bind_shader(struct vrend_context *ctx,
}
static float
-vrend_color_convert_linear_to_srgb(float color) {
+vrend_color_encode_as_srgb(float color) {
return color <= 0.0031308f
? 12.92f * color
: 1.055f * powf(color, (1.f / 2.4f)) - 0.055f;
@@ -3868,29 +4418,33 @@ void vrend_clear(struct vrend_context *ctx,
if (sub_ctx->viewport_state_dirty)
vrend_update_viewport_state(sub_ctx);
- vrend_use_program(sub_ctx, 0);
+ vrend_use_program(ctx->sub, NULL);
glDisable(GL_SCISSOR_TEST);
float colorf[4];
memcpy(colorf, color->f, sizeof(colorf));
- if (sub_ctx->nr_cbufs && sub_ctx->surf[0] &&
- vrend_resource_has_24bpp_internal_format(sub_ctx->surf[0]->texture) &&
- util_format_is_srgb(sub_ctx->surf[0]->format)) {
- VREND_DEBUG(dbg_tex, ctx,
- "manually converting glClearColor from linear->srgb colorspace for EGL-backed framebuffer color attachment"
- " (surface format is %s; resource format is %s)\n",
- util_format_name(sub_ctx->surf[0]->format),
- util_format_name(sub_ctx->surf[0]->texture->base.format));
- for (int i = 0; i < 3; ++i) // i < 3: don't convert alpha channel
- colorf[i] = vrend_color_convert_linear_to_srgb(colorf[i]);
+ {
+ struct vrend_surface *surf = sub_ctx->surf[0];
+ if (sub_ctx->nr_cbufs && surf &&
+ util_format_is_srgb(surf->format) &&
+ !vrend_resource_supports_view(surf->texture, surf->format)) {
+ VREND_DEBUG(dbg_tex, ctx,
+ "manually converting glClearColor from linear->srgb colorspace for EGL-backed framebuffer color attachment"
+ " (surface format is %s; resource format is %s)\n",
+ util_format_name(surf->format),
+ util_format_name(surf->texture->base.format));
+ for (int i = 0; i < 3; ++i) // i < 3: don't convert alpha channel
+ colorf[i] = vrend_color_encode_as_srgb(colorf[i]);
+ }
}
if (buffers & PIPE_CLEAR_COLOR) {
if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_format_is_emulated_alpha(sub_ctx->surf[0]->format)) {
glClearColor(colorf[3], 0.0, 0.0, 0.0);
- } else if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_resource_is_emulated_bgra(sub_ctx->surf[0]->texture)) {
+ } else if (sub_ctx->nr_cbufs && sub_ctx->surf[0] &&
+ vrend_resource_needs_redblue_swizzle(sub_ctx->surf[0]->texture, sub_ctx->surf[0]->format)) {
VREND_DEBUG(dbg_bgra, ctx, "swizzling glClearColor() since rendering surface is an externally-stored BGR* resource\n");
glClearColor(colorf[2], colorf[1], colorf[0], colorf[3]);
} else {
@@ -4007,7 +4561,7 @@ void vrend_clear(struct vrend_context *ctx,
glDisable(GL_SCISSOR_TEST);
}
-void vrend_clear_texture(struct vrend_context* ctx,
+int vrend_clear_texture(struct vrend_context* ctx,
uint32_t handle, uint32_t level,
const struct pipe_box *box,
const void * data)
@@ -4015,17 +4569,27 @@ void vrend_clear_texture(struct vrend_context* ctx,
GLenum format, type;
struct vrend_resource *res;
- if (handle)
- res = vrend_renderer_ctx_res_lookup(ctx, handle);
- else {
- vrend_printf( "cannot find resource for handle %d\n", handle);
- return;
+ res = vrend_renderer_ctx_res_lookup(ctx, handle);
+ if (!res) {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle);
+ return EINVAL;
}
enum virgl_formats fmt = res->base.format;
format = tex_conv_table[fmt].glformat;
type = tex_conv_table[fmt].gltype;
+ /* 32-bit BGRA resources are always reordered to RGBA ordering before
+ * submission to the host driver. Reorder red/blue color bytes in
+ * the clear color to match. */
+ if (vrend_state.use_gles && vrend_format_is_bgra(fmt)) {
+ assert(util_format_get_blocksizebits(fmt) >= 24);
+ VREND_DEBUG(dbg_bgra, ctx, "swizzling clear_texture color for bgra texture\n");
+ uint8_t temp = ((uint8_t*)data)[0];
+ ((uint8_t*)data)[0] = ((uint8_t*)data)[2];
+ ((uint8_t*)data)[2] = temp;
+ }
+
if (vrend_state.use_gles) {
glClearTexSubImageEXT(res->id, level,
box->x, box->y, box->z,
@@ -4037,6 +4601,7 @@ void vrend_clear_texture(struct vrend_context* ctx,
box->width, box->height, box->depth,
format, type, data);
}
+ return 0;
}
static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx)
@@ -4216,18 +4781,19 @@ static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
glVertexAttrib3fv(loc, data);
break;
case 4:
- default:
glVertexAttrib4fv(loc, data);
break;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
disable_bitmask |= (1 << loc);
} else {
+ GLint size = !vrend_state.use_gles && (va->zyxw_bitmask & (1 << i)) ? GL_BGRA : ve->nr_chan;
+
enable_bitmask |= (1 << loc);
if (util_format_is_pure_integer(ve->base.src_format)) {
- glVertexAttribIPointer(loc, ve->nr_chan, ve->type, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
+ glVertexAttribIPointer(loc, size, ve->type, vbo->base.stride, (void *)(uintptr_t)(ve->base.src_offset + vbo->base.buffer_offset));
} else {
- glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset));
+ glVertexAttribPointer(loc, size, ve->type, ve->norm, vbo->base.stride, (void *)(uintptr_t)(ve->base.src_offset + vbo->base.buffer_offset));
}
glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
}
@@ -4320,6 +4886,24 @@ static int vrend_draw_bind_samplers_shader(struct vrend_sub_context *sub_ctx,
struct vrend_sampler_view *tview = sviews->views[i];
if ((dirty & (1 << i)) && tview) {
if (sub_ctx->prog->shadow_samp_mask[shader_type] & (1 << i)) {
+ struct vrend_texture *tex = (struct vrend_texture *)tview->texture;
+
+ /* The modes LUMINANCE, INTENSITY, and ALPHA only apply when a depth texture
+ * is used by a sampler that returns an RGBA value, i.e. by sampler*D. If
+ * the texture is queried by using sampler*Shadow then these swizzles must
+ * not be applied; therefore, reset the swizzles to the default. */
+ static const GLint swizzle[] = {GL_RED,GL_GREEN,GL_BLUE,GL_ALPHA};
+ if (memcmp(tex->cur_swizzle, swizzle, 4 * sizeof(GLint))) {
+ if (vrend_state.use_gles) {
+ for (unsigned int i = 0; i < 4; ++i) {
+ glTexParameteri(tview->texture->target, GL_TEXTURE_SWIZZLE_R + i, swizzle[i]);
+ }
+ } else {
+ glTexParameteriv(tview->texture->target, GL_TEXTURE_SWIZZLE_RGBA, swizzle);
+ }
+ memcpy(tex->cur_swizzle, swizzle, 4 * sizeof(GLint));
+ }
+
glUniform4f(sub_ctx->prog->shadow_samp_mask_locs[shader_type][sampler_index],
(tview->gl_swizzle[0] == GL_ZERO || tview->gl_swizzle[0] == GL_ONE) ? 0.0 : 1.0,
(tview->gl_swizzle[1] == GL_ZERO || tview->gl_swizzle[1] == GL_ONE) ? 0.0 : 1.0,
@@ -4378,9 +4962,6 @@ static int vrend_draw_bind_ubo_shader(struct vrend_sub_context *sub_ctx,
struct pipe_constant_buffer *cb;
struct vrend_resource *res;
- if (!has_feature(feat_ubo))
- return next_ubo_id;
-
mask = sub_ctx->prog->ubo_used_mask[shader_type];
dirty = sub_ctx->const_bufs_dirty[shader_type];
update = dirty & sub_ctx->const_bufs_used_mask[shader_type];
@@ -4503,11 +5084,31 @@ static void vrend_draw_bind_images_shader(struct vrend_sub_context *sub_ctx, int
/* glTexBuffer doesn't accept GL_RGBA8_SNORM, find an appropriate replacement. */
uint32_t format = (iview->format == GL_RGBA8_SNORM) ? GL_RGBA8UI : iview->format;
+ if (format == GL_NONE ||
+ (vrend_state.use_gles && format == GL_ALPHA8)) {
+ format = vrend_get_arb_format(iview->vformat);
+ }
+
glBindBufferARB(GL_TEXTURE_BUFFER, iview->texture->id);
glBindTexture(GL_TEXTURE_BUFFER, iview->texture->tbo_tex_id);
- if (has_feature(feat_arb_or_gles_ext_texture_buffer))
- glTexBuffer(GL_TEXTURE_BUFFER, format, iview->texture->id);
+ if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
+ if (has_feature(feat_texture_buffer_range)) {
+ /* Offset and size are given in bytes, but the max_texture_buffer_size
+ * is given in texels, so we have to take the blocksize into account.
+ * To avoid an unsigned int overflow, we divide by the blocksize.
+ */
+ int blsize = util_format_get_blocksize(iview->vformat);
+ unsigned offset = iview->u.buf.offset / blsize;
+ unsigned size = iview->u.buf.size / blsize;
+ if (offset + size > vrend_state.max_texture_buffer_size)
+ size = vrend_state.max_texture_buffer_size - offset;
+ glTexBufferRange(GL_TEXTURE_BUFFER, format, iview->texture->id, iview->u.buf.offset,
+ size * blsize);
+ } else {
+ glTexBuffer(GL_TEXTURE_BUFFER, format, iview->texture->id);
+ }
+ }
tex_id = iview->texture->tbo_tex_id;
level = first_layer = 0;
@@ -4541,20 +5142,37 @@ static void vrend_draw_bind_images_shader(struct vrend_sub_context *sub_ctx, int
}
}
+static void
+vrend_fill_sysval_uniform_block (struct vrend_sub_context *sub_ctx)
+{
+ if (sub_ctx->prog->virgl_block_bind == -1)
+ return;
+
+ if (sub_ctx->sysvalue_data_cookie != sub_ctx->prog->sysvalue_data_cookie) {
+ glBindBuffer(GL_UNIFORM_BUFFER, sub_ctx->prog->ubo_sysval_buffer_id);
+ glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(struct sysval_uniform_block),
+ &sub_ctx->sysvalue_data);
+ glBindBuffer(GL_UNIFORM_BUFFER, 0);
+ sub_ctx->prog->sysvalue_data_cookie = sub_ctx->sysvalue_data_cookie;
+ }
+}
+
static void vrend_draw_bind_objects(struct vrend_sub_context *sub_ctx, bool new_program)
{
int next_ubo_id = 0, next_sampler_id = 0;
for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= sub_ctx->last_shader_idx; shader_type++) {
+ vrend_set_active_pipeline_stage(sub_ctx->prog, shader_type);
+
next_ubo_id = vrend_draw_bind_ubo_shader(sub_ctx, shader_type, next_ubo_id);
vrend_draw_bind_const_shader(sub_ctx, shader_type, new_program);
- next_sampler_id = vrend_draw_bind_samplers_shader(sub_ctx, shader_type,
- next_sampler_id);
+ next_sampler_id = vrend_draw_bind_samplers_shader(sub_ctx, shader_type, next_sampler_id);
vrend_draw_bind_images_shader(sub_ctx, shader_type);
vrend_draw_bind_ssbo_shader(sub_ctx, shader_type);
if (vrend_state.use_gles) {
if (sub_ctx->prog->tex_levels_uniform_id[shader_type] != -1) {
+ vrend_set_active_pipeline_stage(sub_ctx->prog, shader_type);
glUniform1iv(sub_ctx->prog->tex_levels_uniform_id[shader_type],
sub_ctx->n_samplers[shader_type],
sub_ctx->texture_levels[shader_type]);
@@ -4562,17 +5180,14 @@ static void vrend_draw_bind_objects(struct vrend_sub_context *sub_ctx, bool new_
}
}
- vrend_draw_bind_abo_shader(sub_ctx);
+ if (sub_ctx->prog->virgl_block_bind != -1)
+ glBindBufferRange(GL_UNIFORM_BUFFER, sub_ctx->prog->virgl_block_bind,
+ sub_ctx->prog->ubo_sysval_buffer_id,
+ 0, sizeof(struct sysval_uniform_block));
- if (vrend_state.use_core_profile && sub_ctx->prog->fs_stipple_loc != -1) {
- glActiveTexture(GL_TEXTURE0 + next_sampler_id);
- glBindTexture(GL_TEXTURE_2D, sub_ctx->parent->pstipple_tex_id);
- glUniform1i(sub_ctx->prog->fs_stipple_loc, next_sampler_id);
- }
+ vrend_draw_bind_abo_shader(sub_ctx);
- if (vrend_state.use_core_profile && sub_ctx->prog->fs_alpha_ref_val_loc != -1) {
- glUniform1f(sub_ctx->prog->fs_alpha_ref_val_loc, sub_ctx->dsa_state.alpha.ref_value);
- }
+ vrend_set_active_pipeline_stage(sub_ctx->prog, PIPE_SHADER_FRAGMENT);
}
static
@@ -4591,23 +5206,28 @@ void vrend_inject_tcs(struct vrend_sub_context *sub_ctx, int vertices_per_patch)
list_inithead(&shader->programs);
strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS);
- vrend_shader_create_passthrough_tcs(sub_ctx->parent, &sub_ctx->parent->shader_cfg,
- sub_ctx->shaders[PIPE_SHADER_VERTEX]->tokens,
- &shader->key, vrend_state.tess_factors, &sel->sinfo,
- &shader->glsl_strings, vertices_per_patch);
+ if (!vrend_shader_create_passthrough_tcs(sub_ctx->parent, &sub_ctx->parent->shader_cfg,
+ sub_ctx->shaders[PIPE_SHADER_VERTEX]->tokens,
+ &shader->key, vrend_state.tess_factors, &sel->sinfo,
+ &shader->glsl_strings, vertices_per_patch)) {
+ strarray_free(&shader->glsl_strings, true);
+ FREE(shader);
+ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, sel->type);
+ vrend_destroy_shader_selector(sel);
+ return;
+ }
// Need to add inject the selected shader to the shader selector and then the code below
// can continue
sel->tokens = NULL;
sel->current = shader;
sub_ctx->shaders[PIPE_SHADER_TESS_CTRL] = sel;
- sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1;
vrend_compile_shader(sub_ctx, shader);
}
static bool
-vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_info *info)
+vrend_select_program(struct vrend_sub_context *sub_ctx, ubyte vertices_per_patch)
{
struct vrend_linked_shader_program *prog;
bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
@@ -4619,7 +5239,6 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
sub_ctx->shader_dirty = false;
if (!shaders[PIPE_SHADER_VERTEX] || !shaders[PIPE_SHADER_FRAGMENT]) {
- vrend_printf("dropping rendering due to missing shaders: %s\n", sub_ctx->parent->debug_name);
return false;
}
@@ -4628,6 +5247,7 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
// buffer formats when the shader is created, we only know it here.
// Set it to true so the underlying code knows to use the buffer formats
// now.
+
sub_ctx->drawing = true;
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
sub_ctx->drawing = false;
@@ -4636,7 +5256,7 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) {
VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n");
- vrend_inject_tcs(sub_ctx, info->vertices_per_patch);
+ vrend_inject_tcs(sub_ctx, vertices_per_patch);
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
}
@@ -4645,7 +5265,9 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty);
if (shaders[PIPE_SHADER_GEOMETRY])
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
- vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
+
+ if (vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_FRAGMENT], &fs_dirty))
+ goto fail;
// NOTE: run shader selection again as a workaround to #180 - "duplicated shader compilation"
if (shaders[PIPE_SHADER_GEOMETRY])
@@ -4656,7 +5278,7 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty);
else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) {
VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n");
- vrend_inject_tcs(sub_ctx, info->vertices_per_patch);
+ vrend_inject_tcs(sub_ctx, vertices_per_patch);
}
sub_ctx->drawing = true;
vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty);
@@ -4664,13 +5286,13 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
uint8_t gles_emulate_query_texture_levels_mask = 0;
- for (uint i = 0; i < PIPE_SHADER_TYPES; i++) {
+ for (enum pipe_shader_type i = 0; i < PIPE_SHADER_TYPES; i++) {
struct vrend_shader_selector *sel = shaders[i];
if (!sel)
continue;
struct vrend_shader *shader = sel->current;
- if (shader && !shader->is_compiled) {//shader->sel->type == PIPE_SHADER_FRAGMENT || shader->sel->type == PIPE_SHADER_GEOMETRY) {
+ if (shader && !shader->is_compiled) {
if (!vrend_compile_shader(sub_ctx, shader))
return false;
}
@@ -4682,16 +5304,23 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
!shaders[PIPE_SHADER_FRAGMENT]->current ||
(shaders[PIPE_SHADER_GEOMETRY] && !shaders[PIPE_SHADER_GEOMETRY]->current) ||
(shaders[PIPE_SHADER_TESS_CTRL] && !shaders[PIPE_SHADER_TESS_CTRL]->current) ||
- (shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_TESS_EVAL]->current)) {
- vrend_printf( "failure to compile shader variants: %s\n", sub_ctx->parent->debug_name);
- return false;
- }
+ (shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_TESS_EVAL]->current))
+ goto fail;
+
+ struct vrend_shader *vs = shaders[PIPE_SHADER_VERTEX]->current;
+ struct vrend_shader *fs = shaders[PIPE_SHADER_FRAGMENT]->current;
+ struct vrend_shader *gs = shaders[PIPE_SHADER_GEOMETRY] ? shaders[PIPE_SHADER_GEOMETRY]->current : NULL;
+ struct vrend_shader *tcs = shaders[PIPE_SHADER_TESS_CTRL] ? shaders[PIPE_SHADER_TESS_CTRL]->current : NULL;
+ struct vrend_shader *tes = shaders[PIPE_SHADER_TESS_EVAL] ? shaders[PIPE_SHADER_TESS_EVAL]->current : NULL;
+
+ GLuint vs_id = vs->id;
+ GLuint fs_id = fs->id;
+ GLuint gs_id = !gs ? 0 : gs->id;
+ GLuint tcs_id = !tcs ? 0 : tcs->id;
+ GLuint tes_id = !tes ? 0 : tes->id;
- GLuint vs_id = shaders[PIPE_SHADER_VERTEX]->current->id;
- GLuint fs_id = shaders[PIPE_SHADER_FRAGMENT]->current->id;
- GLuint gs_id = shaders[PIPE_SHADER_GEOMETRY] ? shaders[PIPE_SHADER_GEOMETRY]->current->id : 0;
- GLuint tcs_id = shaders[PIPE_SHADER_TESS_CTRL] ? shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0;
- GLuint tes_id = shaders[PIPE_SHADER_TESS_EVAL] ? shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0;
+ if (shaders[PIPE_SHADER_FRAGMENT]->current->sel->sinfo.num_outputs <= 1)
+ dual_src = false;
bool same_prog = sub_ctx->prog &&
vs_id == sub_ctx->prog_ids[PIPE_SHADER_VERTEX] &&
@@ -4701,6 +5330,12 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
tes_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] &&
sub_ctx->prog->dual_src_linked == dual_src;
+ bool separable = vs->sel->sinfo.separable_program &&
+ fs->sel->sinfo.separable_program &&
+ (!gs || gs->sel->sinfo.separable_program) &&
+ (!tcs || tcs->sel->sinfo.separable_program) &&
+ (!tes || tes->sel->sinfo.separable_program);
+
if (!same_prog) {
prog = lookup_shader_program(sub_ctx, vs_id, fs_id, gs_id, tcs_id, tes_id, dual_src);
if (!prog) {
@@ -4709,10 +5344,34 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current,
gs_id ? sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->current : NULL,
tcs_id ? sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL,
- tes_id ? sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL);
+ tes_id ? sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL,
+ separable);
if (!prog)
return false;
prog->gles_use_query_texturelevel_mask = gles_emulate_query_texture_levels_mask;
+ } else if (separable) {
+ /* UBO block bindings are reset to zero if the programs are
+ * re-linked. With separable shaders, the program can be relinked
+ * because it's shared across multiple pipelines and some things like
+ * transform feedback require relinking, so we have to make sure the
+ * blocks are bound. */
+ enum pipe_shader_type last_shader = tes_id ? PIPE_SHADER_TESS_EVAL :
+ (gs_id ? PIPE_SHADER_GEOMETRY :
+ PIPE_SHADER_FRAGMENT);
+ bool need_rebind = false;
+
+ for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX;
+ shader_type <= last_shader && !need_rebind;
+ shader_type++) {
+ if (!prog->ss[shader_type])
+ continue;
+ need_rebind |= prog->ss[shader_type]->last_pipeline_id != prog->id.pipeline;
+ }
+
+ if (need_rebind) {
+ vrend_use_program(sub_ctx, prog);
+ rebind_ubo_and_sampler_locs(prog, last_shader);
+ }
}
sub_ctx->last_shader_idx = sub_ctx->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (sub_ctx->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT);
@@ -4738,6 +5397,68 @@ vrend_select_program(struct vrend_sub_context *sub_ctx, const struct pipe_draw_i
}
sub_ctx->cs_shader_dirty = true;
return new_program;
+
+fail:
+ vrend_printf( "failure to compile shader variants: %s\n", sub_ctx->parent->debug_name);
+ return false;
+}
+
+void vrend_link_program_hook(struct vrend_context *ctx, uint32_t *handles)
+{
+ /* Pre-compiling compute shaders needs some additional work */
+ if (handles[PIPE_SHADER_COMPUTE])
+ return;
+
+ struct vrend_shader_selector *vs = vrend_object_lookup(ctx->sub->object_hash,
+ handles[PIPE_SHADER_VERTEX],
+ VIRGL_OBJECT_SHADER);
+ struct vrend_shader_selector *fs = vrend_object_lookup(ctx->sub->object_hash,
+ handles[PIPE_SHADER_FRAGMENT],
+ VIRGL_OBJECT_SHADER);
+
+ /* If we can't force linking, exit early */
+ if ((!handles[PIPE_SHADER_VERTEX] || !handles[PIPE_SHADER_FRAGMENT]) &&
+ (!vs || !vs->sinfo.separable_program) && (!fs || !fs->sinfo.separable_program))
+ return;
+
+ /* We can't pre-link a TCS without a TES, exit early */
+ if (handles[PIPE_SHADER_TESS_CTRL] && !handles[PIPE_SHADER_TESS_EVAL])
+ return;
+
+ struct vrend_shader_selector *prev_handles[PIPE_SHADER_TYPES];
+ memset(prev_handles, 0, sizeof(prev_handles));
+ uint32_t prev_shader_ids[PIPE_SHADER_TYPES];
+ memcpy(prev_shader_ids, ctx->sub->prog_ids, PIPE_SHADER_TYPES * sizeof(uint32_t));
+ struct vrend_linked_shader_program *prev_prog = ctx->sub->prog;
+
+ for (enum pipe_shader_type type = 0; type < PIPE_SHADER_TYPES; ++type) {
+ vrend_shader_state_reference(&prev_handles[type], ctx->sub->shaders[type]);
+ vrend_bind_shader(ctx, handles[type], type);
+ }
+
+ /* Force early-linking for separable shaders, since they don't depend on other stages */
+ for (uint32_t type = 0; type < PIPE_SHADER_TYPES; ++type) {
+ if (ctx->sub->shaders[type] && ctx->sub->shaders[type]->sinfo.separable_program) {
+ if (!ctx->sub->shaders[type]->current->is_compiled)
+ vrend_compile_shader(ctx->sub, ctx->sub->shaders[type]->current);
+ if (!ctx->sub->shaders[type]->current->is_linked)
+ vrend_link_separable_shader(ctx->sub, ctx->sub->shaders[type]->current, type);
+ }
+ }
+
+ /* Force early-link of the whole shader program. */
+ vrend_select_program(ctx->sub, 1);
+
+ ctx->sub->shader_dirty = true;
+ ctx->sub->cs_shader_dirty = true;
+
+ /* undo state changes */
+ for (enum pipe_shader_type type = 0; type < PIPE_SHADER_TYPES; ++type) {
+ vrend_shader_state_reference(&ctx->sub->shaders[type], prev_handles[type]);
+ vrend_shader_state_reference(&prev_handles[type], NULL);
+ }
+ memcpy(ctx->sub->prog_ids, prev_shader_ids, PIPE_SHADER_TYPES * sizeof(uint32_t));
+ ctx->sub->prog = prev_prog;
}
int vrend_draw_vbo(struct vrend_context *ctx,
@@ -4745,7 +5466,6 @@ int vrend_draw_vbo(struct vrend_context *ctx,
uint32_t cso, uint32_t indirect_handle,
uint32_t indirect_draw_count_handle)
{
- int i;
bool new_program = false;
struct vrend_resource *indirect_res = NULL;
struct vrend_resource *indirect_params_res = NULL;
@@ -4810,25 +5530,31 @@ int vrend_draw_vbo(struct vrend_context *ctx,
sub_ctx->prim_mode = (int)info->mode;
}
+ if (!sub_ctx->ve) {
+ vrend_printf("illegal VE setup - skipping renderering\n");
+ return 0;
+ }
+
if (sub_ctx->shader_dirty || sub_ctx->swizzle_output_rgb_to_bgr ||
- sub_ctx->convert_linear_to_srgb_on_write)
- new_program = vrend_select_program(sub_ctx, info);
+ sub_ctx->needs_manual_srgb_encode_bitmask || sub_ctx->vbo_dirty)
+ new_program = vrend_select_program(sub_ctx, info->vertices_per_patch);
if (!sub_ctx->prog) {
vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name);
return 0;
}
- vrend_use_program(sub_ctx, sub_ctx->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog);
if (vrend_state.use_gles) {
/* PIPE_SHADER and TGSI_SHADER have different ordering, so use two
* different prefix arrays */
- for (unsigned i = PIPE_SHADER_VERTEX; i < PIPE_SHADER_COMPUTE; ++i) {
+ for (enum pipe_shader_type i = PIPE_SHADER_VERTEX; i < PIPE_SHADER_COMPUTE; ++i) {
if (sub_ctx->prog->gles_use_query_texturelevel_mask & (1 << i)) {
char loc_name[32];
- snprintf(loc_name, 32, "%s_texlod[0]", pipe_shader_to_prefix(i));
- sub_ctx->prog->tex_levels_uniform_id[i] = glGetUniformLocation(sub_ctx->prog->id, loc_name);
+ snprintf(loc_name, 32, "%s_texlod", pipe_shader_to_prefix(i));
+ sub_ctx->prog->tex_levels_uniform_id[i] =
+ vrend_get_uniform_location(sub_ctx->prog, loc_name, i);
} else {
sub_ctx->prog->tex_levels_uniform_id[i] = -1;
}
@@ -4837,23 +5563,7 @@ int vrend_draw_vbo(struct vrend_context *ctx,
}
vrend_draw_bind_objects(sub_ctx, new_program);
-
-
- if (!sub_ctx->ve) {
- vrend_printf("illegal VE setup - skipping renderering\n");
- return 0;
- }
- float viewport_neg_val = sub_ctx->viewport_is_negative ? -1.0 : 1.0;
- if (sub_ctx->prog->viewport_neg_val != viewport_neg_val) {
- glUniform1f(sub_ctx->prog->vs_ws_adjust_loc, viewport_neg_val);
- sub_ctx->prog->viewport_neg_val = viewport_neg_val;
- }
-
- if (sub_ctx->rs_state.clip_plane_enable) {
- for (i = 0 ; i < 8; i++) {
- glUniform4fv(sub_ctx->prog->clip_locs[i], 1, (const GLfloat *)&sub_ctx->ucp_state.ucp[i]);
- }
- }
+ vrend_fill_sysval_uniform_block(sub_ctx);
if (has_feature(feat_gles31_vertex_attrib_binding))
vrend_draw_bind_vertex_binding(ctx, sub_ctx->ve);
@@ -4944,18 +5654,19 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (indirect_handle) {
if (indirect_params_res)
- glMultiDrawArraysIndirectCountARB(mode, (GLvoid const *)(unsigned long)info->indirect.offset,
+ glMultiDrawArraysIndirectCountARB(mode, (GLvoid const *)(uintptr_t)info->indirect.offset,
info->indirect.indirect_draw_count_offset, info->indirect.draw_count, info->indirect.stride);
else if (info->indirect.draw_count > 1)
- glMultiDrawArraysIndirect(mode, (GLvoid const *)(unsigned long)info->indirect.offset, info->indirect.draw_count, info->indirect.stride);
+ glMultiDrawArraysIndirect(mode, (GLvoid const *)(uintptr_t)info->indirect.offset, info->indirect.draw_count, info->indirect.stride);
+ else
+ glDrawArraysIndirect(mode, (GLvoid const *)(uintptr_t)info->indirect.offset);
+ } else if (info->instance_count > 0) {
+ if (info->start_instance > 0)
+ glDrawArraysInstancedBaseInstance(mode, start, count, info->instance_count, info->start_instance);
else
- glDrawArraysIndirect(mode, (GLvoid const *)(unsigned long)info->indirect.offset);
- } else if (info->instance_count <= 1)
+ glDrawArraysInstancedARB(mode, start, count, info->instance_count);
+ } else
glDrawArrays(mode, start, count);
- else if (info->start_instance)
- glDrawArraysInstancedBaseInstance(mode, start, count, info->instance_count, info->start_instance);
- else
- glDrawArraysInstancedARB(mode, start, count, info->instance_count);
} else {
GLenum elsz;
GLenum mode = info->mode;
@@ -4974,25 +5685,34 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (indirect_handle) {
if (indirect_params_res)
- glMultiDrawElementsIndirectCountARB(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset,
+ glMultiDrawElementsIndirectCountARB(mode, elsz, (GLvoid const *)(uintptr_t)info->indirect.offset,
info->indirect.indirect_draw_count_offset, info->indirect.draw_count, info->indirect.stride);
else if (info->indirect.draw_count > 1)
- glMultiDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset, info->indirect.draw_count, info->indirect.stride);
+ glMultiDrawElementsIndirect(mode, elsz, (GLvoid const *)(uintptr_t)info->indirect.offset, info->indirect.draw_count, info->indirect.stride);
else
- glDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset);
+ glDrawElementsIndirect(mode, elsz, (GLvoid const *)(uintptr_t)info->indirect.offset);
} else if (info->index_bias) {
- if (info->instance_count > 1)
- glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count, info->index_bias);
- else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
+ if (info->instance_count > 0) {
+ if (info->start_instance > 0)
+ glDrawElementsInstancedBaseVertexBaseInstance(mode, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset,
+ info->instance_count, info->index_bias, info->start_instance);
+ else
+ glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset, info->instance_count, info->index_bias);
+
+
+ } else if (info->min_index != 0 || info->max_index != (unsigned)-1)
+ glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset, info->index_bias);
else
- glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias);
- } else if (info->instance_count > 1) {
- glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count);
+ glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset, info->index_bias);
+ } else if (info->instance_count > 0) {
+ if (info->start_instance > 0) {
+ glDrawElementsInstancedBaseInstance(mode, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset, info->instance_count, info->start_instance);
+ } else
+ glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset, info->instance_count);
} else if (info->min_index != 0 || info->max_index != (unsigned)-1)
- glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
+ glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset);
else
- glDrawElements(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset);
+ glDrawElements(mode, info->count, elsz, (void *)(uintptr_t)sub_ctx->ib.offset);
}
if (info->primitive_restart) {
@@ -5011,6 +5731,9 @@ int vrend_draw_vbo(struct vrend_context *ctx,
sub_ctx->current_so->xfb_state = XFB_STATE_PAUSED;
}
}
+
+ if (use_advanced_blending)
+ glDisable(GL_BLEND);
return 0;
}
@@ -5076,8 +5799,9 @@ void vrend_launch_grid(struct vrend_context *ctx,
return;
}
- vrend_use_program(sub_ctx, sub_ctx->prog->id);
+ vrend_use_program(sub_ctx, sub_ctx->prog);
+ vrend_set_active_pipeline_stage(sub_ctx->prog, PIPE_SHADER_COMPUTE);
vrend_draw_bind_ubo_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
vrend_draw_bind_const_shader(sub_ctx, PIPE_SHADER_COMPUTE, new_program);
vrend_draw_bind_samplers_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0);
@@ -5306,13 +6030,12 @@ static void vrend_hw_emit_blend(struct vrend_sub_context *sub_ctx, struct pipe_b
if (state->rt[0].colormask != sub_ctx->hw_blend_state.rt[0].colormask ||
(sub_ctx->hw_blend_state.independent_blend_enable &&
!state->independent_blend_enable)) {
- int i;
- for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
- sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
+ for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
+ sub_ctx->hw_blend_state.rt[i].colormask = state->rt[0].colormask;
}
}
sub_ctx->hw_blend_state.independent_blend_enable = state->independent_blend_enable;
@@ -5474,6 +6197,11 @@ void vrend_object_bind_dsa(struct vrend_context *ctx,
ctx->sub->dsa_state = *state;
ctx->sub->dsa = state;
+ if (ctx->sub->sysvalue_data.alpha_ref_val != state->alpha.ref_value) {
+ ctx->sub->sysvalue_data.alpha_ref_val = state->alpha.ref_value;
+ ctx->sub->sysvalue_data_cookie++;
+ }
+
vrend_hw_emit_dsa(ctx);
}
@@ -5482,7 +6210,7 @@ static void vrend_update_frontface_state(struct vrend_sub_context *sub_ctx)
struct pipe_rasterizer_state *state = &sub_ctx->rs_state;
int front_ccw = state->front_ccw;
- front_ccw ^= (sub_ctx->inverted_fbo_content ? 0 : 1);
+ front_ccw ^= (sub_ctx->fbo_origin_upper_left ? 0 : 1);
if (front_ccw)
glFrontFace(GL_CCW);
else
@@ -5670,9 +6398,6 @@ static void vrend_hw_emit_rs(struct vrend_context *ctx)
glEnable(GL_POLYGON_STIPPLE);
else
glDisable(GL_POLYGON_STIPPLE);
- } else if (state->poly_stipple_enable) {
- if (!ctx->pstip_inited)
- vrend_init_pstipple_texture(ctx);
}
if (state->point_quad_rasterization) {
@@ -5725,6 +6450,13 @@ static void vrend_hw_emit_rs(struct vrend_context *ctx)
else
glDisable(GL_CLIP_PLANE0 + i);
}
+
+ ctx->sub->sysvalue_data_cookie++;
+ if (ctx->sub->rs_state.clip_plane_enable) {
+ ctx->sub->sysvalue_data.clip_plane_enabled = 1.f;
+ } else {
+ ctx->sub->sysvalue_data.clip_plane_enabled = 0.f;
+ }
}
if (vrend_state.use_core_profile == false) {
glLineStipple(state->line_stipple_factor, state->line_stipple_pattern);
@@ -5775,11 +6507,6 @@ static void vrend_hw_emit_rs(struct vrend_context *ctx)
report_core_warn(ctx, CORE_PROFILE_WARN_CLAMP);
}
- /* read-color-clamping is handled in the mesa frontend */
- if (!vrend_state.use_gles) {
- glClampColor(GL_CLAMP_READ_COLOR_ARB, GL_FALSE);
- }
-
if (has_feature(feat_multisample)) {
if (has_feature(feat_sample_mask)) {
if (state->multisample)
@@ -5835,7 +6562,7 @@ void vrend_object_bind_rasterizer(struct vrend_context *ctx,
}
void vrend_bind_sampler_states(struct vrend_context *ctx,
- uint32_t shader_type,
+ enum pipe_shader_type shader_type,
uint32_t start_slot,
uint32_t num_states,
const uint32_t *handles)
@@ -6010,33 +6737,23 @@ static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr
return PIPE_BUFFER;
}
-static inline void lock_sync(void)
-{
- if (vrend_state.sync_thread && vrend_state.use_async_fence_cb)
- pipe_mutex_lock(vrend_state.fence_mutex);
-}
-
-static inline void unlock_sync(void)
-{
- if (vrend_state.sync_thread && vrend_state.use_async_fence_cb)
- pipe_mutex_unlock(vrend_state.fence_mutex);
-}
-
static void vrend_free_sync_thread(void)
{
if (!vrend_state.sync_thread)
return;
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
vrend_state.stop_sync_thread = true;
- pipe_condvar_signal(vrend_state.fence_cond);
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ cnd_signal(&vrend_state.fence_cond);
+ mtx_unlock(&vrend_state.fence_mutex);
- pipe_thread_wait(vrend_state.sync_thread);
+ thrd_join(vrend_state.sync_thread, NULL);
vrend_state.sync_thread = 0;
- pipe_condvar_destroy(vrend_state.fence_cond);
- pipe_mutex_destroy(vrend_state.fence_mutex);
+ cnd_destroy(&vrend_state.fence_cond);
+ mtx_destroy(&vrend_state.fence_mutex);
+ cnd_destroy(&vrend_state.poll_cond);
+ mtx_destroy(&vrend_state.poll_mutex);
}
static void free_fence_locked(struct vrend_fence *fence)
@@ -6071,7 +6788,7 @@ static void vrend_free_fences_for_context(struct vrend_context *ctx)
struct vrend_fence *fence, *stor;
if (vrend_state.sync_thread) {
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
if (fence->ctx == ctx)
free_fence_locked(fence);
@@ -6084,7 +6801,7 @@ static void vrend_free_fences_for_context(struct vrend_context *ctx)
/* mark the fence invalid as the sync thread is still waiting on it */
vrend_state.fence_waiting->ctx = NULL;
}
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ mtx_unlock(&vrend_state.fence_mutex);
} else {
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
if (fence->ctx == ctx)
@@ -6095,18 +6812,13 @@ static void vrend_free_fences_for_context(struct vrend_context *ctx)
static bool do_wait(struct vrend_fence *fence, bool can_block)
{
- bool done = false;
- int timeout = can_block ? 1000000000 : 0;
-
#ifdef HAVE_EPOXY_EGL_H
- if (vrend_state.use_egl_fence) {
- do {
- done = virgl_egl_client_wait_fence(egl, fence->eglsyncobj, timeout);
- } while (!done && can_block);
- return done;
- }
+ if (vrend_state.use_egl_fence)
+ return virgl_egl_client_wait_fence(egl, fence->eglsyncobj, can_block);
#endif
+ bool done = false;
+ int timeout = can_block ? 1000000000 : 0;
do {
GLenum glret = glClientWaitSync(fence->glsyncobj, 0, timeout);
if (glret == GL_WAIT_FAILED) {
@@ -6118,34 +6830,83 @@ static bool do_wait(struct vrend_fence *fence, bool can_block)
return done;
}
-static void vrend_renderer_check_queries_locked(void);
+static void vrend_renderer_check_queries(void);
+
+void vrend_renderer_poll(void) {
+ if (vrend_state.use_async_fence_cb) {
+ flush_eventfd(vrend_state.eventfd);
+ mtx_lock(&vrend_state.poll_mutex);
+
+ /* queries must be checked before fences are retired. */
+ vrend_renderer_check_queries();
+
+ /* wake up the sync thread to keep doing work */
+ vrend_state.polling = false;
+ cnd_signal(&vrend_state.poll_cond);
+ mtx_unlock(&vrend_state.poll_mutex);
+ } else {
+ vrend_renderer_check_fences();
+ }
+}
static void wait_sync(struct vrend_fence *fence)
{
struct vrend_context *ctx = fence->ctx;
+ bool signal_poll = atomic_load(&vrend_state.has_waiting_queries);
do_wait(fence, /* can_block */ true);
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
if (vrend_state.use_async_fence_cb) {
- vrend_renderer_check_queries_locked();
/* to be able to call free_fence_locked without locking */
list_inithead(&fence->fences);
} else {
list_addtail(&fence->fences, &vrend_state.fence_list);
}
vrend_state.fence_waiting = NULL;
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ mtx_unlock(&vrend_state.fence_mutex);
- if (vrend_state.use_async_fence_cb) {
- ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data);
- free_fence_locked(fence);
+ if (!vrend_state.use_async_fence_cb) {
+ if (write_eventfd(vrend_state.eventfd, 1))
+ perror("failed to write to eventfd\n");
return;
}
- if (write_eventfd(vrend_state.eventfd, 1)) {
- perror("failed to write to eventfd\n");
+ /* If the current GL fence completed while one or more query was pending,
+ * check queries on the main thread before notifying the caller about fence
+ * completion.
+ * TODO: store seqno of first query in waiting_query_list and compare to
+ * current fence to avoid polling when it (and all later queries) are after
+ * the current fence. */
+ if (signal_poll) {
+ mtx_lock(&vrend_state.poll_mutex);
+ if (write_eventfd(vrend_state.eventfd, 1))
+ perror("failed to write to eventfd\n");
+
+ struct timespec ts;
+ int ret;
+ vrend_state.polling = true;
+ do {
+ ret = timespec_get(&ts, TIME_UTC);
+ assert(ret);
+ ts.tv_sec += 5;
+ ret = cnd_timedwait(&vrend_state.poll_cond, &vrend_state.poll_mutex, &ts);
+ if (ret)
+ vrend_printf("timeout (5s) waiting for renderer poll() to finish.");
+ } while (vrend_state.polling && ret);
+ }
+
+ /* vrend_free_fences_for_context might have marked the fence invalid
+ * by setting fence->ctx to NULL
+ */
+ if (ctx) {
+ ctx->fence_retire(fence->fence_id, ctx->fence_retire_data);
}
+
+ free_fence_locked(fence);
+
+ if (signal_poll)
+ mtx_unlock(&vrend_state.poll_mutex);
}
static int thread_sync(UNUSED void *arg)
@@ -6153,14 +6914,14 @@ static int thread_sync(UNUSED void *arg)
virgl_gl_context gl_context = vrend_state.sync_context;
struct vrend_fence *fence, *stor;
- pipe_thread_setname("vrend-sync");
+ u_thread_setname("vrend-sync");
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
vrend_clicbs->make_current(gl_context);
while (!vrend_state.stop_sync_thread) {
if (LIST_IS_EMPTY(&vrend_state.fence_wait_list) &&
- pipe_condvar_wait(vrend_state.fence_cond, vrend_state.fence_mutex) != 0) {
+ cnd_wait(&vrend_state.fence_cond, &vrend_state.fence_mutex) != 0) {
vrend_printf( "error while waiting on condition\n");
break;
}
@@ -6170,15 +6931,15 @@ static int thread_sync(UNUSED void *arg)
break;
list_del(&fence->fences);
vrend_state.fence_waiting = fence;
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ mtx_unlock(&vrend_state.fence_mutex);
wait_sync(fence);
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
}
}
vrend_clicbs->make_current(0);
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ mtx_unlock(&vrend_state.fence_mutex);
return 0;
}
@@ -6198,27 +6959,28 @@ static void vrend_renderer_use_threaded_sync(void)
return;
}
- if (!vrend_state.use_async_fence_cb) {
- vrend_state.eventfd = create_eventfd(0);
- if (vrend_state.eventfd == -1) {
- vrend_printf( "Failed to create eventfd\n");
- vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
- return;
- }
+ vrend_state.eventfd = create_eventfd(0);
+ if (vrend_state.eventfd == -1) {
+ vrend_printf( "Failed to create eventfd\n");
+ vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
+ return;
}
- pipe_condvar_init(vrend_state.fence_cond);
- pipe_mutex_init(vrend_state.fence_mutex);
+ cnd_init(&vrend_state.fence_cond);
+ mtx_init(&vrend_state.fence_mutex, mtx_plain);
+ cnd_init(&vrend_state.poll_cond);
+ mtx_init(&vrend_state.poll_mutex, mtx_plain);
+ vrend_state.polling = false;
- vrend_state.sync_thread = pipe_thread_create(thread_sync, NULL);
+ vrend_state.sync_thread = u_thread_create(thread_sync, NULL);
if (!vrend_state.sync_thread) {
- if (vrend_state.eventfd != -1) {
- close(vrend_state.eventfd);
- vrend_state.eventfd = -1;
- }
+ close(vrend_state.eventfd);
+ vrend_state.eventfd = -1;
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
- pipe_condvar_destroy(vrend_state.fence_cond);
- pipe_mutex_destroy(vrend_state.fence_mutex);
+ cnd_destroy(&vrend_state.fence_cond);
+ mtx_destroy(&vrend_state.fence_mutex);
+ cnd_destroy(&vrend_state.poll_cond);
+ mtx_destroy(&vrend_state.poll_mutex);
}
}
@@ -6290,6 +7052,38 @@ static enum virgl_resource_fd_type vrend_pipe_resource_export_fd(UNUSED struct p
return VIRGL_RESOURCE_FD_INVALID;
}
+static uint64_t vrend_pipe_resource_get_size(struct pipe_resource *pres,
+ UNUSED void *data)
+{
+ struct vrend_resource *res = (struct vrend_resource *)pres;
+
+ return res->size;
+}
+
+bool vrend_check_no_error(struct vrend_context *ctx)
+{
+ GLenum err;
+
+ err = glGetError();
+ if (err == GL_NO_ERROR)
+ return true;
+
+ while (err != GL_NO_ERROR) {
+#ifdef CHECK_GL_ERRORS
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_UNKNOWN, err);
+#else
+ vrend_printf("GL error reported (%d) for context %d\n", err, ctx->ctx_id);
+#endif
+ err = glGetError();
+ }
+
+#ifdef CHECK_GL_ERRORS
+ return false;
+#else
+ return true;
+#endif
+}
+
const struct virgl_resource_pipe_callbacks *
vrend_renderer_get_pipe_callbacks(void)
{
@@ -6298,12 +7092,13 @@ vrend_renderer_get_pipe_callbacks(void)
.attach_iov = vrend_pipe_resource_attach_iov,
.detach_iov = vrend_pipe_resource_detach_iov,
.export_fd = vrend_pipe_resource_export_fd,
+ .get_size = vrend_pipe_resource_get_size,
};
return &callbacks;
}
-static bool use_integer() {
+static bool use_integer(void) {
if (getenv("VIRGL_USE_INTEGER"))
return true;
@@ -6329,9 +7124,9 @@ int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags)
vrend_state.max_texture_3d_size =
vrend_state.max_texture_cube_size = 16384;
-#ifndef NDEBUG
- vrend_init_debug_flags();
-#endif
+ if (VREND_DEBUG_ENABLED) {
+ vrend_init_debug_flags();
+ }
ctx_params.shared = false;
for (uint32_t i = 0; i < ARRAY_SIZE(gl_versions); i++) {
@@ -6364,10 +7159,10 @@ int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags)
vrend_printf( "gl_version %d - es profile enabled\n", gl_ver);
vrend_state.use_gles = true;
/* for now, makes the rest of the code use the most GLES 3.x like path */
- vrend_state.use_core_profile = 1;
+ vrend_state.use_core_profile = true;
} else if (gl_ver > 30 && !epoxy_has_gl_extension("GL_ARB_compatibility")) {
vrend_printf( "gl_version %d - core profile enabled\n", gl_ver);
- vrend_state.use_core_profile = 1;
+ vrend_state.use_core_profile = true;
} else {
vrend_printf( "gl_version %d - compat profile\n", gl_ver);
}
@@ -6417,15 +7212,22 @@ int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags)
vrend_check_texture_storage(tex_conv_table);
+ if (has_feature(feat_multisample)) {
+ vrend_check_texture_multisample(tex_conv_table,
+ has_feature(feat_storage_multisample));
+ }
+
/* disable for format testing */
if (has_feature(feat_debug_cb)) {
- glDisable(GL_DEBUG_OUTPUT);
+ glEnable(GL_DEBUG_OUTPUT);
}
vrend_clicbs->destroy_gl_context(gl_context);
list_inithead(&vrend_state.fence_list);
list_inithead(&vrend_state.fence_wait_list);
list_inithead(&vrend_state.waiting_query_list);
+ atomic_store(&vrend_state.has_waiting_queries, false);
+
/* create 0 context */
vrend_state.ctx0 = vrend_create_context(0, strlen("HOST"), "HOST");
@@ -6443,6 +7245,20 @@ int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags)
vrend_state.use_egl_fence = virgl_egl_supports_fences(egl);
#endif
+ if (!vrend_check_no_error(vrend_state.ctx0) || !has_feature(feat_ubo)) {
+ vrend_renderer_fini();
+ return EINVAL;
+ }
+
+#ifdef ENABLE_VIDEO
+ if (flags & VREND_USE_VIDEO) {
+ if (vrend_clicbs->get_drm_fd)
+ vrend_video_init(vrend_clicbs->get_drm_fd());
+ else
+ vrend_printf("video disabled due to missing get_drm_fd\n");
+ }
+#endif
+
return 0;
}
@@ -6459,6 +7275,10 @@ vrend_renderer_fini(void)
vrend_free_fences();
vrend_blitter_fini();
+#ifdef ENABLE_VIDEO
+ vrend_video_fini();
+#endif
+
vrend_destroy_context(vrend_state.ctx0);
vrend_state.current_ctx = NULL;
@@ -6469,7 +7289,6 @@ vrend_renderer_fini(void)
static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
{
- int i, j;
struct vrend_streamout_object *obj, *tmp;
vrend_clicbs->make_current(sub->gl_context);
@@ -6484,7 +7303,7 @@ static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
if (!has_feature(feat_gles31_vertex_attrib_binding)) {
while (sub->enabled_attribs_bitmask) {
- i = u_bit_scan(&sub->enabled_attribs_bitmask);
+ uint32_t i = u_bit_scan(&sub->enabled_attribs_bitmask);
glDisableVertexAttribArray(i);
}
@@ -6511,19 +7330,19 @@ static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
sub->prog->ref_context = NULL;
vrend_free_programs(sub);
- for (i = 0; i < PIPE_SHADER_TYPES; i++) {
- free(sub->consts[i].consts);
- sub->consts[i].consts = NULL;
+ for (enum pipe_shader_type type = 0; type < PIPE_SHADER_TYPES; type++) {
+ free(sub->consts[type].consts);
+ sub->consts[type].consts = NULL;
- for (j = 0; j < PIPE_MAX_SHADER_SAMPLER_VIEWS; j++) {
- vrend_sampler_view_reference(&sub->views[i].views[j], NULL);
+ for (unsigned i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
+ vrend_sampler_view_reference(&sub->views[type].views[i], NULL);
}
}
if (sub->zsurf)
vrend_surface_reference(&sub->zsurf, NULL);
- for (i = 0; i < sub->nr_cbufs; i++) {
+ for (int i = 0; i < sub->nr_cbufs; i++) {
if (!sub->surf[i])
continue;
vrend_surface_reference(&sub->surf[i], NULL);
@@ -6532,12 +7351,7 @@ static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
vrend_set_num_vbo_sub(sub, 0);
vrend_resource_reference((struct vrend_resource **)&sub->ib.buffer, NULL);
- /* need to lock mutex before destroying queries, we could
- * be checking these in the sync thread */
- lock_sync();
vrend_object_fini_ctx_table(sub->object_hash);
- unlock_sync();
-
vrend_clicbs->destroy_gl_context(sub->gl_context);
list_del(&sub->head);
@@ -6556,11 +7370,6 @@ void vrend_destroy_context(struct vrend_context *ctx)
vrend_state.current_hw_ctx = NULL;
}
- if (vrend_state.use_core_profile) {
- if (ctx->pstip_inited)
- glDeleteTextures(1, &ctx->pstipple_tex_id);
- ctx->pstip_inited = false;
- }
vrend_clicbs->make_current(ctx->sub->gl_context);
/* reset references on framebuffers */
vrend_set_framebuffer_state(ctx, 0, NULL, 0);
@@ -6583,6 +7392,10 @@ void vrend_destroy_context(struct vrend_context *ctx)
vrend_free_fences_for_context(ctx);
+#ifdef ENABLE_VIDEO
+ vrend_video_destroy_context(ctx->video);
+#endif
+
LIST_FOR_EACH_ENTRY_SAFE(untyped_res, untyped_res_tmp, &ctx->untyped_resources, head)
free(untyped_res);
vrend_ctx_resource_fini_table(ctx->res_hash);
@@ -6613,11 +7426,15 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
list_inithead(&grctx->sub_ctxs);
list_inithead(&grctx->vrend_resources);
- list_inithead(&grctx->active_nontimer_query_list);
+
+#ifdef ENABLE_VIDEO
+ grctx->video = vrend_video_create_context(grctx);
+#endif
grctx->res_hash = vrend_ctx_resource_init_table();
list_inithead(&grctx->untyped_resources);
+ grctx->shader_cfg.max_shader_patch_varyings = vrend_state.max_shader_patch_varyings;
grctx->shader_cfg.use_gles = vrend_state.use_gles;
grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
grctx->shader_cfg.use_explicit_locations = vrend_state.use_explicit_locations;
@@ -6629,6 +7446,9 @@ struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *de
grctx->shader_cfg.use_integer = vrend_state.use_integer;
grctx->shader_cfg.has_dual_src_blend = has_feature(feat_dual_src_blend);
grctx->shader_cfg.has_fbfetch_coherent = has_feature(feat_framebuffer_fetch);
+ grctx->shader_cfg.has_cull_distance = has_feature(feat_cull_distance);
+ grctx->shader_cfg.has_nopersective = has_feature(feat_shader_noperspective_interpolation);
+ grctx->shader_cfg.has_texture_shadow_lod = has_feature(feat_texture_shadow_lod);
vrend_renderer_create_sub_ctx(grctx, 0);
vrend_renderer_set_sub_ctx(grctx, 0);
@@ -6662,8 +7482,9 @@ static int check_resource_valid(const struct vrend_renderer_resource_create_args
/* only texture 2d and 2d array can have multiple samples */
if (args->nr_samples > 0) {
- if (!has_feature(feat_texture_multisample)) {
- snprintf(errmsg, 256, "Multisample textures not supported");
+ if (!vrend_format_can_multisample(args->format)) {
+ snprintf(errmsg, 256, "Unsupported multisample texture format %s",
+ util_format_name(args->format));
return -1;
}
@@ -6676,10 +7497,6 @@ static int check_resource_valid(const struct vrend_renderer_resource_create_args
snprintf(errmsg, 256, "Multisample textures don't support mipmaps");
return -1;
}
- if (!format_can_texture_storage && vrend_state.use_gles) {
- snprintf(errmsg, 256, "Unsupported multisample texture format %d", args->format);
- return -1;
- }
}
if (args->last_level > 0) {
@@ -6958,7 +7775,7 @@ vrend_resource_alloc_buffer(struct vrend_resource *gr, uint32_t flags)
if (bind == VIRGL_BIND_CUSTOM) {
/* use iovec directly when attached */
gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY;
- gr->ptr = malloc(size);
+ gr->ptr = calloc(1, size);
if (!gr->ptr)
return -ENOMEM;
} else if (bind == VIRGL_BIND_STAGING) {
@@ -7065,6 +7882,10 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
else
gr->map_info = VIRGL_RENDERER_MAP_CACHE_WC;
+ int num_planes = gbm_bo_get_plane_count(bo);
+ for (int plane = 0; plane < num_planes; plane++)
+ gr->size += gbm_bo_get_plane_size(bo, plane);
+
if (!virgl_gbm_gpu_import_required(gr->base.bind))
return;
@@ -7108,11 +7929,12 @@ static int vrend_resource_alloc_texture(struct vrend_resource *gr,
gr->target = tgsitargettogltarget(pr->target, pr->nr_samples);
gr->storage_bits |= VREND_STORAGE_GL_TEXTURE;
- /* ugly workaround for texture rectangle missing on GLES */
- if (vrend_state.use_gles && gr->target == GL_TEXTURE_RECTANGLE_NV) {
+ /* ugly workaround for texture rectangle incompatibility */
+ if (gr->target == GL_TEXTURE_RECTANGLE_NV &&
+ !(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TARGET_RECTANGLE)) {
/* for some guests this is the only usage of rect */
if (pr->width0 != 1 || pr->height0 != 1) {
- report_gles_warn(NULL, GLES_WARN_TEXTURE_RECT);
+ vrend_printf("Warning: specifying format incompatible with GL_TEXTURE_RECTANGLE_NV\n");
}
gr->target = GL_TEXTURE_2D;
}
@@ -7136,14 +7958,22 @@ static int vrend_resource_alloc_texture(struct vrend_resource *gr,
if (has_bit(gr->storage_bits, VREND_STORAGE_GL_IMMUTABLE) &&
has_feature(feat_egl_image_storage)) {
glEGLImageTargetTexStorageEXT(gr->target, (GLeglImageOES) image_oes, NULL);
- } else if (has_feature(feat_egl_image_external)) {
+ } else if (has_feature(feat_egl_image)) {
gr->storage_bits &= ~VREND_STORAGE_GL_IMMUTABLE;
+ assert(gr->target == GL_TEXTURE_2D);
glEGLImageTargetTexture2DOES(gr->target, (GLeglImageOES) image_oes);
+ if ((format == VIRGL_FORMAT_NV12 ||
+ format == VIRGL_FORMAT_NV21 ||
+ format == VIRGL_FORMAT_YV12 ||
+ format == VIRGL_FORMAT_P010) && glGetError() != GL_NO_ERROR) {
+ vrend_printf("glEGLImageTargetTexture2DOES maybe fail\n");
+ }
} else {
- vrend_printf( "missing GL_OES_EGL_image_external extensions\n");
+ vrend_printf( "missing GL_OES_EGL_image extensions\n");
glBindTexture(gr->target, 0);
return EINVAL;
}
+ gr->storage_bits |= VREND_STORAGE_EGL_IMAGE;
} else {
internalformat = tex_conv_table[format].internalformat;
glformat = tex_conv_table[format].glformat;
@@ -7533,9 +8363,9 @@ static uint64_t vrend_transfer_size(struct vrend_resource *vres,
*/
int w = box->width > 0 ? box->width : 1;
int h = box->height > 0 ? box->height : 1;
- int d = box->depth > 0 ? box->depth : 1;
- int nblocksx = util_format_get_nblocksx(pres->format, w);
- int nblocksy = util_format_get_nblocksy(pres->format, h);
+ uint64_t d = box->depth > 0 ? box->depth : 1;
+ uint64_t nblocksx = util_format_get_nblocksx(pres->format, w);
+ uint64_t nblocksy = util_format_get_nblocksy(pres->format, h);
/* Calculate the box size, not including the last layer. The last layer
* is the only one which may be incomplete, and is the only layer for
@@ -7609,28 +8439,6 @@ static bool check_iov_bounds(struct vrend_resource *res,
return true;
}
-static void get_current_texture(GLenum target, GLint* tex) {
- switch (target) {
-#define GET_TEXTURE(a) \
- case GL_TEXTURE_ ## a: \
- glGetIntegerv(GL_TEXTURE_BINDING_ ## a, tex); return
- GET_TEXTURE(1D);
- GET_TEXTURE(2D);
- GET_TEXTURE(3D);
- GET_TEXTURE(1D_ARRAY);
- GET_TEXTURE(2D_ARRAY);
- GET_TEXTURE(RECTANGLE);
- GET_TEXTURE(CUBE_MAP);
- GET_TEXTURE(CUBE_MAP_ARRAY);
- GET_TEXTURE(BUFFER);
- GET_TEXTURE(2D_MULTISAMPLE);
- GET_TEXTURE(2D_MULTISAMPLE_ARRAY);
-#undef GET_TEXTURE
- default:
- vrend_printf("Unknown texture target %x\n", target);
- }
-}
-
static void vrend_swizzle_data_bgra(uint64_t size, void *data) {
const size_t bpp = 4;
const size_t num_pixels = size / bpp;
@@ -7695,10 +8503,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
uint32_t stride = info->stride;
uint32_t layer_stride = info->layer_stride;
- if (ctx)
- vrend_use_program(ctx->sub, 0);
- else
- glUseProgram(0);
+ vrend_use_program(ctx->sub, 0);
if (!stride)
stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level)) * elsize;
@@ -7712,9 +8517,8 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
need_temp = true;
}
- if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) &&
- !vrend_resource_is_emulated_bgra(res))
- need_temp = true;
+ if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format))
+ need_temp = true;
if (vrend_state.use_core_profile == true &&
(res->y_0_top || (res->base.format == VIRGL_FORMAT_Z24X8_UNORM))) {
@@ -7727,8 +8531,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
info->box->height) * elsize;
if (res->target == GL_TEXTURE_3D ||
res->target == GL_TEXTURE_2D_ARRAY ||
+ res->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY ||
res->target == GL_TEXTURE_CUBE_MAP_ARRAY)
send_size *= info->box->depth;
+ else if (need_temp && info->box->depth != 1)
+ return EINVAL;
if (need_temp) {
data = malloc(send_size);
@@ -7781,15 +8588,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
buffers = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, &buffers);
glDisable(GL_BLEND);
- if (ctx) {
- vrend_depth_test_enable(ctx, false);
- vrend_alpha_test_enable(ctx, false);
- vrend_stencil_test_enable(ctx->sub, false);
- } else {
- glDisable(GL_DEPTH_TEST);
- glDisable(GL_ALPHA_TEST);
- glDisable(GL_STENCIL_TEST);
- }
+
+ vrend_depth_test_enable(ctx, false);
+ vrend_alpha_test_enable(ctx, false);
+ vrend_stencil_test_enable(ctx->sub, false);
+
glPixelZoom(1.0f, res->y_0_top ? -1.0f : 1.0f);
glWindowPos2i(info->box->x, res->y_0_top ? (int)res->base.height0 - info->box->y : info->box->y);
glDrawPixels(info->box->width, info->box->height, glformat, gltype,
@@ -7797,8 +8600,6 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
glDeleteFramebuffers(1, &fb_id);
} else {
uint32_t comp_size;
- GLint old_tex = 0;
- get_current_texture(res->target, &old_tex);
glBindTexture(res->target, res->id);
if (compressed) {
@@ -7817,8 +8618,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
/* GLES doesn't allow format conversions, which we need for BGRA resources with RGBA
* internal format. So we fallback to performing a CPU swizzle before uploading. */
- if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) &&
- !vrend_resource_is_emulated_bgra(res)) {
+ if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format)) {
VREND_DEBUG(dbg_bgra, ctx, "manually swizzling bgra->rgba on upload since gles+bgra\n");
vrend_swizzle_data_bgra(send_size, data);
}
@@ -7893,7 +8693,6 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, 1.0);
}
- glBindTexture(res->target, old_tex);
}
if (stride && !need_temp) {
@@ -7965,8 +8764,6 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
break;
}
- GLint old_tex = 0;
- get_current_texture(res->target, &old_tex);
glBindTexture(res->target, res->id);
if (res->target == GL_TEXTURE_CUBE_MAP) {
target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z;
@@ -7997,7 +8794,6 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
info->stride, info->box, info->level, info->offset,
false);
free(data);
- glBindTexture(res->target, old_tex);
return 0;
}
@@ -8026,7 +8822,7 @@ static void do_readpixels(struct vrend_resource *res,
But we have found that at least Mesa returned the wrong formats, again
luckily we are able to change Mesa. But just in case there are more bad
drivers out there, or we mess up the format somewhere, we warn here. */
- if (vrend_state.use_gles) {
+ if (vrend_state.use_gles && !vrend_format_is_ds(res->base.format)) {
GLint imp;
if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_INT &&
type != GL_INT && type != GL_FLOAT) {
@@ -8043,6 +8839,11 @@ static void do_readpixels(struct vrend_resource *res,
}
}
+ /* read-color clamping is handled in the mesa frontend */
+ if (!vrend_state.use_gles) {
+ glClampColor(GL_CLAMP_READ_COLOR_ARB, GL_FALSE);
+ }
+
if (has_feature(feat_arb_robustness))
glReadnPixelsARB(x, y, width, height, format, type, bufSize, data);
else if (has_feature(feat_gles_khr_robustness))
@@ -8071,10 +8872,7 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
int row_stride = info->stride / elsize;
GLint old_fbo;
- if (ctx)
- vrend_use_program(ctx->sub, 0);
- else
- glUseProgram(0);
+ vrend_use_program(ctx->sub, 0);
enum virgl_formats fmt = res->base.format;
@@ -8087,16 +8885,15 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
if (actually_invert && !has_feature(feat_mesa_invert))
separate_invert = true;
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
glPixelStorei(GL_PACK_SWAP_BYTES, 1);
#endif
if (num_iovs > 1 || separate_invert)
need_temp = 1;
- if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) &&
- !vrend_resource_is_emulated_bgra(res))
- need_temp = true;
+ if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format))
+ need_temp = true;
if (need_temp) {
send_size = util_format_get_nblocks(res->base.format, info->box->width, info->box->height) * info->box->depth * util_format_get_blocksize(res->base.format);
@@ -8159,8 +8956,7 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
* on upload and need to do the same on readback.
* The notable exception is externally-stored (GBM/EGL) BGR* resources, for which BGR*
* byte-ordering is used instead to match external access patterns. */
- if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) &&
- !vrend_resource_is_emulated_bgra(res)) {
+ if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format)) {
VREND_DEBUG(dbg_bgra, ctx, "manually swizzling rgba->bgra on readback since gles+bgra\n");
vrend_swizzle_data_bgra(send_size, data);
}
@@ -8177,7 +8973,7 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
glPixelStorei(GL_PACK_ALIGNMENT, 4);
-#ifdef PIPE_ARCH_BIG_ENDIAN
+#if UTIL_ARCH_BIG_ENDIAN
glPixelStorei(GL_PACK_SWAP_BYTES, 0);
#endif
@@ -8288,7 +9084,8 @@ static int vrend_renderer_transfer_internal(struct vrend_context *ctx,
if (!info->box)
return EINVAL;
- vrend_hw_switch_context(ctx, true);
+ if (!vrend_hw_switch_context(ctx, true))
+ return EINVAL;
assert(check_transfer_iovec(res, info));
if (info->iovec && info->iovec_cnt) {
@@ -8337,11 +9134,20 @@ int vrend_renderer_transfer_iov(struct vrend_context *ctx,
struct vrend_resource *res;
res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
- if (!res || !check_transfer_iovec(res, info)) {
+ if (!res) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
return EINVAL;
}
+ if (!check_transfer_iovec(res, info)) {
+ if (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE))
+ return 0;
+ else {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
+ return EINVAL;
+ }
+ }
+
return vrend_renderer_transfer_internal(ctx, res, info,
transfer_mode);
}
@@ -8467,6 +9273,83 @@ int vrend_renderer_copy_transfer3d(struct vrend_context *ctx,
src_res->num_iovs, info);
}
+int vrend_renderer_copy_transfer3d_from_host(struct vrend_context *ctx,
+ uint32_t dst_handle,
+ uint32_t src_handle,
+ const struct vrend_transfer_info *info)
+{
+ struct vrend_resource *src_res, *dst_res;
+
+ src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
+ dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
+
+ if (!src_res) {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
+ return EINVAL;
+ }
+
+ if (!dst_res) {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
+ return EINVAL;
+ }
+
+ if (!dst_res->iov) {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
+ return EINVAL;
+ }
+
+ if (!check_transfer_bounds(src_res, info)) {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle);
+ return EINVAL;
+ }
+
+ if (!check_iov_bounds(src_res, info, dst_res->iov, dst_res->num_iovs)) {
+ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle);
+ return EINVAL;
+ }
+
+#ifdef ENABLE_MINIGBM_ALLOCATION
+ if (src_res->gbm_bo) {
+ bool use_gbm = true;
+
+ /* The guest uses copy transfers against busy resources to avoid
+ * waiting. The host GL driver is usually smart enough to avoid
+ * blocking by putting the data in a staging buffer and doing a
+ * pipelined copy. But when there is a GBM bo, we can only do
+ * that if the format is renderable, because we use glReadPixels,
+ * or on OpenGL glGetTexImage.
+ * Otherwise, if the format has a gbm bo we glFinish and use GBM.
+ * Also, EGL images with BGRX format are not compatible with this
+ * transfer type since they are stored with only 3bpp, so gbm transfer
+ * is required.
+ * For now the guest can knows than a texture is backed by a gbm buffer
+ * if it was created with the VIRGL_BIND_SCANOUT flag,
+ */
+ if (info->synchronized) {
+ bool can_readpixels = vrend_format_can_render(src_res->base.format) ||
+ vrend_format_is_ds(src_res->base.format);
+
+ if ((can_readpixels || !vrend_state.use_gles) &&
+ src_res->base.format != VIRGL_FORMAT_B8G8R8X8_UNORM)
+ use_gbm = false;
+ else
+ glFinish();
+ }
+
+ if (use_gbm) {
+ return virgl_gbm_transfer(src_res->gbm_bo,
+ VIRGL_TRANSFER_FROM_HOST,
+ dst_res->iov,
+ dst_res->num_iovs,
+ info);
+ }
+ }
+#endif
+
+ return vrend_renderer_transfer_send_iov(ctx, src_res, dst_res->iov,
+ dst_res->num_iovs, info);
+}
+
void vrend_set_stencil_ref(struct vrend_context *ctx,
struct pipe_stencil_ref *ref)
{
@@ -8491,59 +9374,41 @@ void vrend_set_scissor_state(struct vrend_context *ctx,
uint32_t num_scissor,
struct pipe_scissor_state *ss)
{
- uint i, idx;
-
- if (start_slot > PIPE_MAX_VIEWPORTS ||
- num_scissor > (PIPE_MAX_VIEWPORTS - start_slot)) {
+ if (start_slot < PIPE_MAX_VIEWPORTS &&
+ start_slot + num_scissor <= PIPE_MAX_VIEWPORTS) {
+ for (uint i = 0; i < num_scissor; i++) {
+ uint idx = start_slot + i;
+ ctx->sub->ss[idx] = ss[i];
+ ctx->sub->scissor_state_dirty |= (1 << idx);
+ }
+ } else
vrend_report_buffer_error(ctx, 0);
- return;
- }
-
- for (i = 0; i < num_scissor; i++) {
- idx = start_slot + i;
- ctx->sub->ss[idx] = ss[i];
- ctx->sub->scissor_state_dirty |= (1 << idx);
- }
}
void vrend_set_polygon_stipple(struct vrend_context *ctx,
struct pipe_poly_stipple *ps)
{
if (vrend_state.use_core_profile) {
- static const unsigned bit31 = 1u << 31;
- GLubyte *stip = calloc(1, 1024);
- int i, j;
-
- if (!ctx->pstip_inited)
- vrend_init_pstipple_texture(ctx);
-
- if (!stip)
- return;
- for (i = 0; i < 32; i++) {
- for (j = 0; j < 32; j++) {
- if (ps->stipple[i] & (bit31 >> j))
- stip[i * 32 + j] = 0;
- else
- stip[i * 32 + j] = 255;
- }
- }
-
- glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 32, 32,
- GL_RED, GL_UNSIGNED_BYTE, stip);
- glBindTexture(GL_TEXTURE_2D, 0);
-
- free(stip);
- return;
+ /* std140 aligns array elements at 16 byte */
+ for (int i = 0; i < VREND_POLYGON_STIPPLE_SIZE ; ++i)
+ ctx->sub->sysvalue_data.stipple_pattern[i][0] = ps->stipple[i];
+ ctx->sub->sysvalue_data_cookie++;
+ } else {
+ glPolygonStipple((const GLubyte *)ps->stipple);
}
- glPolygonStipple((const GLubyte *)ps->stipple);
}
void vrend_set_clip_state(struct vrend_context *ctx, struct pipe_clip_state *ucp)
{
if (vrend_state.use_core_profile) {
ctx->sub->ucp_state = *ucp;
+
+ ctx->sub->sysvalue_data_cookie++;
+ for (int i = 0 ; i < VIRGL_NUM_CLIP_PLANES; i++) {
+ memcpy(&ctx->sub->sysvalue_data.clipp[i],
+ (const GLfloat *) &ctx->sub->ucp_state.ucp[i], sizeof(GLfloat) * 4);
+ }
} else {
int i, j;
GLdouble val[4];
@@ -8752,7 +9617,7 @@ static void vrend_resource_copy_fallback(struct vrend_resource *src_res,
* On the contrary, externally-stored BGR* resources are assumed to remain in BGR* format at
* all times.
*/
- if (vrend_format_is_bgra(dst_res->base.format) && !vrend_resource_is_emulated_bgra(dst_res))
+ if (vrend_state.use_gles && vrend_format_is_bgra(dst_res->base.format))
vrend_swizzle_data_bgra(total_size, tptr);
} else {
uint32_t read_chunk_size;
@@ -8849,36 +9714,24 @@ static void vrend_resource_copy_fallback(struct vrend_resource *src_res,
glBindTexture(GL_TEXTURE_2D, 0);
}
-static inline
-GLenum translate_gles_emulation_texture_target(GLenum target)
-{
- switch (target) {
- case GL_TEXTURE_1D:
- case GL_TEXTURE_RECTANGLE: return GL_TEXTURE_2D;
- case GL_TEXTURE_1D_ARRAY: return GL_TEXTURE_2D_ARRAY;
- default: return target;
- }
-}
-
static inline void
vrend_copy_sub_image(struct vrend_resource* src_res, struct vrend_resource * dst_res,
uint32_t src_level, const struct pipe_box *src_box,
uint32_t dst_level, uint32_t dstx, uint32_t dsty, uint32_t dstz)
{
-
- GLenum src_target = tgsitargettogltarget(src_res->base.target, src_res->base.nr_samples);
- GLenum dst_target = tgsitargettogltarget(dst_res->base.target, dst_res->base.nr_samples);
-
- if (vrend_state.use_gles) {
- src_target = translate_gles_emulation_texture_target(src_target);
- dst_target = translate_gles_emulation_texture_target(dst_target);
- }
-
- glCopyImageSubData(src_res->id, src_target, src_level,
+ glCopyImageSubData(src_res->id, src_res->target, src_level,
src_box->x, src_box->y, src_box->z,
- dst_res->id, dst_target, dst_level,
+ dst_res->id, dst_res->target, dst_level,
dstx, dsty, dstz,
src_box->width, src_box->height,src_box->depth);
+
+ // temporarily added to disable strict error checking and fix guests that are still using pre 20.x
+ // mesa/virgl drivers that generate an error here during window resizes:
+ // "ERROR: GL_INVALID_VALUE in glCopyImageSubData(srcX or srcWidth exceeds image bounds)"
+ if (has_bit(src_res->storage_bits, VREND_STORAGE_GBM_BUFFER) &&
+ glGetError() != GL_NO_ERROR) {
+ vrend_printf("glCopyImageSubData maybe fail\n");
+ }
}
@@ -9019,6 +9872,8 @@ static GLuint vrend_make_view(struct vrend_resource *res, enum virgl_formats for
if (!has_bit(res->storage_bits, VREND_STORAGE_GL_IMMUTABLE))
return res->id;
+ assert(vrend_resource_supports_view(res, format));
+
VREND_DEBUG(dbg_blit, NULL, "Create texture view from %s as %s\n",
util_format_name(res->base.format),
util_format_name(format));
@@ -9035,32 +9890,101 @@ static GLuint vrend_make_view(struct vrend_resource *res, enum virgl_formats for
return view_id;
}
-static void vrend_renderer_blit_int(struct vrend_context *ctx,
- struct vrend_resource *src_res,
- struct vrend_resource *dst_res,
- const struct pipe_blit_info *info)
+static bool vrend_blit_needs_redblue_swizzle(struct vrend_resource *src_res,
+ struct vrend_resource *dst_res,
+ const struct pipe_blit_info *info)
{
- GLbitfield glmask = 0;
- int src_y1, src_y2, dst_y1, dst_y2;
- GLenum filter;
- int n_layers = 1, i;
- bool use_gl = false;
- bool needs_swizzle = false;
- bool make_intermediate_copy = false;
- GLuint intermediate_fbo = 0;
- struct vrend_resource *intermediate_copy = 0;
+ /* EGL-backed bgr* resources are always stored with BGR* internal format,
+ * despite Virgl's use of the GL_RGBA8 internal format, so special care must
+ * be taken when determining the swizzling. */
+ bool src_needs_swizzle = vrend_resource_needs_redblue_swizzle(src_res, info->src.format);
+ bool dst_needs_swizzle = vrend_resource_needs_redblue_swizzle(dst_res, info->dst.format);
+ return src_needs_swizzle ^ dst_needs_swizzle;
+}
- GLuint blitter_views[2] = {src_res->id, dst_res->id};
+static void vrend_renderer_prepare_blit_extra_info(struct vrend_context *ctx,
+ struct vrend_resource *src_res,
+ struct vrend_resource *dst_res,
+ struct vrend_blit_info *info)
+{
+ info->can_fbo_blit = true;
- filter = convert_mag_filter(info->filter);
+ info->gl_filter = convert_mag_filter(info->b.filter);
+
+ if (!dst_res->y_0_top) {
+ info->dst_y1 = info->b.dst.box.y + info->b.dst.box.height;
+ info->dst_y2 = info->b.dst.box.y;
+ } else {
+ info->dst_y1 = dst_res->base.height0 - info->b.dst.box.y - info->b.dst.box.height;
+ info->dst_y2 = dst_res->base.height0 - info->b.dst.box.y;
+ }
+
+ if (!src_res->y_0_top) {
+ info->src_y1 = info->b.src.box.y + info->b.src.box.height;
+ info->src_y2 = info->b.src.box.y;
+ } else {
+ info->src_y1 = src_res->base.height0 - info->b.src.box.y - info->b.src.box.height;
+ info->src_y2 = src_res->base.height0 - info->b.src.box.y;
+ }
+
+ if (vrend_blit_needs_swizzle(info->b.dst.format, info->b.src.format)) {
+ info->needs_swizzle = true;
+ info->can_fbo_blit = false;
+ }
+
+ if (info->needs_swizzle && vrend_get_format_table_entry(dst_res->base.format)->flags & VIRGL_TEXTURE_NEED_SWIZZLE)
+ memcpy(info->swizzle, tex_conv_table[dst_res->base.format].swizzle, sizeof(info->swizzle));
+
+ if (vrend_blit_needs_redblue_swizzle(src_res, dst_res, &info->b)) {
+ VREND_DEBUG(dbg_blit, ctx, "Applying red/blue swizzle during blit involving an external BGR* resource\n");
+ uint8_t temp = info->swizzle[0];
+ info->swizzle[0] = info->swizzle[2];
+ info->swizzle[2] = temp;
+ info->can_fbo_blit = false;
+ }
+
+ /* for scaled MS blits we either need extensions or hand roll */
+ if (info->b.mask & PIPE_MASK_RGBA &&
+ src_res->base.nr_samples > 0 &&
+ src_res->base.nr_samples != dst_res->base.nr_samples &&
+ (info->b.src.box.width != info->b.dst.box.width ||
+ info->b.src.box.height != info->b.dst.box.height)) {
+ if (has_feature(feat_ms_scaled_blit))
+ info->gl_filter = GL_SCALED_RESOLVE_NICEST_EXT;
+ else
+ info->can_fbo_blit = false;
+ }
+
+ /* need to apply manual gamma correction in the blitter for external
+ * resources that don't support colorspace conversion via views
+ * (EGL-image bgr* textures). */
+ if (vrend_resource_needs_srgb_decode(src_res, info->b.src.format)) {
+ info->needs_manual_srgb_decode = true;
+ info->can_fbo_blit = false;
+ }
+ if (vrend_resource_needs_srgb_encode(dst_res, info->b.dst.format)) {
+ info->needs_manual_srgb_encode = true;
+ info->can_fbo_blit = false;
+ }
+}
+
+/* Prepare the extra blit info and return true if a FBO blit can be used. */
+static bool vrend_renderer_prepare_blit(struct vrend_context *ctx,
+ struct vrend_resource *src_res,
+ struct vrend_resource *dst_res,
+ const struct vrend_blit_info *info)
+{
+ if (!info->can_fbo_blit)
+ return false;
/* if we can't make FBO's use the fallback path */
if (!vrend_format_can_render(src_res->base.format) &&
!vrend_format_is_ds(src_res->base.format))
- use_gl = true;
- if (!vrend_format_can_render(dst_res->base.format) &&
- !vrend_format_is_ds(dst_res->base.format))
- use_gl = true;
+ return false;
+
+ if (!vrend_format_can_render(src_res->base.format) &&
+ !vrend_format_is_ds(src_res->base.format))
+ return false;
/* different depth formats */
if (vrend_format_is_ds(src_res->base.format) &&
@@ -9068,144 +9992,74 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
if (src_res->base.format != dst_res->base.format) {
if (!(src_res->base.format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
(dst_res->base.format == PIPE_FORMAT_Z24X8_UNORM))) {
- use_gl = true;
+ return false;
}
}
}
/* glBlitFramebuffer - can support depth stencil with NEAREST
which we use for mipmaps */
- if ((info->mask & (PIPE_MASK_Z | PIPE_MASK_S)) && info->filter == PIPE_TEX_FILTER_LINEAR)
- use_gl = true;
-
- /* for scaled MS blits we either need extensions or hand roll */
- if (info->mask & PIPE_MASK_RGBA &&
- src_res->base.nr_samples > 0 &&
- src_res->base.nr_samples != dst_res->base.nr_samples &&
- (info->src.box.width != info->dst.box.width ||
- info->src.box.height != info->dst.box.height)) {
- if (has_feature(feat_ms_scaled_blit))
- filter = GL_SCALED_RESOLVE_NICEST_EXT;
- else
- use_gl = true;
- }
-
- if (!dst_res->y_0_top) {
- dst_y1 = info->dst.box.y + info->dst.box.height;
- dst_y2 = info->dst.box.y;
- } else {
- dst_y1 = dst_res->base.height0 - info->dst.box.y - info->dst.box.height;
- dst_y2 = dst_res->base.height0 - info->dst.box.y;
- }
-
- if (!src_res->y_0_top) {
- src_y1 = info->src.box.y + info->src.box.height;
- src_y2 = info->src.box.y;
- } else {
- src_y1 = src_res->base.height0 - info->src.box.y - info->src.box.height;
- src_y2 = src_res->base.height0 - info->src.box.y;
- }
+ if ((info->b.mask & (PIPE_MASK_Z | PIPE_MASK_S)) && info->gl_filter != GL_NEAREST)
+ return false;
/* since upstream mesa change
* https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5034
* an imported RGBX texture uses GL_RGB8 as internal format while
* in virgl_formats, we use GL_RGBA8 internal format for RGBX texutre.
* on GLES host, glBlitFramebuffer doesn't work in such case. */
- if (vrend_state.use_gles && !use_gl &&
- info->mask & PIPE_MASK_RGBA &&
+ if (vrend_state.use_gles &&
+ info->b.mask & PIPE_MASK_RGBA &&
src_res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM &&
dst_res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM &&
has_bit(src_res->storage_bits, VREND_STORAGE_EGL_IMAGE) !=
has_bit(dst_res->storage_bits, VREND_STORAGE_EGL_IMAGE) &&
(src_res->base.nr_samples || dst_res->base.nr_samples)) {
- use_gl = true;
+ return false;
}
- if (use_gl) {;}
/* GLES generally doesn't support blitting to a multi-sample FB, and also not
* from a multi-sample FB where the regions are not exatly the same or the
* source and target format are different. For
* downsampling DS blits to zero samples we solve this by doing two blits */
- else if (vrend_state.use_gles &&
- ((dst_res->base.nr_samples > 0) ||
- ((info->mask & PIPE_MASK_RGBA) &&
- (src_res->base.nr_samples > 0) &&
- (info->src.box.x != info->dst.box.x ||
- info->src.box.width != info->dst.box.width ||
- dst_y1 != src_y1 || dst_y2 != src_y2 ||
- info->src.format != info->dst.format))
- )
- ) {
+ if (vrend_state.use_gles &&
+ ((dst_res->base.nr_samples > 0) ||
+ ((info->b.mask & PIPE_MASK_RGBA) &&
+ (src_res->base.nr_samples > 0) &&
+ (info->b.src.box.x != info->b.dst.box.x ||
+ info->b.src.box.width != info->b.dst.box.width ||
+ info->dst_y1 != info->src_y1 || info->dst_y2 != info->src_y2 ||
+ info->b.src.format != info->b.dst.format))
+ )) {
VREND_DEBUG(dbg_blit, ctx, "Use GL fallback because dst:ms:%d src:ms:%d (%d %d %d %d) -> (%d %d %d %d)\n",
- dst_res->base.nr_samples, src_res->base.nr_samples, info->src.box.x, info->src.box.x + info->src.box.width,
- src_y1, src_y2, info->dst.box.x, info->dst.box.x + info->dst.box.width, dst_y1, dst_y2);
- use_gl = true;
- }
- /* for 3D mipmapped blits - hand roll time */
- else if (info->src.box.depth != info->dst.box.depth)
- use_gl = true;
- else if (vrend_blit_needs_swizzle(info->dst.format, info->src.format)) {
- use_gl = true;
- needs_swizzle = true;
+ dst_res->base.nr_samples, src_res->base.nr_samples, info->b.src.box.x, info->b.src.box.x + info->b.src.box.width,
+ info->src_y1, info->src_y2, info->b.dst.box.x, info->b.dst.box.x + info->b.dst.box.width, info->dst_y1, info->dst_y2);
+ return false;
}
- if ((src_res->base.format != info->src.format) && has_feature(feat_texture_view))
- blitter_views[0] = vrend_make_view(src_res, info->src.format);
-
- if ((dst_res->base.format != info->dst.format) && has_feature(feat_texture_view))
- blitter_views[1] = vrend_make_view(dst_res, info->dst.format);
-
- /* Virgl's BGR* formats always use GL_RGBA8 internal format so texture views have no format
- * conversion effects. Swizzling during blits is required instead.
- * Also, GBM/EGL-backed (i.e. external) BGR* resources are always stored with BGR* internal
- * format, despite Virgl's use of the GL_RGBA8 internal format, so special care must be taken
- * when determining the swizzling.
- */
- bool needs_redblue_swizzle = false;
- if (vrend_resource_is_emulated_bgra(src_res) ^ vrend_resource_is_emulated_bgra(dst_res))
- needs_redblue_swizzle = !needs_redblue_swizzle;
-
- /* Virgl blits support "views" on source/dest resources, allowing another level of format
- * conversion on top of the host's GL API. These views need to be reconciled manually when
- * any BGR* resources are involved, since they are internally stored with RGB* byte-ordering,
- * and externally stored with BGR* byte-ordering.
- */
- if (vrend_format_is_bgra(src_res->base.format) ^ vrend_format_is_bgra(info->src.format))
- needs_redblue_swizzle = !needs_redblue_swizzle;
- if (vrend_format_is_bgra(dst_res->base.format) ^ vrend_format_is_bgra(info->dst.format))
- needs_redblue_swizzle = !needs_redblue_swizzle;
-
- uint8_t blit_swizzle[4] = {0, 1, 2, 3};
- if (needs_swizzle && vrend_get_format_table_entry(dst_res->base.format)->flags & VIRGL_TEXTURE_NEED_SWIZZLE)
- memcpy(blit_swizzle, tex_conv_table[dst_res->base.format].swizzle, sizeof(blit_swizzle));
-
- if (needs_redblue_swizzle) {
- VREND_DEBUG(dbg_blit, ctx, "Applying red/blue swizzle during blit involving an external BGR* resource\n");
- use_gl = true;
- uint8_t temp = blit_swizzle[0];
- blit_swizzle[0] = blit_swizzle[2];
- blit_swizzle[2] = temp;
- }
+ /* for 3D mipmapped blits - hand roll time */
+ if (info->b.src.box.depth != info->b.dst.box.depth)
+ return false;
- if (use_gl) {
- VREND_DEBUG(dbg_blit, ctx, "BLIT_INT: use GL fallback\n");
- vrend_renderer_blit_gl(ctx, src_res, dst_res, blitter_views, info,
- has_feature(feat_texture_srgb_decode),
- has_feature(feat_srgb_write_control),
- blit_swizzle);
- vrend_sync_make_current(ctx->sub->gl_context);
- goto cleanup;
- }
+ return true;
+}
- if (info->mask & PIPE_MASK_Z)
+static void vrend_renderer_blit_fbo(struct vrend_context *ctx,
+ struct vrend_resource *src_res,
+ struct vrend_resource *dst_res,
+ const struct vrend_blit_info *info)
+{
+ GLbitfield glmask = 0;
+ if (info->b.mask & PIPE_MASK_Z)
glmask |= GL_DEPTH_BUFFER_BIT;
- if (info->mask & PIPE_MASK_S)
+ if (info->b.mask & PIPE_MASK_S)
glmask |= GL_STENCIL_BUFFER_BIT;
- if (info->mask & PIPE_MASK_RGBA)
+ if (info->b.mask & PIPE_MASK_RGBA)
glmask |= GL_COLOR_BUFFER_BIT;
- if (info->scissor_enable) {
- glScissor(info->scissor.minx, info->scissor.miny, info->scissor.maxx - info->scissor.minx, info->scissor.maxy - info->scissor.miny);
+ if (info->b.scissor_enable) {
+ glScissor(info->b.scissor.minx, info->b.scissor.miny,
+ info->b.scissor.maxx - info->b.scissor.minx,
+ info->b.scissor.maxy - info->b.scissor.miny);
ctx->sub->scissor_state_dirty = (1 << 0);
glEnable(GL_SCISSOR_TEST);
} else
@@ -9221,14 +10075,18 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
* limitations on GLES first copy the full frame to a non-multisample
* surface and then copy the according area to the final target surface.
*/
+ bool make_intermediate_copy = false;
+ GLuint intermediate_fbo = 0;
+ struct vrend_resource *intermediate_copy = 0;
+
if (vrend_state.use_gles &&
- (info->mask & PIPE_MASK_ZS) &&
+ (info->b.mask & PIPE_MASK_ZS) &&
((src_res->base.nr_samples > 0) &&
(src_res->base.nr_samples != dst_res->base.nr_samples)) &&
- ((info->src.box.x != info->dst.box.x) ||
- (src_y1 != dst_y1) ||
- (info->src.box.width != info->dst.box.width) ||
- (src_y2 != dst_y2))) {
+ ((info->b.src.box.x != info->b.dst.box.x) ||
+ (info->src_y1 != info->dst_y1) ||
+ (info->b.src.box.width != info->b.dst.box.width) ||
+ (info->src_y2 != info->dst_y2))) {
make_intermediate_copy = true;
@@ -9239,14 +10097,14 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
args.width = src_res->base.width0;
args.height = src_res->base.height0;
args.depth = src_res->base.depth0;
- args.format = info->src.format;
+ args.format = info->b.src.format;
args.target = src_res->base.target;
args.last_level = src_res->base.last_level;
args.array_size = src_res->base.array_size;
intermediate_copy = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
vrend_renderer_resource_copy_args(&args, intermediate_copy);
/* this is PIPE_MASK_ZS and bgra fixup is not needed */
- MAYBE_UNUSED int r = vrend_resource_alloc_texture(intermediate_copy, args.format, NULL);
+ ASSERTED int r = vrend_resource_alloc_texture(intermediate_copy, args.format, NULL);
assert(!r);
glGenFramebuffers(1, &intermediate_fbo);
@@ -9259,47 +10117,47 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
}
glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
- if (info->mask & PIPE_MASK_RGBA)
+ if (info->b.mask & PIPE_MASK_RGBA)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
else
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
- if (info->mask & PIPE_MASK_RGBA)
+ if (info->b.mask & PIPE_MASK_RGBA)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
- else if (info->mask & (PIPE_MASK_Z | PIPE_MASK_S))
+ else if (info->b.mask & (PIPE_MASK_Z | PIPE_MASK_S))
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
- if (info->src.box.depth == info->dst.box.depth)
- n_layers = info->dst.box.depth;
- for (i = 0; i < n_layers; i++) {
+
+ int n_layers = info->b.src.box.depth == info->b.dst.box.depth ? info->b.dst.box.depth : 1;
+ for (int i = 0; i < n_layers; i++) {
glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
- vrend_fb_bind_texture_id(src_res, blitter_views[0], 0, info->src.level, info->src.box.z + i, 0);
+ vrend_fb_bind_texture_id(src_res, info->src_view, 0, info->b.src.level, info->b.src.box.z + i, 0);
if (make_intermediate_copy) {
- int level_width = u_minify(src_res->base.width0, info->src.level);
- int level_height = u_minify(src_res->base.width0, info->src.level);
+ int level_width = u_minify(src_res->base.width0, info->b.src.level);
+ int level_height = u_minify(src_res->base.width0, info->b.src.level);
glBindFramebuffer(GL_FRAMEBUFFER, intermediate_fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
- vrend_fb_bind_texture(intermediate_copy, 0, info->src.level, info->src.box.z + i);
+ vrend_fb_bind_texture(intermediate_copy, 0, info->b.src.level, info->b.src.box.z + i);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, intermediate_fbo);
glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
glBlitFramebuffer(0, 0, level_width, level_height,
0, 0, level_width, level_height,
- glmask, filter);
+ glmask, info->gl_filter);
}
glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
- vrend_fb_bind_texture_id(dst_res, blitter_views[1], 0, info->dst.level, info->dst.box.z + i, 0);
+ vrend_fb_bind_texture_id(dst_res, info->dst_view, 0, info->b.dst.level, info->b.dst.box.z + i, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
if (has_feature(feat_srgb_write_control)) {
- if (util_format_is_srgb(info->dst.format) ||
- util_format_is_srgb(info->src.format))
+ if (util_format_is_srgb(info->b.dst.format) ||
+ util_format_is_srgb(info->b.src.format))
glEnable(GL_FRAMEBUFFER_SRGB);
else
glDisable(GL_FRAMEBUFFER_SRGB);
@@ -9307,15 +10165,15 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
glBindFramebuffer(GL_READ_FRAMEBUFFER, intermediate_fbo);
- glBlitFramebuffer(info->src.box.x,
- src_y1,
- info->src.box.x + info->src.box.width,
- src_y2,
- info->dst.box.x,
- dst_y1,
- info->dst.box.x + info->dst.box.width,
- dst_y2,
- glmask, filter);
+ glBlitFramebuffer(info->b.src.box.x,
+ info->src_y1,
+ info->b.src.box.x + info->b.src.box.width,
+ info->src_y2,
+ info->b.dst.box.x,
+ info->dst_y1,
+ info->b.dst.box.x + info->b.dst.box.width,
+ info->dst_y2,
+ glmask, info->gl_filter);
}
glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
@@ -9349,12 +10207,49 @@ static void vrend_renderer_blit_int(struct vrend_context *ctx,
else
glDisable(GL_SCISSOR_TEST);
-cleanup:
- if (blitter_views[0] != src_res->id)
- glDeleteTextures(1, &blitter_views[0]);
+}
+
+static void vrend_renderer_blit_int(struct vrend_context *ctx,
+ struct vrend_resource *src_res,
+ struct vrend_resource *dst_res,
+ const struct pipe_blit_info *info)
+{
+ struct vrend_blit_info blit_info = {
+ .b = *info,
+ .src_view = src_res->id,
+ .dst_view = dst_res->id,
+ .swizzle = {0, 1, 2, 3}
+ };
+
+ /* We create the texture views in this function instead of doing it in
+ * vrend_renderer_prepare_blit_extra_info because we also delete them here */
+ if ((src_res->base.format != info->src.format) && has_feature(feat_texture_view) &&
+ vrend_resource_supports_view(src_res, info->src.format))
+ blit_info.src_view = vrend_make_view(src_res, info->src.format);
+
+ if ((dst_res->base.format != info->dst.format) && has_feature(feat_texture_view) &&
+ vrend_resource_supports_view(dst_res, info->dst.format))
+ blit_info.dst_view = vrend_make_view(dst_res, info->dst.format);
+
+ vrend_renderer_prepare_blit_extra_info(ctx, src_res, dst_res, &blit_info);
+
+ if (vrend_renderer_prepare_blit(ctx, src_res, dst_res, &blit_info)) {
+ VREND_DEBUG(dbg_blit, ctx, "BLIT_INT: use FBO blit\n");
+ vrend_renderer_blit_fbo(ctx, src_res, dst_res, &blit_info);
+ } else {
+ blit_info.has_srgb_write_control = has_feature(feat_texture_srgb_decode);
+ blit_info.has_texture_srgb_decode = has_feature(feat_srgb_write_control);
- if (blitter_views[1] != dst_res->id)
- glDeleteTextures(1, &blitter_views[1]);
+ VREND_DEBUG(dbg_blit, ctx, "BLIT_INT: use GL fallback\n");
+ vrend_renderer_blit_gl(ctx, src_res, dst_res, &blit_info);
+ vrend_sync_make_current(ctx->sub->gl_context);
+ }
+
+ if (blit_info.src_view != src_res->id)
+ glDeleteTextures(1, &blit_info.src_view);
+
+ if (blit_info.dst_view != dst_res->id)
+ glDeleteTextures(1, &blit_info.dst_view);
}
void vrend_renderer_blit(struct vrend_context *ctx,
@@ -9363,6 +10258,7 @@ void vrend_renderer_blit(struct vrend_context *ctx,
{
unsigned int comp_flags = 0;
struct vrend_resource *src_res, *dst_res;
+ int src_width, src_height, dst_width, dst_height;
src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
@@ -9418,6 +10314,18 @@ void vrend_renderer_blit(struct vrend_context *ctx,
if (dst_res->egl_image)
comp_flags ^= VREND_COPY_COMPAT_FLAG_ONE_IS_EGL_IMAGE;
+ /* resources that don't support texture views but require colorspace conversion
+ * must have it applied manually in a shader, i.e. require following the
+ * vrend_renderer_blit_int() path. */
+ bool eglimage_copy_compatible =
+ !(vrend_resource_needs_srgb_decode(src_res, info->src.format) ||
+ vrend_resource_needs_srgb_encode(dst_res, info->dst.format));
+
+ src_width = u_minify(src_res->base.width0, info->src.level);
+ src_height = u_minify(src_res->base.height0, info->src.level);
+ dst_width = u_minify(dst_res->base.width0, info->dst.level);
+ dst_height = u_minify(dst_res->base.height0, info->dst.level);
+
/* The Gallium blit function can be called for a general blit that may
* scale, convert the data, and apply some rander states, or it is called via
* glCopyImageSubData. If the src or the dst image are equal, or the two
@@ -9428,9 +10336,14 @@ void vrend_renderer_blit(struct vrend_context *ctx,
if (has_feature(feat_copy_image) &&
(!info->render_condition_enable || !ctx->sub->cond_render_gl_mode) &&
format_is_copy_compatible(info->src.format,info->dst.format, comp_flags) &&
+ eglimage_copy_compatible &&
!info->scissor_enable && (info->filter == PIPE_TEX_FILTER_NEAREST) &&
!info->alpha_blend && (info->mask == PIPE_MASK_RGBA) &&
src_res->base.nr_samples == dst_res->base.nr_samples &&
+ info->src.box.x + info->src.box.width <= src_width &&
+ info->dst.box.x + info->dst.box.width <= dst_width &&
+ info->src.box.y + info->src.box.height <= src_height &&
+ info->dst.box.y + info->dst.box.height <= dst_height &&
info->src.box.width == info->dst.box.width &&
info->src.box.height == info->dst.box.height &&
info->src.box.depth == info->dst.box.depth) {
@@ -9458,7 +10371,7 @@ void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
int vrend_renderer_create_fence(struct vrend_context *ctx,
uint32_t flags,
- void *fence_cookie)
+ uint64_t fence_id)
{
struct vrend_fence *fence;
@@ -9471,7 +10384,7 @@ int vrend_renderer_create_fence(struct vrend_context *ctx,
fence->ctx = ctx;
fence->flags = flags;
- fence->fence_cookie = fence_cookie;
+ fence->fence_id = fence_id;
#ifdef HAVE_EPOXY_EGL_H
if (vrend_state.use_egl_fence) {
@@ -9487,10 +10400,10 @@ int vrend_renderer_create_fence(struct vrend_context *ctx,
goto fail;
if (vrend_state.sync_thread) {
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
list_addtail(&fence->fences, &vrend_state.fence_wait_list);
- pipe_condvar_signal(vrend_state.fence_cond);
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ cnd_signal(&vrend_state.fence_cond);
+ mtx_unlock(&vrend_state.fence_mutex);
} else
list_addtail(&fence->fences, &vrend_state.fence_list);
return 0;
@@ -9527,17 +10440,13 @@ void vrend_renderer_check_fences(void)
struct list_head retired_fences;
struct vrend_fence *fence, *stor;
- /* No need to check the fence list, fences are retired directly in
- * the polling thread in that case.
- */
- if (vrend_state.use_async_fence_cb)
- return;
+ assert(!vrend_state.use_async_fence_cb);
list_inithead(&retired_fences);
if (vrend_state.sync_thread) {
flush_eventfd(vrend_state.eventfd);
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
/* vrend_free_fences_for_context might have marked the fence invalid
* by setting fence->ctx to NULL
@@ -9554,7 +10463,7 @@ void vrend_renderer_check_fences(void)
free_fence_locked(fence);
}
}
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ mtx_unlock(&vrend_state.fence_mutex);
} else {
vrend_renderer_force_ctx_0();
@@ -9577,12 +10486,11 @@ void vrend_renderer_check_fences(void)
if (LIST_IS_EMPTY(&retired_fences))
return;
- /* no need to lock when not using a sync thread */
- vrend_renderer_check_queries_locked();
+ vrend_renderer_check_queries();
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) {
struct vrend_context *ctx = fence->ctx;
- ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data);
+ ctx->fence_retire(fence->fence_id, ctx->fence_retire_data);
free_fence_locked(fence);
}
@@ -9612,17 +10520,17 @@ static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *r
static inline void
vrend_update_oq_samples_multiplier(struct vrend_context *ctx)
{
- if (!vrend_state.current_ctx->sub->fake_occlusion_query_samples_passed_multiplier) {
+ if (!ctx->sub->fake_occlusion_query_samples_passed_multiplier) {
uint32_t multiplier = 0;
bool tweaked = vrend_get_tweak_is_active_with_params(vrend_get_context_tweaks(ctx),
virgl_tweak_gles_tf3_samples_passes_multiplier, &multiplier);
- vrend_state.current_ctx->sub->fake_occlusion_query_samples_passed_multiplier =
+ ctx->sub->fake_occlusion_query_samples_passed_multiplier =
tweaked ? multiplier: fake_occlusion_query_samples_passed_default;
}
}
-static bool vrend_check_query_locked(struct vrend_query *query)
+static bool vrend_check_query(struct vrend_query *query)
{
struct virgl_host_query_state state;
bool ret;
@@ -9637,8 +10545,8 @@ static bool vrend_check_query_locked(struct vrend_query *query)
* blow the number up so that the client doesn't think it was just one pixel
* and discards an object that might be bigger */
if (query->fake_samples_passed) {
- vrend_update_oq_samples_multiplier(vrend_state.current_ctx);
- state.result *= vrend_state.current_ctx->sub->fake_occlusion_query_samples_passed_multiplier;
+ vrend_update_oq_samples_multiplier(query->ctx);
+ state.result *= query->ctx->sub->fake_occlusion_query_samples_passed_multiplier;
}
state.query_state = VIRGL_QUERY_STATE_DONE;
@@ -9653,35 +10561,70 @@ static bool vrend_check_query_locked(struct vrend_query *query)
return true;
}
-static bool vrend_hw_switch_query_context(struct vrend_context *ctx)
+static struct vrend_sub_context *vrend_renderer_find_sub_ctx(struct vrend_context *ctx,
+ int sub_ctx_id)
{
- if (vrend_state.use_async_fence_cb) {
- if (!ctx)
- return false;
+ struct vrend_sub_context *sub;
- if (ctx == vrend_state.current_sync_thread_ctx)
- return true;
+ if (ctx->sub && ctx->sub->sub_ctx_id == sub_ctx_id)
+ return ctx->sub;
- if (ctx->ctx_id != 0 && ctx->in_error)
- return false;
+ LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
+ if (sub->sub_ctx_id == sub_ctx_id)
+ return sub;
+ }
- vrend_clicbs->make_current(ctx->sub->gl_context);
- vrend_state.current_sync_thread_ctx = ctx;
+ return NULL;
+}
+
+static bool vrend_hw_switch_context_with_sub(struct vrend_context *ctx, int sub_ctx_id)
+{
+ if (!ctx)
+ return false;
+
+ if (ctx == vrend_state.current_ctx && sub_ctx_id == ctx->sub->sub_ctx_id &&
+ ctx->ctx_switch_pending == false) {
return true;
- } else {
- return vrend_hw_switch_context(ctx, true);
}
+
+ if (ctx->ctx_id != 0 && ctx->in_error)
+ return false;
+
+ struct vrend_sub_context *sub = vrend_renderer_find_sub_ctx(ctx, sub_ctx_id);
+ if (!sub)
+ return false;
+
+ /* force the gl context switch to occur */
+ if (ctx->sub != sub) {
+ vrend_state.current_hw_ctx = NULL;
+ ctx->sub = sub;
+ }
+
+ ctx->ctx_switch_pending = true;
+ vrend_finish_context_switch(ctx);
+
+ vrend_state.current_ctx = ctx;
+ return true;
}
-static void vrend_renderer_check_queries_locked(void)
+static void vrend_renderer_check_queries(void)
{
struct vrend_query *query, *stor;
LIST_FOR_EACH_ENTRY_SAFE(query, stor, &vrend_state.waiting_query_list, waiting_queries) {
- if (!vrend_hw_switch_query_context(query->ctx) ||
- vrend_check_query_locked(query))
- list_delinit(&query->waiting_queries);
+ if (!vrend_hw_switch_context_with_sub(query->ctx, query->sub_ctx_id)) {
+ vrend_printf("failed to switch to context (%d) with sub (%d) for query %u\n",
+ query->ctx->ctx_id, query->sub_ctx_id, query->id);
+ }
+ else if (!vrend_check_query(query)) {
+ continue;
+ }
+
+ list_delinit(&query->waiting_queries);
}
+
+ atomic_store(&vrend_state.has_waiting_queries,
+ !LIST_IS_EMPTY(&vrend_state.waiting_query_list));
}
bool vrend_hw_switch_context(struct vrend_context *ctx, bool now)
@@ -9734,11 +10677,8 @@ int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
uint32_t query_type, uint32_t query_index,
uint32_t res_handle, UNUSED uint32_t offset)
{
- struct vrend_query *q;
- struct vrend_resource *res;
- uint32_t ret_handle;
bool fake_samples_passed = false;
- res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
+ struct vrend_resource *res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res || !has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) {
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
@@ -9758,14 +10698,17 @@ int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
return EINVAL;
}
- q = CALLOC_STRUCT(vrend_query);
+ struct vrend_query *q = CALLOC_STRUCT(vrend_query);
if (!q)
return ENOMEM;
+ int err = 0;
+
list_inithead(&q->waiting_queries);
q->type = query_type;
q->index = query_index;
q->ctx = ctx;
+ q->sub_ctx_id = ctx->sub->sub_ctx_id;
q->fake_samples_passed = fake_samples_passed;
vrend_resource_reference(&q->res, res);
@@ -9775,20 +10718,22 @@ int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
q->gltype = GL_SAMPLES_PASSED_ARB;
break;
case PIPE_QUERY_OCCLUSION_PREDICATE:
- if (has_feature(feat_occlusion_query_boolean)) {
+ if (has_feature(feat_occlusion_query_boolean))
q->gltype = GL_ANY_SAMPLES_PASSED;
- break;
- } else
- return EINVAL;
+ else
+ err = EINVAL;
+ break;
case PIPE_QUERY_TIMESTAMP:
- if (!has_feature(feat_timer_query))
- return EINVAL;
- q->gltype = GL_TIMESTAMP;
+ if (has_feature(feat_timer_query))
+ q->gltype = GL_TIMESTAMP;
+ else
+ err = EINVAL;
break;
case PIPE_QUERY_TIME_ELAPSED:
- if (!has_feature(feat_timer_query))
- return EINVAL;
- q->gltype = GL_TIME_ELAPSED;
+ if (has_feature(feat_timer_query))
+ q->gltype = GL_TIME_ELAPSED;
+ else
+ err = EINVAL;
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
q->gltype = GL_PRIMITIVES_GENERATED;
@@ -9800,29 +10745,34 @@ int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
q->gltype = GL_ANY_SAMPLES_PASSED_CONSERVATIVE;
break;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- if (!has_feature(feat_transform_feedback_overflow_query))
- return EINVAL;
- q->gltype = GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB;
+ if (has_feature(feat_transform_feedback_overflow_query))
+ q->gltype = GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB;
+ else
+ err = EINVAL;
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
- if (!has_feature(feat_transform_feedback_overflow_query))
- return EINVAL;
- q->gltype = GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB;
+ if (has_feature(feat_transform_feedback_overflow_query))
+ q->gltype = GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB;
+ else
+ err = EINVAL;
break;
default:
vrend_printf("unknown query object received %d\n", q->type);
break;
}
- glGenQueries(1, &q->id);
+ if (!err) {
+ glGenQueries(1, &q->id);
+ if (!vrend_renderer_object_insert(ctx, q, handle, VIRGL_OBJECT_QUERY)) {
+ glDeleteQueries(1, &q->id);
+ err = ENOMEM;
+ }
+ }
- ret_handle = vrend_renderer_object_insert(ctx, q, handle,
- VIRGL_OBJECT_QUERY);
- if (!ret_handle) {
+ if (err)
FREE(q);
- return ENOMEM;
- }
- return 0;
+
+ return err;
}
static void vrend_destroy_query(struct vrend_query *query)
@@ -9850,9 +10800,7 @@ int vrend_begin_query(struct vrend_context *ctx, uint32_t handle)
if (q->index > 0 && !has_feature(feat_transform_feedback3))
return EINVAL;
- lock_sync();
list_delinit(&q->waiting_queries);
- unlock_sync();
if (q->gltype == GL_TIMESTAMP)
return 0;
@@ -9903,14 +10851,15 @@ void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
if (!q)
return;
- lock_sync();
- ret = vrend_check_query_locked(q);
+ ret = vrend_check_query(q);
if (ret) {
list_delinit(&q->waiting_queries);
} else if (LIST_IS_EMPTY(&q->waiting_queries)) {
list_addtail(&q->waiting_queries, &vrend_state.waiting_query_list);
}
- unlock_sync();
+
+ atomic_store(&vrend_state.has_waiting_queries,
+ !LIST_IS_EMPTY(&vrend_state.waiting_query_list));
}
#define COPY_QUERY_RESULT_TO_BUFFER(resid, offset, pvalue, size, multiplier) \
@@ -10076,7 +11025,7 @@ void vrend_render_condition(struct vrend_context *ctx,
ctx->sub->cond_render_gl_mode = glmode;
if (has_feature(feat_gl_conditional_render))
glBeginConditionalRender(q->id, glmode);
- if (has_feature(feat_nv_conditional_render))
+ else if (has_feature(feat_nv_conditional_render))
glBeginConditionalRenderNV(q->id, glmode);
}
@@ -10119,19 +11068,17 @@ static int vrender_get_glsl_version(void)
{
int major_local = 0, minor_local = 0;
const GLubyte *version_str;
- MAYBE_UNUSED int c;
+ ASSERTED int c;
version_str = glGetString(GL_SHADING_LANGUAGE_VERSION);
if (vrend_state.use_gles) {
- char tmp[20];
- c = sscanf((const char *)version_str, "%s %s %s %s %i.%i",
- tmp, tmp, tmp, tmp, &major_local, &minor_local);
- assert(c == 6);
+ c = sscanf((const char *)version_str, "%*s %*s %*s %*s %i.%i",
+ &major_local, &minor_local);
} else {
c = sscanf((const char *)version_str, "%i.%i",
&major_local, &minor_local);
- assert(c == 2);
}
+ assert(c == 2);
return (major_local * 100) + minor_local;
}
@@ -10250,7 +11197,9 @@ static void vrend_renderer_fill_caps_v1(int gl_ver, int gles_ver, union virgl_ca
if (has_feature(feat_ubo)) {
glGetIntegerv(GL_MAX_VERTEX_UNIFORM_BLOCKS, &max);
- caps->v1.max_uniform_blocks = max + 1;
+ /* GL_MAX_VERTEX_UNIFORM_BLOCKS is omitting the ordinary uniform block, add it
+ * also reduce by 1 as we might generate a VirglBlock helper uniform block */
+ caps->v1.max_uniform_blocks = max + 1 - 1;
}
if (has_feature(feat_depth_clamp))
@@ -10362,7 +11311,7 @@ static void vrend_renderer_fill_caps_v1(int gl_ver, int gles_ver, union virgl_ca
if (has_feature(feat_arb_or_gles_ext_texture_buffer)) {
glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &max);
- caps->v1.max_tbo_size = max;
+ vrend_state.max_texture_buffer_size = caps->v1.max_tbo_size = max;
}
if (has_feature(feat_texture_gather)) {
@@ -10381,6 +11330,10 @@ static void vrend_renderer_fill_caps_v1(int gl_ver, int gles_ver, union virgl_ca
caps->v1.max_viewports = 1;
}
+ if (has_feature(feat_timer_query)) {
+ caps->v1.bset.timer_query = 1;
+ }
+
/* Common limits for all backends. */
caps->v1.max_render_targets = vrend_state.max_draw_buffers;
@@ -10419,11 +11372,18 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
* this value to avoid regressions when a guest with a new mesa version is
* run on an old virgl host. Use it also to indicate non-cap fixes on the
* host that help enable features in the guest. */
- caps->v2.host_feature_check_version = 5;
+ caps->v2.host_feature_check_version = 15;
/* Forward host GL_RENDERER to the guest. */
strncpy(caps->v2.renderer, renderer, sizeof(caps->v2.renderer) - 1);
+ /* glamor reject llvmpipe, and since the renderer string is
+ * composed of "virgl" and this renderer string we have to
+ * hide the "llvmpipe" part */
+ char *llvmpipe_string = strstr(caps->v2.renderer, "llvmpipe");
+ if (llvmpipe_string)
+ memcpy(llvmpipe_string, "LLVMPIPE", 8);
+
glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, range);
caps->v2.min_aliased_point_size = range[0];
caps->v2.max_aliased_point_size = range[1];
@@ -10478,6 +11438,8 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
} else
caps->v2.max_shader_patch_varyings = 0;
+ vrend_state.max_shader_patch_varyings = caps->v2.max_shader_patch_varyings;
+
if (has_feature(feat_texture_gather)) {
glGetIntegerv(GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET, &caps->v2.min_texture_gather_offset);
glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET, &caps->v2.max_texture_gather_offset);
@@ -10543,42 +11505,57 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
}
if (has_feature(feat_atomic_counters)) {
- glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTERS,
- (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_VERTEX));
+
+ /* On GLES hosts we want atomics to be lowered to SSBOs */
+ if (gl_ver > 0) {
+ glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTERS,
+ (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_VERTEX));
+ glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTERS,
+ (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_FRAGMENT));
+
+ if (has_feature(feat_geometry_shader)) {
+ glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTERS,
+ (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_GEOMETRY));
+ }
+
+ if (has_feature(feat_tessellation)) {
+ glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS,
+ (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_CTRL));
+ glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS,
+ (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_EVAL));
+ }
+
+ if (has_feature(feat_compute_shader)) {
+ glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTERS,
+ (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_COMPUTE));
+ }
+
+ glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTERS,
+ (GLint*)&caps->v2.max_combined_atomic_counters);
+ }
+
glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_VERTEX));
- glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTERS,
- (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_FRAGMENT));
+
glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_FRAGMENT));
- if (has_feature(feat_geometry_shader)) {
- glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTERS,
- (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_GEOMETRY));
+ if (has_feature(feat_geometry_shader))
glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_GEOMETRY));
- }
if (has_feature(feat_tessellation)) {
- glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS,
- (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_CTRL));
glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_TESS_CTRL));
- glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS,
- (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_EVAL));
glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_TESS_EVAL));
}
if (has_feature(feat_compute_shader)) {
- glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTERS,
- (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_COMPUTE));
glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_COMPUTE));
}
- glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTERS,
- (GLint*)&caps->v2.max_combined_atomic_counters);
glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS,
(GLint*)&caps->v2.max_combined_atomic_counter_buffers);
}
@@ -10610,10 +11587,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (has_feature(feat_texture_barrier))
caps->v2.capability_bits |= VIRGL_CAP_TEXTURE_BARRIER;
- /* If we enable input arrays and don't have enhanced layouts then we
- * can't support components. */
- if (has_feature(feat_enhanced_layouts))
- caps->v2.capability_bits |= VIRGL_CAP_TGSI_COMPONENTS;
+ caps->v2.capability_bits |= VIRGL_CAP_TGSI_COMPONENTS;
if (has_feature(feat_srgb_write_control))
caps->v2.capability_bits |= VIRGL_CAP_SRGB_WRITE_CONTROL;
@@ -10638,7 +11612,7 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
/* We want to expose ARB_gpu_shader_fp64 when running on top of ES */
if (vrend_state.use_gles) {
- caps->v2.capability_bits |= VIRGL_CAP_FAKE_FP64;
+ caps->v2.capability_bits |= VIRGL_CAP_HOST_IS_GLES;
}
if (has_feature(feat_indirect_draw))
@@ -10653,17 +11627,31 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
for (int i = 0; i < VIRGL_FORMAT_MAX; i++) {
enum virgl_formats fmt = (enum virgl_formats)i;
if (tex_conv_table[i].internalformat != 0) {
+ const char *readback_str = "";
+ const char *multisample_str = "";
+ bool log_texture_feature = false;
if (vrend_format_can_readback(fmt)) {
- VREND_DEBUG(dbg_features, NULL, "Support readback of %s\n",
- util_format_name(fmt));
+ log_texture_feature = true;
+ readback_str = "readback";
set_format_bit(&caps->v2.supported_readback_formats, fmt);
}
+ if (vrend_format_can_multisample(fmt)) {
+ log_texture_feature = true;
+ multisample_str = "multisample";
+ set_format_bit(&caps->v2.supported_multisample_formats, fmt);
+ }
+ if (log_texture_feature)
+ VREND_DEBUG(dbg_features, NULL, "%s: Supports %s %s\n",
+ util_format_name(fmt), readback_str, multisample_str);
}
if (vrend_format_can_scanout(fmt))
set_format_bit(&caps->v2.scanout, fmt);
}
+ /* Needed for framebuffer_no_attachment */
+ set_format_bit(&caps->v2.supported_multisample_formats, VIRGL_FORMAT_NONE);
+
if (has_feature(feat_clear_texture))
caps->v2.capability_bits |= VIRGL_CAP_CLEAR_TEXTURE;
@@ -10680,7 +11668,8 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (has_feature(feat_arb_buffer_storage) && !vrend_state.use_external_blob) {
const char *vendor = (const char *)glGetString(GL_VENDOR);
- bool is_mesa = ((strstr(renderer, "Mesa") != NULL) || (strstr(renderer, "DRM") != NULL));
+ bool is_mesa = ((strstr(renderer, "Mesa") != NULL) || (strstr(renderer, "DRM") != NULL) ||
+ (strstr(renderer, "llvmpipe") != NULL));
/*
* Intel GPUs (aside from Atom, which doesn't expose GL4.5) are cache-coherent.
* Mesa AMDGPUs use write-combine mappings for coherent/persistent memory (see
@@ -10695,6 +11684,8 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
vrend_state.inferred_gl_caching_type = VIRGL_RENDERER_MAP_CACHE_CACHED;
else if (strstr(vendor, "AMD") != NULL)
vrend_state.inferred_gl_caching_type = VIRGL_RENDERER_MAP_CACHE_WC;
+ else if (strstr(vendor, "Mesa") != NULL)
+ vrend_state.inferred_gl_caching_type = VIRGL_RENDERER_MAP_CACHE_CACHED;
} else {
/* This is an educated guess since things don't explode with VMX + Nvidia. */
if (strstr(renderer, "Quadro K2200") != NULL)
@@ -10706,10 +11697,14 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
}
#ifdef ENABLE_MINIGBM_ALLOCATION
- if (has_feature(feat_memory_object) && has_feature(feat_memory_object_fd)) {
- if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915") &&
+ if (gbm) {
+ if (has_feature(feat_memory_object) && has_feature(feat_memory_object_fd)) {
+ if ((!strcmp(gbm_device_get_backend_name(gbm->device), "i915") ||
+ !strcmp(gbm_device_get_backend_name(gbm->device), "amdgpu")) &&
!vrend_winsys_different_gpu())
caps->v2.capability_bits |= VIRGL_CAP_ARB_BUFFER_STORAGE;
+ }
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_SCANOUT_USES_GBM;
}
#endif
@@ -10740,12 +11735,63 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (vrend_winsys_different_gpu())
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_DIFFERENT_GPU;
+ if (has_feature(feat_texture_shadow_lod))
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_TEXTURE_SHADOW_LOD;
+
+ // we use capability bits (not a version of protocol), because
+ // we disable this on client side if virglrenderer is used under
+ // vtest. vtest can't support this, because size of resource
+ // is used to create shmem. On drm path, we can use this, because
+ // size of drm resource (bo) is not passed to virglrenderer and
+ // we can pass "1" as size on drm path, but not on vtest.
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS;
+
if (has_feature(feat_anisotropic_filter)) {
float max_aniso;
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY, &max_aniso);
caps->v2.max_anisotropy = MIN2(max_aniso, 16.0);
}
+ glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &max);
+ caps->v2.max_texture_image_units = MIN2(max, PIPE_MAX_SHADER_SAMPLER_VIEWS);
+
+ if (has_feature(feat_ubo)) {
+ glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &max);
+ caps->v2.max_uniform_block_size = max;
+ }
+
+ /* Propagate the max of Uniform Components */
+ glGetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS, &max);
+ caps->v2.max_const_buffer_size[PIPE_SHADER_VERTEX] = max * 4;
+
+ glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max);
+ caps->v2.max_const_buffer_size[PIPE_SHADER_FRAGMENT] = max * 4;
+
+ if (has_feature(feat_geometry_shader)) {
+ glGetIntegerv(GL_MAX_GEOMETRY_UNIFORM_COMPONENTS, &max);
+ caps->v2.max_const_buffer_size[PIPE_SHADER_GEOMETRY] = max * 4;
+ }
+
+ if (has_feature(feat_tessellation)) {
+ glGetIntegerv(GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS, &max);
+ caps->v2.max_const_buffer_size[PIPE_SHADER_TESS_CTRL] = max * 4;
+ glGetIntegerv(GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS, &max);
+ caps->v2.max_const_buffer_size[PIPE_SHADER_TESS_EVAL] = max * 4;
+ }
+
+ if (has_feature(feat_compute_shader)) {
+ glGetIntegerv(GL_MAX_COMPUTE_UNIFORM_COMPONENTS, &max);
+ caps->v2.max_const_buffer_size[PIPE_SHADER_COMPUTE] = max * 4;
+ }
+
+ if (has_feature(feat_separate_shader_objects))
+ caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_SSO;
+
+#ifdef ENABLE_VIDEO
+ vrend_video_fill_caps(caps);
+#else
+ caps->v2.num_video_caps = 0;
+#endif
}
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
@@ -10868,6 +11914,7 @@ void *vrend_renderer_get_cursor_contents(struct pipe_resource *pres,
void vrend_renderer_force_ctx_0(void)
{
+ TRACE_FUNC();
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
vrend_hw_switch_context(vrend_state.ctx0, true);
@@ -10953,7 +12000,7 @@ void vrend_renderer_detach_res_ctx(struct vrend_context *ctx,
vrend_ctx_resource_remove(ctx->res_hash, res->res_id);
}
-static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
+struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
{
return vrend_ctx_resource_lookup(ctx->res_hash, res_handle);
}
@@ -11040,6 +12087,11 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
sub->vps[i].far_val = 1.0;
}
+ /* Default is enabled, so set the initial hardware state accordingly */
+ for (int i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
+ sub->hw_blend_state.rt[i].colormask = 0xf;
+ }
+
if (!has_feature(feat_gles31_vertex_attrib_binding)) {
glGenVertexArrays(1, &sub->vaoid);
glBindVertexArray(sub->vaoid);
@@ -11056,6 +12108,9 @@ void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
sub->object_hash = vrend_object_init_ctx_table();
+ sub->sysvalue_data.winsys_adjust_y = 1.f;
+ sub->sysvalue_data_cookie = 1;
+
ctx->sub = sub;
list_add(&sub->head, &ctx->sub_ctxs);
if (sub_ctx_id == 0)
@@ -11103,18 +12158,10 @@ void vrend_renderer_destroy_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
void vrend_renderer_set_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
- struct vrend_sub_context *sub;
- /* find the sub ctx */
-
- if (ctx->sub && ctx->sub->sub_ctx_id == sub_ctx_id)
- return;
-
- LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
- if (sub->sub_ctx_id == sub_ctx_id) {
- ctx->sub = sub;
- vrend_clicbs->make_current(sub->gl_context);
- break;
- }
+ struct vrend_sub_context *sub = vrend_renderer_find_sub_ctx(ctx, sub_ctx_id);
+ if (sub && ctx->sub != sub) {
+ ctx->sub = sub;
+ vrend_clicbs->make_current(sub->gl_context);
}
}
@@ -11138,7 +12185,10 @@ void vrend_renderer_reset(void)
int vrend_renderer_get_poll_fd(void)
{
- return vrend_state.eventfd;
+ int fd = vrend_state.eventfd;
+ if (vrend_state.use_async_fence_cb && fd < 0)
+ vrend_printf("failed to duplicate eventfd: error=%d\n", errno);
+ return fd;
}
int vrend_renderer_export_query(struct pipe_resource *pres,
@@ -11349,14 +12399,13 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres)
int vrend_renderer_create_ctx0_fence(uint32_t fence_id)
{
- void *fence_cookie = (void *)(uintptr_t)fence_id;
return vrend_renderer_create_fence(vrend_state.ctx0,
- VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_cookie);
+ VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_id);
}
#ifdef HAVE_EPOXY_EGL_H
static bool find_ctx0_fence_locked(struct list_head *fence_list,
- void *fence_cookie,
+ uint64_t fence_id,
bool *seen_first,
struct vrend_fence **fence)
{
@@ -11367,13 +12416,13 @@ static bool find_ctx0_fence_locked(struct list_head *fence_list,
if (iter->ctx != vrend_state.ctx0)
continue;
- if (iter->fence_cookie == fence_cookie) {
+ if (iter->fence_id == fence_id) {
*fence = iter;
return true;
}
if (!*seen_first) {
- if (fence_cookie < iter->fence_cookie)
+ if (fence_id < iter->fence_id)
return true;
*seen_first = true;
}
@@ -11390,18 +12439,17 @@ int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd) {
}
if (vrend_state.sync_thread)
- pipe_mutex_lock(vrend_state.fence_mutex);
+ mtx_lock(&vrend_state.fence_mutex);
- void *fence_cookie = (void *)(uintptr_t)fence_id;
bool seen_first = false;
struct vrend_fence *fence = NULL;
bool found = find_ctx0_fence_locked(&vrend_state.fence_list,
- fence_cookie,
+ fence_id,
&seen_first,
&fence);
if (!found) {
found = find_ctx0_fence_locked(&vrend_state.fence_wait_list,
- fence_cookie,
+ fence_id,
&seen_first,
&fence);
/* consider signaled when no active ctx0 fence at all */
@@ -11410,7 +12458,7 @@ int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd) {
}
if (vrend_state.sync_thread)
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ mtx_unlock(&vrend_state.fence_mutex);
if (found) {
if (fence)
@@ -11497,3 +12545,11 @@ void vrend_context_emit_string_marker(struct vrend_context *ctx, GLsizei length,
length, message);
}
}
+
+#ifdef ENABLE_VIDEO
+struct vrend_video_context *vrend_context_get_video_ctx(struct vrend_context *ctx)
+{
+ return ctx->video;
+}
+#endif
+
diff --git a/src/vrend_renderer.h b/src/vrend_renderer.h
index ac4031bc..2aedf50e 100644
--- a/src/vrend_renderer.h
+++ b/src/vrend_renderer.h
@@ -88,7 +88,7 @@ struct vrend_resource {
uint64_t mipmap_offsets[VR_MAX_TEXTURE_2D_LEVELS];
void *gbm_bo, *egl_image;
void *aux_plane_egl_image[VIRGL_GBM_MAX_PLANES];
-
+
uint64_t size;
GLbitfield buffer_storage_flags;
GLuint memobj;
@@ -100,6 +100,8 @@ struct vrend_resource {
#define VIRGL_TEXTURE_NEED_SWIZZLE (1 << 0)
#define VIRGL_TEXTURE_CAN_TEXTURE_STORAGE (1 << 1)
#define VIRGL_TEXTURE_CAN_READBACK (1 << 2)
+#define VIRGL_TEXTURE_CAN_TARGET_RECTANGLE (1 << 3)
+#define VIRGL_TEXTURE_CAN_MULTISAMPLE (1 << 4)
struct vrend_format_table {
enum virgl_formats format;
@@ -111,7 +113,7 @@ struct vrend_format_table {
uint32_t flags;
};
-typedef void (*vrend_context_fence_retire)(void *fence_cookie,
+typedef void (*vrend_context_fence_retire)(uint64_t fence_id,
void *retire_data);
struct vrend_if_cbs {
@@ -120,11 +122,15 @@ struct vrend_if_cbs {
virgl_gl_context (*create_gl_context)(int scanout, struct virgl_gl_ctx_param *params);
void (*destroy_gl_context)(virgl_gl_context ctx);
int (*make_current)(virgl_gl_context ctx);
+ int (*get_drm_fd)(void);
};
#define VREND_USE_THREAD_SYNC (1 << 0)
#define VREND_USE_EXTERNAL_BLOB (1 << 1)
#define VREND_USE_ASYNC_FENCE_CB (1 << 2)
+#define VREND_USE_VIDEO (1 << 3)
+
+bool vrend_check_no_error(struct vrend_context *ctx);
const struct virgl_resource_pipe_callbacks *
vrend_renderer_get_pipe_callbacks(void);
@@ -145,6 +151,8 @@ int vrend_create_shader(struct vrend_context *ctx,
const char *shd_text, uint32_t offlen, uint32_t num_tokens,
uint32_t type, uint32_t pkt_length);
+void vrend_link_program_hook(struct vrend_context *ctx, uint32_t *handles);
+
void vrend_bind_shader(struct vrend_context *ctx,
uint32_t type,
uint32_t handle);
@@ -156,10 +164,10 @@ void vrend_clear(struct vrend_context *ctx,
const union pipe_color_union *color,
double depth, unsigned stencil);
-void vrend_clear_texture(struct vrend_context* ctx,
- uint32_t handle, uint32_t level,
- const struct pipe_box *box,
- const void * data);
+int vrend_clear_texture(struct vrend_context* ctx,
+ uint32_t handle, uint32_t level,
+ const struct pipe_box *box,
+ const void * data);
int vrend_draw_vbo(struct vrend_context *ctx,
const struct pipe_draw_info *info,
@@ -254,6 +262,11 @@ int vrend_renderer_copy_transfer3d(struct vrend_context *ctx,
uint32_t src_handle,
const struct vrend_transfer_info *info);
+int vrend_renderer_copy_transfer3d_from_host(struct vrend_context *ctx,
+ uint32_t dst_handle,
+ uint32_t src_handle,
+ const struct vrend_transfer_info *info);
+
void vrend_set_viewport_states(struct vrend_context *ctx,
uint32_t start_slot, uint32_t num_viewports,
const struct pipe_viewport_state *state);
@@ -365,7 +378,7 @@ void vrend_renderer_set_fence_retire(struct vrend_context *ctx,
int vrend_renderer_create_fence(struct vrend_context *ctx,
uint32_t flags,
- void *fence_cookie);
+ uint64_t fence_id);
void vrend_renderer_check_fences(void);
@@ -407,6 +420,11 @@ void vrend_build_format_list_gl(void);
void vrend_build_format_list_gles(void);
void vrend_build_emulated_format_list_gles(void);
void vrend_check_texture_storage(struct vrend_format_table *table);
+void vrend_check_texture_multisample(struct vrend_format_table *table,
+ bool enable_storage);
+
+struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx,
+ int res_handle);
void vrend_renderer_resource_destroy(struct vrend_resource *res);
@@ -445,6 +463,21 @@ struct vrend_renderer_resource_info {
uint32_t stride;
};
+struct vrend_blit_info {
+ const struct pipe_blit_info b;
+ GLuint src_view;
+ GLuint dst_view;
+ uint8_t swizzle[4];
+ int src_y1, src_y2, dst_y1, dst_y2;
+ GLenum gl_filter;
+ bool needs_swizzle;
+ bool can_fbo_blit;
+ bool has_texture_srgb_decode;
+ bool has_srgb_write_control;
+ bool needs_manual_srgb_decode;
+ bool needs_manual_srgb_encode;
+};
+
void vrend_renderer_resource_get_info(struct pipe_resource *pres,
struct vrend_renderer_resource_info *info);
@@ -475,19 +508,9 @@ bool vrend_format_is_bgra(enum virgl_formats format);
boolean format_is_copy_compatible(enum virgl_formats src, enum virgl_formats dst,
unsigned int flags);
-/* blitter interface */
-void vrend_renderer_blit_gl(struct vrend_context *ctx,
- struct vrend_resource *src_res,
- struct vrend_resource *dst_res,
- GLenum blit_views[2],
- const struct pipe_blit_info *info,
- bool has_texture_srgb_decode,
- bool has_srgb_write_control,
- uint8_t swizzle[static 4]);
-void vrend_blitter_fini(void);
-
void vrend_renderer_prepare_reset(void);
void vrend_renderer_reset(void);
+void vrend_renderer_poll(void);
int vrend_renderer_get_poll_fd(void);
unsigned vrend_context_has_debug_flag(const struct vrend_context *ctx,
@@ -531,4 +554,7 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres);
void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle);
void vrend_context_emit_string_marker(struct vrend_context *ctx, GLsizei length, const char * message);
+
+struct vrend_video_context *vrend_context_get_video_ctx(struct vrend_context *ctx);
+
#endif
diff --git a/src/vrend_shader.c b/src/vrend_shader.c
index 2a6a41d6..60d7eb7f 100644
--- a/src/vrend_shader.c
+++ b/src/vrend_shader.c
@@ -38,7 +38,6 @@
/* start convert of tgsi to glsl */
-#define INTERP_PREFIX " "
#define INVARI_PREFIX "invariant"
#define SHADER_REQ_NONE 0
@@ -76,44 +75,28 @@
#define SHADER_REQ_SAMPLER_BUF (1ULL << 31)
#define SHADER_REQ_GEOMETRY_SHADER (1ULL << 32)
#define SHADER_REQ_BLEND_EQUATION_ADVANCED (1ULL << 33)
+#define SHADER_REQ_EXPLICIT_ATTRIB_LOCATION (1ULL << 34)
+#define SHADER_REQ_SHADER_NOPERSPECTIVE_INTERPOLATION (1ULL << 35)
+#define SHADER_REQ_TEXTURE_SHADOW_LOD (1ULL << 36)
#define FRONT_COLOR_EMITTED (1 << 0)
#define BACK_COLOR_EMITTED (1 << 1);
+#define MAX_VARYING 32
+
+enum vrend_sysval_uniform {
+ UNIFORM_WINSYS_ADJUST_Y,
+ UNIFORM_CLIP_PLANE,
+ UNIFORM_ALPHA_REF_VAL,
+ UNIFORM_PSTIPPLE_SAMPLER,
+};
+
enum vec_type {
VEC_FLOAT = 0,
VEC_INT = 1,
VEC_UINT = 2
};
-struct vrend_shader_io {
- char glsl_name[128];
-
- unsigned sid : 16;
- unsigned first : 16;
- unsigned last : 16;
- unsigned array_id : 10;
- unsigned interpolate : 4;
- unsigned location : 2;
-
- unsigned name : 8;
- unsigned stream : 2;
- unsigned usage_mask : 4;
- unsigned type : 2;
- unsigned num_components : 3;
- unsigned swizzle_offset : 3;
-
- unsigned layout_location : 1;
- unsigned invariant : 1;
- unsigned precise : 1;
- unsigned glsl_predefined_no_emit : 1;
- unsigned glsl_no_index : 1;
- unsigned glsl_gl_block : 1;
- unsigned override_no_wm : 1;
- unsigned is_int : 1;
- unsigned fbfetch_used : 1;
-};
-
struct vrend_shader_sampler {
int tgsi_sampler_type;
enum tgsi_return_type tgsi_sampler_return;
@@ -128,11 +111,12 @@ struct vrend_shader_image {
struct tgsi_declaration_image decl;
enum tgsi_return_type image_return;
bool vflag;
+ bool coherent;
};
#define MAX_IMMEDIATE 1024
struct immed {
- int type;
+ enum tgsi_imm_type type;
union imm {
uint32_t ui;
int32_t i;
@@ -144,6 +128,35 @@ struct vrend_temp_range {
int first;
int last;
int array_id;
+ bool precise_result;
+};
+
+struct vrend_shader_io {
+ char glsl_name[128];
+ struct vrend_shader_io *overlapping_array;
+ unsigned sid : 16;
+ unsigned first : 16;
+ unsigned last : 16;
+ unsigned array_id : 10;
+ enum tgsi_interpolate_mode interpolate : 4;
+ enum tgsi_interpolate_loc location : 2;
+
+ unsigned array_offset : 8;
+ enum tgsi_semantic name : 8;
+ unsigned stream : 2;
+ unsigned usage_mask : 4;
+ enum vec_type type : 2;
+ unsigned num_components : 3;
+
+ bool invariant : 1;
+ bool precise : 1;
+ bool glsl_predefined_no_emit : 1;
+ bool glsl_no_index : 1;
+ bool glsl_gl_block : 1;
+ bool override_no_wm : 1;
+ bool is_int : 1;
+ bool fbfetch_used : 1;
+ bool needs_override : 1;
};
struct vrend_io_range {
@@ -153,18 +166,26 @@ struct vrend_io_range {
struct vrend_glsl_strbufs {
int indent_level;
+ uint8_t required_sysval_uniform_decls;
struct vrend_strbuf glsl_main;
struct vrend_strbuf glsl_hdr;
struct vrend_strbuf glsl_ver_ext;
};
+struct vrend_interface_bits {
+ uint64_t outputs_expected_mask;
+ uint64_t inputs_emitted_mask;
+ uint64_t outputs_emitted_mask;
+};
+
struct vrend_generic_ios {
+ struct vrend_interface_bits match;
struct vrend_io_range input_range;
struct vrend_io_range output_range;
+};
- uint64_t outputs_expected_mask;
- uint64_t inputs_emitted_mask;
- uint64_t outputs_emitted_mask;
+struct vrend_texcoord_ios {
+ struct vrend_interface_bits match;
};
struct vrend_patch_ios {
@@ -176,12 +197,13 @@ struct dump_ctx {
struct tgsi_iterate_context iter;
const struct vrend_shader_cfg *cfg;
struct tgsi_shader_info info;
- int prog_type;
+ enum tgsi_processor_type prog_type;
int size;
struct vrend_glsl_strbufs glsl_strbufs;
uint instno;
struct vrend_strbuf src_bufs[4];
+ struct vrend_strbuf dst_bufs[3];
uint32_t num_interps;
uint32_t num_inputs;
@@ -194,6 +216,7 @@ struct dump_ctx {
struct vrend_shader_io system_values[32];
bool guest_sent_io_arrays;
+ struct vrend_texcoord_ios texcoord_ios;
struct vrend_generic_ios generic_ios;
struct vrend_patch_ios patch_ios;
@@ -225,7 +248,6 @@ struct dump_ctx {
int num_consts;
int num_imm;
struct immed imm[MAX_IMMEDIATE];
- unsigned fragcoord_input;
uint32_t req_local_mem;
bool integer_memory;
@@ -241,6 +263,7 @@ struct dump_ctx {
int abo_offsets[32];
uint64_t shader_req_bits;
+ uint64_t patches_emitted_mask;
struct pipe_stream_output_info *so;
char **so_names;
@@ -248,10 +271,12 @@ struct dump_ctx {
bool write_all_cbufs;
uint32_t shadow_samp_mask;
- int fs_coord_origin, fs_pixel_center;
+ bool fs_lower_left_origin, fs_integer_pixel_center;
int fs_depth_layout;
uint32_t fs_blend_equation_advanced;
+ bool separable_program;
+
int gs_in_prim, gs_out_prim, gs_max_out_verts;
int gs_num_invocations;
@@ -261,6 +286,7 @@ struct dump_ctx {
int fs_uses_clipdist_input;
int glsl_ver_required;
int color_in_mask;
+ int color_out_mask;
/* only used when cull is enabled */
uint8_t num_cull_dist_prop, num_clip_dist_prop;
bool has_pervertex;
@@ -271,17 +297,24 @@ struct dump_ctx {
bool write_mul_utemp;
bool write_mul_itemp;
bool has_sample_input;
+ bool has_noperspective;
bool early_depth_stencil;
bool has_file_memory;
bool force_color_two_side;
- bool winsys_adjust_y_emitted;
bool gles_use_tex_query_level;
+ bool has_pointsize_input;
+ bool has_pointsize_output;
+
+ bool has_input_arrays;
+ bool has_output_arrays;
int tcs_vertices_out;
int tes_prim_mode;
int tes_spacing;
int tes_vertex_order;
int tes_point_mode;
+ bool is_last_vertex_stage;
+ bool require_dummy_value;
uint16_t local_cs_block_size[3];
};
@@ -312,6 +345,7 @@ static const struct vrend_shader_table shader_req_table[] = {
{ SHADER_REQ_SHADER_ATOMIC_FLOAT, "NV_shader_atomic_float"},
{ SHADER_REQ_CONSERVATIVE_DEPTH, "ARB_conservative_depth"},
{SHADER_REQ_BLEND_EQUATION_ADVANCED, "KHR_blend_equation_advanced"},
+ { SHADER_REQ_TEXTURE_SHADOW_LOD, "EXT_texture_shadow_lod"},
};
enum vrend_type_qualifier {
@@ -342,11 +376,12 @@ struct dest_info {
enum vrend_type_qualifier udstconv;
enum vrend_type_qualifier idstconv;
bool dst_override_no_wm[2];
+ int32_t dest_index;
};
struct source_info {
enum vrend_type_qualifier svec4;
- uint32_t sreg_index;
+ int32_t sreg_index;
bool tg4_has_component;
bool override_no_wm[3];
bool override_no_cast[3];
@@ -381,6 +416,25 @@ enum io_type {
io_out
};
+enum io_decl_type {
+ decl_plain,
+ decl_block
+};
+
+static
+void vrend_shader_write_io_as_src(struct vrend_strbuf *buf,
+ const char *arrayname,
+ const struct vrend_shader_io *io,
+ const struct tgsi_full_src_register *src,
+ enum io_decl_type decl_type);
+
+static
+void vrend_shader_write_io_as_dst(struct vrend_strbuf *buf,
+ const char *arrayname,
+ const struct vrend_shader_io *io,
+ const struct tgsi_full_dst_register *src,
+ enum io_decl_type decl_type);
+
/* We prefer arrays of arrays, but if this is not available then TCS, GEOM, and TES
* inputs must be blocks, but FS input should not because interpolateAt* doesn't
* support dereferencing block members. */
@@ -441,6 +495,21 @@ static inline const char *get_wm_string(unsigned wm)
}
}
+static inline const char *get_swizzle_string(uint8_t swizzle)
+{
+ switch (swizzle) {
+ case PIPE_SWIZZLE_RED: return ".x";
+ case PIPE_SWIZZLE_GREEN: return ".y";
+ case PIPE_SWIZZLE_BLUE: return ".z";
+ case PIPE_SWIZZLE_ALPHA: return ".w";
+ case PIPE_SWIZZLE_ZERO:
+ case PIPE_SWIZZLE_ONE: return ".0";
+ default:
+ assert(0);
+ return "";
+ }
+}
+
const char *get_internalformat_string(int virgl_format, enum tgsi_return_type *stype);
static inline const char *tgsi_proc_to_prefix(int shader_type)
@@ -532,15 +601,14 @@ static inline int gs_input_prim_to_size(int prim)
static inline bool fs_emit_layout(const struct dump_ctx *ctx)
{
- if (ctx->fs_pixel_center)
- return true;
- /* if coord origin is 0 and invert is 0 - emit origin_upper_left,
- if coord_origin is 0 and invert is 1 - emit nothing (lower)
- if coord origin is 1 and invert is 0 - emit nothing (lower)
- if coord_origin is 1 and invert is 1 - emit origin upper left */
- if (!(ctx->fs_coord_origin ^ ctx->key->fs.invert_origin))
+ if (ctx->fs_integer_pixel_center)
return true;
- return false;
+
+ /* if fs_lower_left_origin is 0 and lower_left_origin is 0 - emit origin_upper_left,
+ if fs_lower_left_origin is 0 and lower_left_origin is 1 - emit nothing (lower)
+ if fs_lower_left_origin is 1 and lower_left_origin is 0 - emit nothing (lower)
+ if fs_lower_left_origin is 1 and lower_left_origin is 1 - emit origin_upper_left */
+ return ctx->fs_lower_left_origin == ctx->key->fs.lower_left_origin;
}
static const char *get_stage_input_name_prefix(const struct dump_ctx *ctx, int processor)
@@ -694,14 +762,30 @@ static bool allocate_temp_range(struct vrend_temp_range **temp_ranges, uint32_t
{
int idx = *num_temp_ranges;
- *temp_ranges = realloc(*temp_ranges, sizeof(struct vrend_temp_range) * (idx + 1));
- if (!*temp_ranges)
- return false;
+ if (array_id > 0) {
+
+ *temp_ranges = realloc(*temp_ranges, sizeof(struct vrend_temp_range) * (idx + 1));
+ if (!*temp_ranges)
+ return false;
+
+ (*temp_ranges)[idx].first = first;
+ (*temp_ranges)[idx].last = last;
+ (*temp_ranges)[idx].array_id = array_id;
+ (*temp_ranges)[idx].precise_result = false;
+ (*num_temp_ranges)++;
+ } else {
+ int ntemps = last - first + 1;
+ *temp_ranges = realloc(*temp_ranges, sizeof(struct vrend_temp_range) * (idx + ntemps));
+ for (int i = 0; i < ntemps; ++i) {
+ (*temp_ranges)[idx + i].first = first + i;
+ (*temp_ranges)[idx + i].last = first + i;
+ (*temp_ranges)[idx + i].array_id = 0;
+ (*temp_ranges)[idx + i].precise_result = false;
+ }
+ (*num_temp_ranges) += ntemps;
+
- (*temp_ranges)[idx].first = first;
- (*temp_ranges)[idx].last = last;
- (*temp_ranges)[idx].array_id = array_id;
- (*num_temp_ranges)++;
+ }
return true;
}
@@ -883,6 +967,141 @@ static bool add_samplers(struct dump_ctx *ctx, int first, int last, int sview_ty
return true;
}
+typedef enum
+{
+ VARYING_SLOT_POS,
+ VARYING_SLOT_COL0, /* COL0 and COL1 must be contiguous */
+ VARYING_SLOT_COL1,
+ VARYING_SLOT_FOGC,
+ VARYING_SLOT_TEX0, /* TEX0-TEX7 must be contiguous */
+ VARYING_SLOT_TEX1,
+ VARYING_SLOT_TEX2,
+ VARYING_SLOT_TEX3,
+ VARYING_SLOT_TEX4,
+ VARYING_SLOT_TEX5,
+ VARYING_SLOT_TEX6,
+ VARYING_SLOT_TEX7,
+ VARYING_SLOT_PSIZ, /* Does not appear in FS */
+ VARYING_SLOT_BFC0, /* Does not appear in FS */
+ VARYING_SLOT_BFC1, /* Does not appear in FS */
+ VARYING_SLOT_EDGE, /* Does not appear in FS */
+ VARYING_SLOT_CLIP_VERTEX, /* Does not appear in FS */
+ VARYING_SLOT_CLIP_DIST0,
+ VARYING_SLOT_CLIP_DIST1,
+ VARYING_SLOT_CULL_DIST0,
+ VARYING_SLOT_CULL_DIST1,
+ VARYING_SLOT_PRIMITIVE_ID, /* Does not appear in VS */
+ VARYING_SLOT_LAYER, /* Appears as VS or GS output */
+ VARYING_SLOT_VIEWPORT, /* Appears as VS or GS output */
+ VARYING_SLOT_FACE, /* FS only */
+ VARYING_SLOT_PNTC, /* FS only */
+ VARYING_SLOT_TESS_LEVEL_OUTER, /* Only appears as TCS output. */
+ VARYING_SLOT_TESS_LEVEL_INNER, /* Only appears as TCS output. */
+ VARYING_SLOT_BOUNDING_BOX0, /* Only appears as TCS output. */
+ VARYING_SLOT_BOUNDING_BOX1, /* Only appears as TCS output. */
+ VARYING_SLOT_VIEW_INDEX,
+ VARYING_SLOT_VIEWPORT_MASK, /* Does not appear in FS */
+ VARYING_SLOT_PRIMITIVE_SHADING_RATE = VARYING_SLOT_FACE, /* Does not appear in FS. */
+
+ VARYING_SLOT_PRIMITIVE_COUNT = VARYING_SLOT_TESS_LEVEL_OUTER, /* Only appears in MESH. */
+ VARYING_SLOT_PRIMITIVE_INDICES = VARYING_SLOT_TESS_LEVEL_INNER, /* Only appears in MESH. */
+ VARYING_SLOT_TASK_COUNT = VARYING_SLOT_BOUNDING_BOX0, /* Only appears in TASK. */
+
+ VARYING_SLOT_VAR0 = 32, /* First generic varying slot */
+ /* the remaining are simply for the benefit of gl_varying_slot_name()
+ * and not to be construed as an upper bound:
+ */
+ VARYING_SLOT_VAR1,
+ VARYING_SLOT_VAR2,
+ VARYING_SLOT_VAR3,
+ VARYING_SLOT_VAR4,
+ VARYING_SLOT_VAR5,
+ VARYING_SLOT_VAR6,
+ VARYING_SLOT_VAR7,
+ VARYING_SLOT_VAR8,
+ VARYING_SLOT_VAR9,
+ VARYING_SLOT_VAR10,
+ VARYING_SLOT_VAR11,
+ VARYING_SLOT_VAR12,
+ VARYING_SLOT_VAR13,
+ VARYING_SLOT_VAR14,
+ VARYING_SLOT_VAR15,
+ VARYING_SLOT_VAR16,
+ VARYING_SLOT_VAR17,
+ VARYING_SLOT_VAR18,
+ VARYING_SLOT_VAR19,
+ VARYING_SLOT_VAR20,
+ VARYING_SLOT_VAR21,
+ VARYING_SLOT_VAR22,
+ VARYING_SLOT_VAR23,
+ VARYING_SLOT_VAR24,
+ VARYING_SLOT_VAR25,
+ VARYING_SLOT_VAR26,
+ VARYING_SLOT_VAR27,
+ VARYING_SLOT_VAR28,
+ VARYING_SLOT_VAR29,
+ VARYING_SLOT_VAR30,
+ VARYING_SLOT_VAR31,
+ /* Account for the shift without CAP_TEXCOORD in mesa*/
+ VARYING_SLOT_PATCH0 = VARYING_SLOT_VAR31 + 9
+} gl_varying_slot;
+
+static uint32_t
+varying_bit_from_semantic_and_index(enum tgsi_semantic semantic, int index)
+{
+ switch (semantic) {
+ case TGSI_SEMANTIC_POSITION:
+ return VARYING_SLOT_POS;
+ case TGSI_SEMANTIC_COLOR:
+ if (index == 0)
+ return VARYING_SLOT_COL0;
+ else
+ return VARYING_SLOT_COL1;
+ case TGSI_SEMANTIC_BCOLOR:
+ if (index == 0)
+ return VARYING_SLOT_BFC0;
+ else
+ return VARYING_SLOT_BFC1;
+ case TGSI_SEMANTIC_FOG:
+ return VARYING_SLOT_FOGC;
+ case TGSI_SEMANTIC_PSIZE:
+ return VARYING_SLOT_PSIZ;
+ case TGSI_SEMANTIC_GENERIC:
+ return VARYING_SLOT_VAR0 + index;
+ case TGSI_SEMANTIC_FACE:
+ return VARYING_SLOT_FACE;
+ case TGSI_SEMANTIC_EDGEFLAG:
+ return VARYING_SLOT_EDGE;
+ case TGSI_SEMANTIC_PRIMID:
+ return VARYING_SLOT_PRIMITIVE_ID;
+ case TGSI_SEMANTIC_CLIPDIST:
+ if (index == 0)
+ return VARYING_SLOT_CLIP_DIST0;
+ else
+ return VARYING_SLOT_CLIP_DIST1;
+ case TGSI_SEMANTIC_CLIPVERTEX:
+ return VARYING_SLOT_CLIP_VERTEX;
+ case TGSI_SEMANTIC_TEXCOORD:
+ assert(index < 8);
+ return (VARYING_SLOT_TEX0 + index);
+ case TGSI_SEMANTIC_PCOORD:
+ return VARYING_SLOT_PNTC;
+ case TGSI_SEMANTIC_VIEWPORT_INDEX:
+ return VARYING_SLOT_VIEWPORT;
+ case TGSI_SEMANTIC_LAYER:
+ return VARYING_SLOT_LAYER;
+ case TGSI_SEMANTIC_TESSINNER:
+ return VARYING_SLOT_TESS_LEVEL_INNER;
+ case TGSI_SEMANTIC_TESSOUTER:
+ return VARYING_SLOT_TESS_LEVEL_OUTER;
+ case TGSI_SEMANTIC_PATCH:
+ return VARYING_SLOT_PATCH0 + index;
+ default:
+ vrend_printf("Warning: Bad TGSI semantic: %d/%d\n", semantic, index);
+ return 0;
+ }
+}
+
static struct vrend_array *lookup_image_array_ptr(const struct dump_ctx *ctx, int index)
{
uint32_t i;
@@ -967,13 +1186,45 @@ static enum vec_type get_type(uint32_t signed_int_mask,
return VEC_FLOAT;
}
-static void get_swizzle_offset_and_num_components(struct vrend_shader_io *io)
+static struct vrend_shader_io *
+find_overlapping_io(struct vrend_shader_io io[static 64],
+ uint32_t num_io,
+ const struct tgsi_full_declaration *decl)
+{
+ for (uint32_t j = 0; j < num_io - 1; j++) {
+ if (io[j].interpolate == decl->Interp.Interpolate &&
+ io[j].name == decl->Semantic.Name &&
+ ((io[j].first <= decl->Range.First &&
+ io[j].last > decl->Range.First) ||
+ (io[j].first < decl->Range.Last &&
+ io[j].last >= decl->Range.Last))) {
+ return &io[j];
+ }
+ }
+ return NULL;
+}
+
+static void
+map_overlapping_io_array(struct vrend_shader_io io[static 64],
+ struct vrend_shader_io *new_io,
+ uint32_t num_io,
+ const struct tgsi_full_declaration *decl)
{
- unsigned mask_temp = io->usage_mask;
- int start, num_comp;
- u_bit_scan_consecutive_range(&mask_temp, &start, &num_comp);
- io->swizzle_offset = start;
- io->num_components = num_comp;
+ struct vrend_shader_io *overlap_io = find_overlapping_io(io, num_io, decl);
+ if (overlap_io && !overlap_io->needs_override) {
+ int delta = new_io->first - overlap_io->first;
+ if (delta >= 0) {
+ new_io->array_offset = delta;
+ new_io->overlapping_array = overlap_io;
+ overlap_io->last = MAX2(overlap_io->last, new_io->last);
+ } else if (delta < 0) {
+ overlap_io->overlapping_array = new_io;
+ overlap_io->array_offset = -delta;
+ new_io->last = MAX2(overlap_io->last, new_io->last);
+ }
+ overlap_io->usage_mask |= new_io->usage_mask;
+ new_io->usage_mask = overlap_io->usage_mask;
+ }
}
static boolean
@@ -983,9 +1234,8 @@ iter_declaration(struct tgsi_iterate_context *iter,
struct dump_ctx *ctx = (struct dump_ctx *)iter;
int i;
int color_offset = 0;
- const char *name_prefix = "";
+ const char *name_prefix;
bool add_two_side = false;
- unsigned mask_temp;
switch (decl->Declaration.File) {
case TGSI_FILE_INPUT:
@@ -993,16 +1243,18 @@ iter_declaration(struct tgsi_iterate_context *iter,
if (ctx->inputs[j].name == decl->Semantic.Name &&
ctx->inputs[j].sid == decl->Semantic.Index &&
ctx->inputs[j].first == decl->Range.First &&
- ctx->inputs[j].usage_mask == decl->Declaration.UsageMask &&
((!decl->Declaration.Array && ctx->inputs[j].array_id == 0) ||
- (ctx->inputs[j].array_id == decl->Array.ArrayID)))
+ (ctx->inputs[j].array_id == decl->Array.ArrayID))) {
return true;
+ }
}
+
i = ctx->num_inputs++;
if (ctx->num_inputs > ARRAY_SIZE(ctx->inputs)) {
vrend_printf( "Number of inputs exceeded, max is %lu\n", ARRAY_SIZE(ctx->inputs));
return false;
}
+
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) {
ctx->attrib_input_mask |= (1 << decl->Range.First);
ctx->inputs[i].type = get_type(ctx->key->vs.attrib_signed_int_bitmask,
@@ -1014,26 +1266,54 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].interpolate = decl->Interp.Interpolate;
ctx->inputs[i].location = decl->Interp.Location;
ctx->inputs[i].first = decl->Range.First;
- ctx->inputs[i].layout_location = 0;
ctx->inputs[i].last = decl->Range.Last;
ctx->inputs[i].array_id = decl->Declaration.Array ? decl->Array.ArrayID : 0;
- ctx->inputs[i].usage_mask = mask_temp = decl->Declaration.UsageMask;
- get_swizzle_offset_and_num_components(&ctx->inputs[i]);
+ ctx->inputs[i].usage_mask = decl->Declaration.UsageMask;
+ ctx->inputs[i].num_components = 4;
ctx->inputs[i].glsl_predefined_no_emit = false;
ctx->inputs[i].glsl_no_index = false;
ctx->inputs[i].override_no_wm = ctx->inputs[i].num_components == 1;
ctx->inputs[i].glsl_gl_block = false;
+ ctx->inputs[i].overlapping_array = NULL;
- if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT &&
- decl->Interp.Location == TGSI_INTERPOLATE_LOC_SAMPLE) {
- ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
- ctx->has_sample_input = true;
+ if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
+ if (decl->Interp.Location == TGSI_INTERPOLATE_LOC_SAMPLE) {
+ ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
+ ctx->has_sample_input = true;
+ }
+ if (decl->Interp.Interpolate == TGSI_INTERPOLATE_LINEAR && ctx->cfg->use_gles &&
+ ctx->cfg->has_nopersective) {
+ ctx->shader_req_bits |= SHADER_REQ_SHADER_NOPERSPECTIVE_INTERPOLATION;
+ ctx->has_noperspective = true;
+ }
+ }
+
+ map_overlapping_io_array(ctx->inputs, &ctx->inputs[i], ctx->num_inputs, decl);
+
+ if (!ctx->inputs[i].glsl_predefined_no_emit) {
+
+ /* If the output of the previous shader contained arrays we
+ * have to check whether a non-array input here should be part
+ * of an array */
+ for (uint32_t j = 0; j < ctx->key->in_arrays.num_arrays; j++) {
+ const struct vrend_shader_io_array *array = &ctx->key->in_arrays.layout[j];
+
+ if (array->name == decl->Semantic.Name &&
+ array->sid <= decl->Semantic.Index &&
+ array->sid + array->size >= decl->Semantic.Index) {
+ ctx->inputs[i].sid = array->sid;
+ ctx->inputs[i].last = MAX2(ctx->inputs[i].first + array->size, ctx->inputs[i].last);
+ break;
+ }
+ }
}
if (ctx->inputs[i].first != ctx->inputs[i].last)
ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
+ name_prefix = get_stage_input_name_prefix(ctx, iter->processor.Processor);
+
switch (ctx->inputs[i].name) {
case TGSI_SEMANTIC_COLOR:
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
@@ -1067,7 +1347,7 @@ iter_declaration(struct tgsi_iterate_context *iter,
if (ctx->front_face_emitted == false) {
int k = ctx->num_inputs++;
- if (ctx->num_inputs > ARRAY_SIZE(ctx->inputs)) {
+ if (ctx->num_inputs >= ARRAY_SIZE(ctx->inputs)) {
vrend_printf( "Number of inputs exceeded, max is %lu\n", ARRAY_SIZE(ctx->inputs));
return false;
}
@@ -1083,11 +1363,9 @@ iter_declaration(struct tgsi_iterate_context *iter,
}
add_two_side = true;
}
- name_prefix = "ex";
}
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_PRIMID:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) {
name_prefix = "gl_PrimitiveIDIn";
@@ -1095,16 +1373,14 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].glsl_no_index = true;
ctx->inputs[i].override_no_wm = true;
ctx->shader_req_bits |= SHADER_REQ_INTS;
- break;
} else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
name_prefix = "gl_PrimitiveID";
ctx->inputs[i].glsl_predefined_no_emit = true;
ctx->inputs[i].glsl_no_index = true;
ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
ctx->shader_req_bits |= SHADER_REQ_GEOMETRY_SHADER;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_VIEWPORT_INDEX:
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
ctx->inputs[i].glsl_predefined_no_emit = true;
@@ -1117,9 +1393,8 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->shader_req_bits |= SHADER_REQ_LAYER;
if (ctx->cfg->use_gles)
ctx->shader_req_bits |= SHADER_REQ_VIEWPORT_IDX;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_LAYER:
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
name_prefix = "gl_Layer";
@@ -1129,9 +1404,8 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].type = VEC_INT;
ctx->inputs[i].override_no_wm = true;
ctx->shader_req_bits |= SHADER_REQ_LAYER;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_PSIZE:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY ||
iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL ||
@@ -1142,9 +1416,9 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].override_no_wm = true;
ctx->inputs[i].glsl_gl_block = true;
ctx->shader_req_bits |= SHADER_REQ_PSIZE;
- break;
+ ctx->has_pointsize_input = true;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_CLIPDIST:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY ||
iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL ||
@@ -1157,7 +1431,6 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE;
if (ctx->inputs[i].last != ctx->inputs[i].first)
ctx->guest_sent_io_arrays = true;
- break;
} else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
name_prefix = "gl_ClipDistance";
ctx->inputs[i].glsl_predefined_no_emit = true;
@@ -1166,9 +1439,8 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE;
if (ctx->inputs[i].last != ctx->inputs[i].first)
ctx->guest_sent_io_arrays = true;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_POSITION:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY ||
iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL ||
@@ -1177,17 +1449,15 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].glsl_predefined_no_emit = true;
ctx->inputs[i].glsl_no_index = true;
ctx->inputs[i].glsl_gl_block = true;
- break;
} else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
- if (ctx->cfg->use_gles && ctx->fs_pixel_center) {
+ if (ctx->cfg->use_gles && ctx->fs_integer_pixel_center) {
name_prefix = "(gl_FragCoord - vec4(0.5, 0.5, 0.0, 0.0))";
} else
name_prefix = "gl_FragCoord";
ctx->inputs[i].glsl_predefined_no_emit = true;
ctx->inputs[i].glsl_no_index = true;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_FACE:
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
if (ctx->front_face_emitted) {
@@ -1198,26 +1468,41 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].glsl_predefined_no_emit = true;
ctx->inputs[i].glsl_no_index = true;
ctx->front_face_emitted = true;
- break;
}
- /* fallthrough */
+ break;
+ case TGSI_SEMANTIC_PCOORD:
+ if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
+ if (ctx->cfg->use_gles) {
+ name_prefix = "vec4(gl_PointCoord.x, mix(1.0 - gl_PointCoord.y, gl_PointCoord.y, clamp(winsys_adjust_y, 0.0, 1.0)), 0.0, 1.0)";
+ ctx->glsl_strbufs.required_sysval_uniform_decls |= BIT(UNIFORM_WINSYS_ADJUST_Y);
+ } else
+ name_prefix = "vec4(gl_PointCoord, 0.0, 1.0)";
+ ctx->inputs[i].glsl_predefined_no_emit = true;
+ ctx->inputs[i].glsl_no_index = true;
+ ctx->inputs[i].num_components = 4;
+ ctx->inputs[i].usage_mask = 0xf;
+ }
+ break;
case TGSI_SEMANTIC_PATCH:
+ if (iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL)
+ name_prefix = "patch";
+ /* fallthrough */
case TGSI_SEMANTIC_GENERIC:
+ case TGSI_SEMANTIC_TEXCOORD:
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
if (ctx->key->fs.coord_replace & (1 << ctx->inputs[i].sid)) {
- if (ctx->cfg->use_gles)
+ if (ctx->cfg->use_gles) {
name_prefix = "vec4(gl_PointCoord.x, mix(1.0 - gl_PointCoord.y, gl_PointCoord.y, clamp(winsys_adjust_y, 0.0, 1.0)), 0.0, 1.0)";
- else
+ ctx->glsl_strbufs.required_sysval_uniform_decls |= BIT(UNIFORM_WINSYS_ADJUST_Y);
+ } else
name_prefix = "vec4(gl_PointCoord, 0.0, 1.0)";
ctx->inputs[i].glsl_predefined_no_emit = true;
ctx->inputs[i].glsl_no_index = true;
ctx->inputs[i].num_components = 4;
- ctx->inputs[i].swizzle_offset = 0;
ctx->inputs[i].usage_mask = 0xf;
break;
}
}
-
if (ctx->inputs[i].first != ctx->inputs[i].last ||
ctx->inputs[i].array_id > 0) {
ctx->guest_sent_io_arrays = true;
@@ -1228,10 +1513,9 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
}
}
-
- /* fallthrough */
+ break;
default:
- name_prefix = get_stage_input_name_prefix(ctx, iter->processor.Processor);
+ vrend_printf("unhandled input semantic: %x\n", ctx->inputs[i].name);
break;
}
@@ -1241,15 +1525,18 @@ iter_declaration(struct tgsi_iterate_context *iter,
if (ctx->inputs[i].name == TGSI_SEMANTIC_FOG){
ctx->inputs[i].usage_mask = 0xf;
ctx->inputs[i].num_components = 4;
- ctx->inputs[i].swizzle_offset = 0;
ctx->inputs[i].override_no_wm = false;
snprintf(ctx->inputs[i].glsl_name, 128, "%s_f%d", name_prefix, ctx->inputs[i].sid);
} else if (ctx->inputs[i].name == TGSI_SEMANTIC_COLOR)
snprintf(ctx->inputs[i].glsl_name, 128, "%s_c%d", name_prefix, ctx->inputs[i].sid);
+ else if (ctx->inputs[i].name == TGSI_SEMANTIC_BCOLOR)
+ snprintf(ctx->inputs[i].glsl_name, 128, "%s_bc%d", name_prefix, ctx->inputs[i].sid);
else if (ctx->inputs[i].name == TGSI_SEMANTIC_GENERIC)
- snprintf(ctx->inputs[i].glsl_name, 128, "%s_g%dA%d", name_prefix, ctx->inputs[i].sid, ctx->inputs[i].array_id);
+ snprintf(ctx->inputs[i].glsl_name, 128, "%s_g%d", name_prefix, ctx->inputs[i].sid);
else if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH)
- snprintf(ctx->inputs[i].glsl_name, 128, "%s_p%dA%d", name_prefix, ctx->inputs[i].sid, ctx->inputs[i].array_id);
+ snprintf(ctx->inputs[i].glsl_name, 128, "%s%d", name_prefix, ctx->inputs[i].sid);
+ else if (ctx->inputs[i].name == TGSI_SEMANTIC_TEXCOORD)
+ snprintf(ctx->inputs[i].glsl_name, 64, "%s_t%d", name_prefix, ctx->inputs[i].sid);
else
snprintf(ctx->inputs[i].glsl_name, 128, "%s_%d", name_prefix, ctx->inputs[i].first);
}
@@ -1266,7 +1553,6 @@ iter_declaration(struct tgsi_iterate_context *iter,
if (ctx->outputs[j].name == decl->Semantic.Name &&
ctx->outputs[j].sid == decl->Semantic.Index &&
ctx->outputs[j].first == decl->Range.First &&
- ctx->outputs[j].usage_mask == decl->Declaration.UsageMask &&
((!decl->Declaration.Array && ctx->outputs[j].array_id == 0) ||
(ctx->outputs[j].array_id == decl->Array.ArrayID)))
return true;
@@ -1284,15 +1570,19 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->outputs[i].precise = false;
ctx->outputs[i].first = decl->Range.First;
ctx->outputs[i].last = decl->Range.Last;
- ctx->outputs[i].layout_location = 0;
ctx->outputs[i].array_id = decl->Declaration.Array ? decl->Array.ArrayID : 0;
- ctx->outputs[i].usage_mask = mask_temp = decl->Declaration.UsageMask;
- get_swizzle_offset_and_num_components(&ctx->outputs[i]);
+ ctx->outputs[i].usage_mask = decl->Declaration.UsageMask;
+ ctx->outputs[i].num_components = 4;
ctx->outputs[i].glsl_predefined_no_emit = false;
ctx->outputs[i].glsl_no_index = false;
ctx->outputs[i].override_no_wm = ctx->outputs[i].num_components == 1;
ctx->outputs[i].is_int = false;
ctx->outputs[i].fbfetch_used = false;
+ ctx->outputs[i].overlapping_array = NULL;
+
+ map_overlapping_io_array(ctx->outputs, &ctx->outputs[i], ctx->num_outputs, decl);
+
+ name_prefix = get_stage_output_name_prefix(iter->processor.Processor);
switch (ctx->outputs[i].name) {
case TGSI_SEMANTIC_POSITION:
@@ -1339,14 +1629,16 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->guest_sent_io_arrays = true;
break;
case TGSI_SEMANTIC_CLIPVERTEX:
- name_prefix = "gl_ClipVertex";
- ctx->outputs[i].glsl_predefined_no_emit = true;
- ctx->outputs[i].glsl_no_index = true;
ctx->outputs[i].override_no_wm = true;
ctx->outputs[i].invariant = false;
- ctx->outputs[i].precise = false;
- if (ctx->glsl_ver_required >= 140)
+ if (ctx->glsl_ver_required >= 140) {
ctx->has_clipvertex = true;
+ name_prefix = get_stage_output_name_prefix(iter->processor.Processor);
+ } else {
+ name_prefix = "gl_ClipVertex";
+ ctx->outputs[i].glsl_predefined_no_emit = true;
+ ctx->outputs[i].glsl_no_index = true;
+ }
break;
case TGSI_SEMANTIC_SAMPLEMASK:
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
@@ -1364,38 +1656,32 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->outputs[i].type = get_type(ctx->key->fs.cbufs_signed_int_bitmask,
ctx->key->fs.cbufs_unsigned_int_bitmask,
ctx->outputs[i].sid);
- }
-
- if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) {
+ name_prefix = ctx->key->fs.logicop_enabled ? "fsout_tmp" : "fsout";
+ } else {
if (ctx->glsl_ver_required < 140) {
ctx->outputs[i].glsl_no_index = true;
if (ctx->outputs[i].sid == 0)
name_prefix = "gl_FrontColor";
else if (ctx->outputs[i].sid == 1)
name_prefix = "gl_FrontSecondaryColor";
- } else
- name_prefix = "ex";
- break;
- } else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT &&
- ctx->key->fs.logicop_enabled) {
- name_prefix = "fsout_tmp";
- break;
+ } else {
+ ctx->color_out_mask |= (1 << decl->Semantic.Index);
+ }
}
- /* fallthrough */
+ ctx->outputs[i].override_no_wm = false;
+ break;
case TGSI_SEMANTIC_BCOLOR:
- if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) {
- if (ctx->glsl_ver_required < 140) {
- ctx->outputs[i].glsl_no_index = true;
- if (ctx->outputs[i].sid == 0)
- name_prefix = "gl_BackColor";
- else if (ctx->outputs[i].sid == 1)
- name_prefix = "gl_BackSecondaryColor";
- break;
- } else
- name_prefix = "ex";
- break;
+ if (ctx->glsl_ver_required < 140) {
+ ctx->outputs[i].glsl_no_index = true;
+ if (ctx->outputs[i].sid == 0)
+ name_prefix = "gl_BackColor";
+ else if (ctx->outputs[i].sid == 1)
+ name_prefix = "gl_BackSecondaryColor";
+ } else {
+ ctx->outputs[i].override_no_wm = false;
+ ctx->color_out_mask |= (1 << decl->Semantic.Index) << 2;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_PSIZE:
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX ||
iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY ||
@@ -1406,11 +1692,11 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->outputs[i].override_no_wm = true;
ctx->shader_req_bits |= SHADER_REQ_PSIZE;
name_prefix = "gl_PointSize";
+ ctx->has_pointsize_output = true;
if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL)
ctx->outputs[i].glsl_gl_block = true;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_LAYER:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) {
ctx->outputs[i].glsl_predefined_no_emit = true;
@@ -1418,9 +1704,8 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->outputs[i].override_no_wm = true;
ctx->outputs[i].is_int = true;
name_prefix = "gl_Layer";
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_PRIMID:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) {
ctx->outputs[i].glsl_predefined_no_emit = true;
@@ -1428,9 +1713,8 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->outputs[i].override_no_wm = true;
ctx->outputs[i].is_int = true;
name_prefix = "gl_PrimitiveID";
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_VIEWPORT_INDEX:
if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) {
ctx->outputs[i].glsl_predefined_no_emit = true;
@@ -1440,29 +1724,30 @@ iter_declaration(struct tgsi_iterate_context *iter,
name_prefix = "gl_ViewportIndex";
if (ctx->glsl_ver_required >= 140 || ctx->cfg->use_gles)
ctx->shader_req_bits |= SHADER_REQ_VIEWPORT_IDX;
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_TESSOUTER:
if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) {
ctx->outputs[i].glsl_predefined_no_emit = true;
ctx->outputs[i].glsl_no_index = true;
ctx->outputs[i].override_no_wm = true;
name_prefix = "gl_TessLevelOuter";
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_TESSINNER:
if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) {
ctx->outputs[i].glsl_predefined_no_emit = true;
ctx->outputs[i].glsl_no_index = true;
ctx->outputs[i].override_no_wm = true;
name_prefix = "gl_TessLevelInner";
- break;
}
- /* fallthrough */
+ break;
case TGSI_SEMANTIC_PATCH:
+ if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL)
+ name_prefix = "patch";
+ /* fallthrough */
case TGSI_SEMANTIC_GENERIC:
+ case TGSI_SEMANTIC_TEXCOORD:
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX)
if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC)
color_offset = -1;
@@ -1478,9 +1763,9 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
}
}
- /* fallthrough */
+ break;
default:
- name_prefix = get_stage_output_name_prefix(iter->processor.Processor);
+ vrend_printf("unhandled output semantic: %x\n", ctx->outputs[i].name);
break;
}
@@ -1490,7 +1775,6 @@ iter_declaration(struct tgsi_iterate_context *iter,
if (ctx->outputs[i].name == TGSI_SEMANTIC_FOG) {
ctx->outputs[i].usage_mask = 0xf;
ctx->outputs[i].num_components = 4;
- ctx->outputs[i].swizzle_offset = 0;
ctx->outputs[i].override_no_wm = false;
snprintf(ctx->outputs[i].glsl_name, 64, "%s_f%d", name_prefix, ctx->outputs[i].sid);
} else if (ctx->outputs[i].name == TGSI_SEMANTIC_COLOR)
@@ -1498,9 +1782,11 @@ iter_declaration(struct tgsi_iterate_context *iter,
else if (ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR)
snprintf(ctx->outputs[i].glsl_name, 64, "%s_bc%d", name_prefix, ctx->outputs[i].sid);
else if (ctx->outputs[i].name == TGSI_SEMANTIC_PATCH)
- snprintf(ctx->outputs[i].glsl_name, 64, "%s_p%dA%d", name_prefix, ctx->outputs[i].sid, ctx->outputs[i].array_id);
+ snprintf(ctx->outputs[i].glsl_name, 64, "%s%d", name_prefix, ctx->outputs[i].sid);
else if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC)
- snprintf(ctx->outputs[i].glsl_name, 64, "%s_g%dA%d", name_prefix, ctx->outputs[i].sid, ctx->outputs[i].array_id);
+ snprintf(ctx->outputs[i].glsl_name, 64, "%s_g%d", name_prefix, ctx->outputs[i].sid);
+ else if (ctx->outputs[i].name == TGSI_SEMANTIC_TEXCOORD)
+ snprintf(ctx->outputs[i].glsl_name, 64, "%s_t%d", name_prefix, ctx->outputs[i].sid);
else
snprintf(ctx->outputs[i].glsl_name, 64, "%s_%d", name_prefix, ctx->outputs[i].first + color_offset);
@@ -1666,10 +1952,10 @@ iter_property(struct tgsi_iterate_context *iter,
ctx->write_all_cbufs = true;
break;
case TGSI_PROPERTY_FS_COORD_ORIGIN:
- ctx->fs_coord_origin = prop->u[0].Data;
+ ctx->fs_lower_left_origin = prop->u[0].Data ? true : false;
break;
case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
- ctx->fs_pixel_center = prop->u[0].Data;
+ ctx->fs_integer_pixel_center = prop->u[0].Data ? true : false;
break;
case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
/* If the host doesn't support this, then we can savely ignore this,
@@ -1690,6 +1976,7 @@ iter_property(struct tgsi_iterate_context *iter,
break;
case TGSI_PROPERTY_GS_INVOCATIONS:
ctx->gs_num_invocations = prop->u[0].Data;
+ ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE;
@@ -1736,6 +2023,16 @@ iter_property(struct tgsi_iterate_context *iter,
ctx->shader_req_bits |= SHADER_REQ_BLEND_EQUATION_ADVANCED;
}
break;
+ case TGSI_PROPERTY_SEPARABLE_PROGRAM:
+ /* GLES is very strict in how separable shaders interfaces should be matched.
+ * It doesn't allow, for example, inputs without matching outputs. So, we just
+ * disable separable shaders for GLES. */
+ if (!ctx->cfg->use_gles) {
+ ctx->separable_program = prop->u[0].Data;
+ ctx->shader_req_bits |= SHADER_REQ_SEPERATE_SHADER_OBJECTS;
+ ctx->shader_req_bits |= SHADER_REQ_EXPLICIT_ATTRIB_LOCATION;
+ }
+ break;
default:
vrend_printf("unhandled property: %x\n", prop->Property.PropertyName);
return false;
@@ -1836,6 +2133,7 @@ static void emit_alpha_test(const struct dump_ctx *ctx,
case PIPE_FUNC_NOTEQUAL:
case PIPE_FUNC_GEQUAL:
snprintf(comp_buf, 128, "%s %s alpha_ref_val", "fsout_c0.w", atests[ctx->key->alpha_test]);
+ glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_ALPHA_REF_VAL);
break;
default:
vrend_printf( "invalid alpha-test: %x\n", ctx->key->alpha_test);
@@ -1848,8 +2146,18 @@ static void emit_alpha_test(const struct dump_ctx *ctx,
static void emit_pstipple_pass(struct vrend_glsl_strbufs *glsl_strbufs)
{
- emit_buf(glsl_strbufs, "stip_temp = texture(pstipple_sampler, vec2(gl_FragCoord.x / 32.0, gl_FragCoord.y / 32.0)).x;\n");
- emit_buf(glsl_strbufs, "if (stip_temp > 0.0) {\n\tdiscard;\n}\n");
+ static_assert(VREND_POLYGON_STIPPLE_SIZE == 32,
+ "According to the spec stipple size must be 32");
+
+ const int mask = VREND_POLYGON_STIPPLE_SIZE - 1;
+
+ emit_buf(glsl_strbufs, "{\n");
+ emit_buff(glsl_strbufs, " int spx = int(gl_FragCoord.x) & %d;\n", mask);
+ emit_buff(glsl_strbufs, " int spy = int(gl_FragCoord.y) & %d;\n", mask);
+ emit_buf(glsl_strbufs, " stip_temp = stipple_pattern[spy] & (0x80000000u >> spx);\n");
+ emit_buf(glsl_strbufs, " if (stip_temp == 0u) {\n discard;\n }\n");
+ emit_buf(glsl_strbufs, "}\n");
+ glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_PSTIPPLE_SAMPLER);
}
static void emit_color_select(const struct dump_ctx *ctx,
@@ -1858,16 +2166,20 @@ static void emit_color_select(const struct dump_ctx *ctx,
if (!ctx->key->color_two_side || !(ctx->color_in_mask & 0x3))
return;
+ const char *name_prefix = get_stage_input_name_prefix(ctx, ctx->prog_type);
if (ctx->color_in_mask & 1)
- emit_buf(glsl_strbufs, "realcolor0 = gl_FrontFacing ? ex_c0 : ex_bc0;\n");
+ emit_buff(glsl_strbufs, "realcolor0 = gl_FrontFacing ? %s_c0 : %s_bc0;\n",
+ name_prefix, name_prefix);
if (ctx->color_in_mask & 2)
- emit_buf(glsl_strbufs, "realcolor1 = gl_FrontFacing ? ex_c1 : ex_bc1;\n");
+ emit_buff(glsl_strbufs, "realcolor1 = gl_FrontFacing ? %s_c1 : %s_bc1;\n",
+ name_prefix, name_prefix);
}
static void emit_prescale(struct vrend_glsl_strbufs *glsl_strbufs)
{
emit_buf(glsl_strbufs, "gl_Position.y = gl_Position.y * winsys_adjust_y;\n");
+ glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_WINSYS_ADJUST_Y);
}
// TODO Consider exposing non-const ctx-> members as args to make *ctx const
@@ -1907,18 +2219,20 @@ static const struct vrend_shader_io *get_io_slot(const struct vrend_shader_io *s
static inline void
get_blockname(char outvar[64], const char *stage_prefix, const struct vrend_shader_io *io)
{
- snprintf(outvar, 64, "block_%sg%dA%d", stage_prefix, io->sid, io->array_id);
+ snprintf(outvar, 64, "block_%sg%d", stage_prefix, io->sid);
}
static inline void
get_blockvarname(char outvar[64], const char *stage_prefix, const struct vrend_shader_io *io, const char *postfix)
{
- snprintf(outvar, 64, "%sg%dA%d_%x%s", stage_prefix, io->first, io->array_id, io->usage_mask, postfix);
+ snprintf(outvar, 64, "%sg%d%s", stage_prefix, io->first, postfix);
}
static void get_so_name(const struct dump_ctx *ctx, bool from_block, const struct vrend_shader_io *output, int index, char out_var[255], char *wm)
{
- if (output->first == output->last || output->name != TGSI_SEMANTIC_GENERIC)
+ if (output->first == output->last ||
+ (output->name != TGSI_SEMANTIC_GENERIC &&
+ output->name != TGSI_SEMANTIC_TEXCOORD))
snprintf(out_var, 255, "%s%s", output->glsl_name, wm);
else {
if ((output->name == TGSI_SEMANTIC_GENERIC) && prefer_generic_io_block(ctx, io_out)) {
@@ -1972,12 +2286,18 @@ static void emit_so_movs(const struct dump_ctx *ctx,
free(ctx->so_names[i]);
if (ctx->so->output[i].register_index > ctx->num_outputs)
ctx->so_names[i] = NULL;
- else if (ctx->outputs[ctx->so->output[i].register_index].name == TGSI_SEMANTIC_CLIPVERTEX && ctx->has_clipvertex) {
+ else if (output->name == TGSI_SEMANTIC_CLIPVERTEX && ctx->has_clipvertex) {
ctx->so_names[i] = strdup("clipv_tmp");
*has_clipvertex_so = true;
} else {
char out_var[255];
- get_so_name(ctx, true, output, ctx->so->output[i].register_index, out_var, "");
+ const struct vrend_shader_io *used_output_io = output;
+ if (output->name == TGSI_SEMANTIC_GENERIC && ctx->generic_ios.output_range.used) {
+ used_output_io = &ctx->generic_ios.output_range.io;
+ } else if (output->name == TGSI_SEMANTIC_PATCH && ctx->patch_ios.output_range.used) {
+ used_output_io = &ctx->patch_ios.output_range.io;
+ }
+ get_so_name(ctx, true, used_output_io, ctx->so->output[i].register_index, out_var, "");
ctx->so_names[i] = strdup(out_var);
}
} else {
@@ -1986,7 +2306,7 @@ static void emit_so_movs(const struct dump_ctx *ctx,
ctx->so_names[i] = strdup(ntemp);
}
if (ctx->so->output[i].num_components == 1) {
- if (ctx->outputs[ctx->so->output[i].register_index].is_int)
+ if (output->is_int)
snprintf(outtype, 15, "intBitsToFloat");
else
snprintf(outtype, 15, "float");
@@ -2026,19 +2346,30 @@ static void emit_clip_dist_movs(const struct dump_ctx *ctx,
{
int i;
bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
- int num_clip = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_clip;
- int num_cull = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_cull;
+ int num_clip = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_out_clip;
+ int num_cull = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_out_cull;
+
+
+ int num_clip_cull = num_cull + num_clip;
+ if (ctx->num_out_clip_dist && !num_clip_cull)
+ num_clip = ctx->num_out_clip_dist;
int ndists;
const char *prefix="";
- if (ctx->prog_type == PIPE_SHADER_TESS_CTRL)
+ if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL)
prefix = "gl_out[gl_InvocationID].";
- if (ctx->num_out_clip_dist == 0 && ctx->key->clip_plane_enable) {
+
+ if (ctx->num_out_clip_dist == 0 &&
+ ctx->is_last_vertex_stage &&
+ ctx->num_outputs + 2 <= MAX_VARYING) {
+ emit_buff(glsl_strbufs, "if (clip_plane_enabled) {\n");
for (i = 0; i < 8; i++) {
- emit_buff(glsl_strbufs, "%sgl_ClipDistance[%d] = dot(%s, clipp[%d]);\n", prefix, i, ctx->has_clipvertex ? "clipv_tmp" : "gl_Position", i);
+ emit_buff(glsl_strbufs, " %sgl_ClipDistance[%d] = dot(%s, clipp[%d]);\n",
+ prefix, i, ctx->has_clipvertex ? "clipv_tmp" : "gl_Position", i);
}
- return;
+ emit_buff(glsl_strbufs, "}\n");
+ glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_CLIP_PLANE);
}
ndists = ctx->num_out_clip_dist;
if (has_prop)
@@ -2118,7 +2449,8 @@ static void handle_vertex_proc_exit(const struct dump_ctx *ctx,
if (ctx->so && !ctx->key->gs_present && !ctx->key->tes_present)
emit_so_movs(ctx, glsl_strbufs, has_clipvertex_so);
- emit_clip_dist_movs(ctx, glsl_strbufs);
+ if (ctx->cfg->has_cull_distance)
+ emit_clip_dist_movs(ctx, glsl_strbufs);
if (!ctx->key->gs_present && !ctx->key->tes_present)
emit_prescale(glsl_strbufs);
@@ -2134,7 +2466,12 @@ static void emit_fragment_logicop(const struct dump_ctx *ctx,
char src_fb[PIPE_MAX_COLOR_BUFS][64];
double scale[PIPE_MAX_COLOR_BUFS];
int mask[PIPE_MAX_COLOR_BUFS];
- char full_op[PIPE_MAX_COLOR_BUFS][128 + 8];
+
+ struct vrend_strbuf full_op_buf[PIPE_MAX_COLOR_BUFS];
+ for (int i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
+ strbuf_alloc(&full_op_buf[i], 134);
+ }
+
for (unsigned i = 0; i < ctx->num_outputs; i++) {
mask[i] = (1 << ctx->key->fs.surface_component_bits[i]) - 1;
@@ -2172,67 +2509,52 @@ static void emit_fragment_logicop(const struct dump_ctx *ctx,
for (unsigned i = 0; i < ctx->num_outputs; i++) {
switch (ctx->key->fs.logicop_func) {
case PIPE_LOGICOP_CLEAR:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s", "vec4(0)");
+ strbuf_fmt(&full_op_buf[i], "%s", "vec4(0)");
break;
case PIPE_LOGICOP_NOOP:
- full_op[i][0]= 0;
+ strbuf_fmt(&full_op_buf[i], "%s", "");
break;
case PIPE_LOGICOP_SET:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s", "vec4(1)");
+ strbuf_fmt(&full_op_buf[i], "%s", "vec4(1)");
break;
case PIPE_LOGICOP_COPY:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "fsout_tmp_c%d", i);
+ strbuf_fmt(&full_op_buf[i], "fsout_tmp_c%d", i);
break;
case PIPE_LOGICOP_COPY_INVERTED:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~%s", src[i]);
+ strbuf_fmt(&full_op_buf[i], "~%s", src[i]);
break;
case PIPE_LOGICOP_INVERT:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~%s", src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "~%s", src_fb[i]);
break;
case PIPE_LOGICOP_AND:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s & %s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "%s & %s", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_NAND:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~( %s & %s )", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "~( %s & %s )", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_NOR:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~( %s | %s )", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "~( %s | %s )", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_AND_INVERTED:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~%s & %s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "~%s & %s", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_AND_REVERSE:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s & ~%s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "%s & ~%s", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_XOR:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s ^%s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "%s ^%s", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_EQUIV:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~( %s ^ %s )", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "~( %s ^ %s )", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_OR_INVERTED:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "~%s | %s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "~%s | %s", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_OR_REVERSE:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s | ~%s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "%s | ~%s", src[i], src_fb[i]);
break;
case PIPE_LOGICOP_OR:
- snprintf(full_op[i], ARRAY_SIZE(full_op[i]),
- "%s | %s", src[i], src_fb[i]);
+ strbuf_fmt(&full_op_buf[i], "%s | %s", src[i], src_fb[i]);
break;
}
}
@@ -2244,10 +2566,10 @@ static void emit_fragment_logicop(const struct dump_ctx *ctx,
case PIPE_LOGICOP_COPY:
case PIPE_LOGICOP_CLEAR:
case PIPE_LOGICOP_SET:
- emit_buff(glsl_strbufs, "fsout_c%d = %s;\n", i, full_op[i]);
+ emit_buff(glsl_strbufs, "fsout_c%d = %s;\n", i, full_op_buf[i].buf);
break;
default:
- emit_buff(glsl_strbufs, "fsout_c%d = vec4((%s) & %d) / %f;\n", i, full_op[i], mask[i], scale[i]);
+ emit_buff(glsl_strbufs, "fsout_c%d = vec4((%s) & %d) / %f;\n", i, full_op_buf[i].buf, mask[i], scale[i]);
}
}
}
@@ -2255,9 +2577,13 @@ static void emit_fragment_logicop(const struct dump_ctx *ctx,
static void emit_cbuf_swizzle(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs)
{
+ int cbuf_id = 0;
for (uint i = 0; i < ctx->num_outputs; i++) {
- if (ctx->key->fs.swizzle_output_rgb_to_bgr & (1 << i)) {
- emit_buff(glsl_strbufs, "fsout_c%d = fsout_c%d.zyxw;\n", i, i);
+ if (ctx->outputs[i].name == TGSI_SEMANTIC_COLOR) {
+ if (ctx->key->fs.swizzle_output_rgb_to_bgr & (1 << cbuf_id)) {
+ emit_buff(glsl_strbufs, "fsout_c%d = fsout_c%d.zyxw;\n", cbuf_id, cbuf_id);
+ }
+ ++cbuf_id;
}
}
}
@@ -2266,7 +2592,7 @@ static void emit_cbuf_colorspace_convert(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs)
{
for (uint i = 0; i < ctx->num_outputs; i++) {
- if (ctx->key->fs.convert_linear_to_srgb_on_write & (1 << i)) {
+ if (ctx->key->fs.needs_manual_srgb_encode_bitmask & (1 << i)) {
emit_buff(glsl_strbufs,
"{\n"
" vec3 temp = fsout_c%d.xyz;\n"
@@ -2283,7 +2609,7 @@ static void emit_cbuf_colorspace_convert(const struct dump_ctx *ctx,
static void handle_fragment_proc_exit(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs)
{
- if (ctx->key->pstipple_tex)
+ if (ctx->key->pstipple_enabled)
emit_pstipple_pass(glsl_strbufs);
if (ctx->key->fs.cbufs_are_a8_bitmask)
@@ -2299,7 +2625,7 @@ static void handle_fragment_proc_exit(const struct dump_ctx *ctx,
if (ctx->key->fs.swizzle_output_rgb_to_bgr)
emit_cbuf_swizzle(ctx, glsl_strbufs);
- if (ctx->key->fs.convert_linear_to_srgb_on_write)
+ if (ctx->key->fs.needs_manual_srgb_encode_bitmask)
emit_cbuf_colorspace_convert(ctx, glsl_strbufs);
if (ctx->write_all_cbufs)
@@ -2378,9 +2704,24 @@ static void emit_txq(struct dump_ctx *ctx,
get_wm_string(twm), get_string(dtypeprefix),
srcs[sampler_index]);
} else {
+ const struct tgsi_full_src_register *src = &inst->Src[1];
+
+ int gles_sampler_index = 0;
+ for (int i = 0; i < src->Register.Index; ++i) {
+ if (ctx->samplers_used & (1 << i))
+ ++gles_sampler_index;
+ }
+
+ char sampler_str[64];
+
+ if (ctx->info.indirect_files & (1 << TGSI_FILE_SAMPLER) && src->Register.Indirect) {
+ snprintf(sampler_str, sizeof(sampler_str), "addr%d+%d", src->Indirect.Index, gles_sampler_index);
+ } else {
+ snprintf(sampler_str, sizeof(sampler_str), "%d", gles_sampler_index);
+ }
emit_buff(&ctx->glsl_strbufs, "%s%s = %s(%s_texlod[%s]);\n", dst, get_wm_string(twm),
get_string(dtypeprefix), tgsi_proc_to_prefix(ctx->info.processor),
- srcs[sampler_index]);
+ sampler_str);
ctx->gles_use_tex_query_level = true;
}
}
@@ -2416,6 +2757,7 @@ static void emit_txq(struct dump_ctx *ctx,
}
if (inst->Dst[0].Register.WriteMask & 0x7) {
+ char wm_buffer[16];
bool txq_returns_vec = (inst->Texture.Texture != TGSI_TEXTURE_BUFFER) &&
(ctx->cfg->use_gles ||
(inst->Texture.Texture != TGSI_TEXTURE_1D &&
@@ -2424,7 +2766,8 @@ static void emit_txq(struct dump_ctx *ctx,
if (ctx->cfg->use_gles &&
(inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
- writemask = ".xz";
+ snprintf(wm_buffer, sizeof(wm_buffer), ".xz%s", writemask);
+ writemask = wm_buffer;
}
emit_buff(&ctx->glsl_strbufs, "%s%s = %s(textureSize(%s%s))%s;\n", dst,
@@ -2460,8 +2803,6 @@ static void emit_txqs(struct dump_ctx *ctx,
static const char *get_tex_inst_ext(const struct tgsi_full_instruction *inst)
{
switch (inst->Instruction.Opcode) {
- case TGSI_OPCODE_LODQ:
- return "QueryLOD";
case TGSI_OPCODE_TXP:
if (inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
@@ -2497,9 +2838,32 @@ static const char *get_tex_inst_ext(const struct tgsi_full_instruction *inst)
}
}
+static void
+get_temp(const struct dump_ctx *ctx,
+ bool indirect_dim, int dim, int reg,
+ char buf[static 64], bool *require_dummy_value)
+{
+ struct vrend_temp_range *range = find_temp_range(ctx, reg);
+ if (range) {
+ if (indirect_dim) {
+ snprintf(buf, 64, "temp%d[addr%d + %d]", range->first, dim, reg - range->first);
+ } else {
+ if (range->array_id > 0) {
+ snprintf(buf, 64, "temp%d[%d]", range->first, reg - range->first);
+ } else {
+ snprintf(buf, 64, "temp%d", reg);
+ }
+ }
+ } else {
+ snprintf(buf, 64, "dummy_value");
+ *require_dummy_value = true;
+ }
+}
+
static bool fill_offset_buffer(const struct dump_ctx *ctx,
const struct tgsi_full_instruction *inst,
- struct vrend_strbuf *offset_buf)
+ struct vrend_strbuf *offset_buf,
+ bool *require_dummy_value)
{
if (inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE) {
const struct immed *imd = &ctx->imm[inst->TexOffsets[0].Index];
@@ -2530,15 +2894,16 @@ static bool fill_offset_buffer(const struct dump_ctx *ctx,
return false;
}
} else if (inst->TexOffsets[0].File == TGSI_FILE_TEMPORARY) {
- struct vrend_temp_range *range = find_temp_range(ctx, inst->TexOffsets[0].Index);
- int idx = inst->TexOffsets[0].Index - range->first;
+ char temp_buf[64];
+ get_temp(ctx, false, 0, inst->TexOffsets[0].Index, temp_buf, require_dummy_value);
switch (inst->Texture.Texture) {
case TGSI_TEXTURE_1D:
case TGSI_TEXTURE_1D_ARRAY:
case TGSI_TEXTURE_SHADOW1D:
case TGSI_TEXTURE_SHADOW1D_ARRAY:
- strbuf_appendf(offset_buf, ", int(floatBitsToInt(temp%d[%d].%c))",
- range->first, idx,
+
+ strbuf_appendf(offset_buf, ", int(floatBitsToInt(%s.%c))",
+ temp_buf,
get_swiz_char(inst->TexOffsets[0].SwizzleX));
break;
case TGSI_TEXTURE_RECT:
@@ -2547,19 +2912,19 @@ static bool fill_offset_buffer(const struct dump_ctx *ctx,
case TGSI_TEXTURE_2D_ARRAY:
case TGSI_TEXTURE_SHADOW2D:
case TGSI_TEXTURE_SHADOW2D_ARRAY:
- strbuf_appendf(offset_buf, ", ivec2(floatBitsToInt(temp%d[%d].%c), floatBitsToInt(temp%d[%d].%c))",
- range->first, idx,
+ strbuf_appendf(offset_buf, ", ivec2(floatBitsToInt(%s.%c), floatBitsToInt(%s.%c))",
+ temp_buf,
get_swiz_char(inst->TexOffsets[0].SwizzleX),
- range->first, idx,
+ temp_buf,
get_swiz_char(inst->TexOffsets[0].SwizzleY));
break;
case TGSI_TEXTURE_3D:
- strbuf_appendf(offset_buf, ", ivec3(floatBitsToInt(temp%d[%d].%c), floatBitsToInt(temp%d[%d].%c), floatBitsToInt(temp%d[%d].%c)",
- range->first, idx,
+ strbuf_appendf(offset_buf, ", ivec3(floatBitsToInt(%s.%c), floatBitsToInt(%s.%c), floatBitsToInt(%s.%c)",
+ temp_buf,
get_swiz_char(inst->TexOffsets[0].SwizzleX),
- range->first, idx,
+ temp_buf,
get_swiz_char(inst->TexOffsets[0].SwizzleY),
- range->first, idx,
+ temp_buf,
get_swiz_char(inst->TexOffsets[0].SwizzleZ));
break;
default:
@@ -2611,6 +2976,57 @@ static bool fill_offset_buffer(const struct dump_ctx *ctx,
return true;
}
+static void
+emit_lodq(struct dump_ctx *ctx,
+ const struct tgsi_full_instruction *inst,
+ const struct source_info *sinfo,
+ const struct dest_info *dinfo,
+ const char *srcs[4],
+ const char *dst,
+ const char *writemask)
+{
+ ctx->shader_req_bits |= SHADER_REQ_LODQ;
+
+ set_texture_reqs(ctx, inst, sinfo->sreg_index);
+
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(textureQueryLOD(%s, ",
+ dst, get_string(dinfo->dstconv), srcs[1]);
+
+ switch (inst->Texture.Texture) {
+ case TGSI_TEXTURE_1D:
+ case TGSI_TEXTURE_1D_ARRAY:
+ case TGSI_TEXTURE_SHADOW1D:
+ case TGSI_TEXTURE_SHADOW1D_ARRAY:
+ if (ctx->cfg->use_gles)
+ emit_buff(&ctx->glsl_strbufs, "vec2(%s.x, 0)", srcs[0]);
+ else
+ emit_buff(&ctx->glsl_strbufs, "%s.x", srcs[0]);
+ break;
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_2D_ARRAY:
+ case TGSI_TEXTURE_2D_MSAA:
+ case TGSI_TEXTURE_2D_ARRAY_MSAA:
+ case TGSI_TEXTURE_RECT:
+ case TGSI_TEXTURE_SHADOW2D:
+ case TGSI_TEXTURE_SHADOW2D_ARRAY:
+ case TGSI_TEXTURE_SHADOWRECT:
+ emit_buff(&ctx->glsl_strbufs, "%s.xy", srcs[0]);
+ break;
+ case TGSI_TEXTURE_3D:
+ case TGSI_TEXTURE_CUBE:
+ case TGSI_TEXTURE_SHADOWCUBE:
+ case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
+ case TGSI_TEXTURE_CUBE_ARRAY:
+ emit_buff(&ctx->glsl_strbufs, "%s.xyz", srcs[0]);
+ break;
+ default:
+ emit_buff(&ctx->glsl_strbufs, "%s", srcs[0]);
+ break;
+ }
+
+ emit_buff(&ctx->glsl_strbufs, ")%s);\n", writemask);
+}
+
// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void translate_tex(struct dump_ctx *ctx,
const struct tgsi_full_instruction *inst,
@@ -2621,11 +3037,11 @@ static void translate_tex(struct dump_ctx *ctx,
const char *writemask)
{
enum vrend_type_qualifier txfi = TYPE_CONVERSION_NONE;
- unsigned twm = TGSI_WRITEMASK_NONE, gwm = TGSI_WRITEMASK_NONE;
+ const char *src_swizzle;
enum vrend_type_qualifier dtypeprefix = TYPE_CONVERSION_NONE;
bool is_shad;
- int sampler_index;
+ int sampler_index = 1;
const char *tex_ext;
struct vrend_strbuf bias_buf;
@@ -2652,30 +3068,25 @@ static void translate_tex(struct dump_ctx *ctx,
break;
}
- sampler_index = 1;
-
- if (inst->Instruction.Opcode == TGSI_OPCODE_LODQ)
- ctx->shader_req_bits |= SHADER_REQ_LODQ;
-
switch (inst->Texture.Texture) {
case TGSI_TEXTURE_1D:
case TGSI_TEXTURE_BUFFER:
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
- twm = TGSI_WRITEMASK_NONE;
+ src_swizzle = "";
else
- twm = TGSI_WRITEMASK_X;
+ src_swizzle = ".x";
txfi = INT;
break;
case TGSI_TEXTURE_1D_ARRAY:
- twm = TGSI_WRITEMASK_XY;
+ src_swizzle = ".xy";
txfi = IVEC2;
break;
case TGSI_TEXTURE_2D:
case TGSI_TEXTURE_RECT:
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
- twm = TGSI_WRITEMASK_NONE;
+ src_swizzle = "";
else
- twm = TGSI_WRITEMASK_XY;
+ src_swizzle = ".xy";
txfi = IVEC2;
break;
case TGSI_TEXTURE_SHADOW1D:
@@ -2684,24 +3095,24 @@ static void translate_tex(struct dump_ctx *ctx,
case TGSI_TEXTURE_SHADOWRECT:
case TGSI_TEXTURE_3D:
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
- twm = TGSI_WRITEMASK_NONE;
+ src_swizzle = "";
else if (inst->Instruction.Opcode == TGSI_OPCODE_TG4)
- twm = TGSI_WRITEMASK_XY;
+ src_swizzle = ".xy";
else
- twm = TGSI_WRITEMASK_XYZ;
+ src_swizzle = ".xyz";
txfi = IVEC3;
break;
case TGSI_TEXTURE_CUBE:
case TGSI_TEXTURE_2D_ARRAY:
- twm = TGSI_WRITEMASK_XYZ;
+ src_swizzle = ".xyz";
txfi = IVEC3;
break;
case TGSI_TEXTURE_2D_MSAA:
- twm = TGSI_WRITEMASK_XY;
+ src_swizzle = ".xy";
txfi = IVEC2;
break;
case TGSI_TEXTURE_2D_ARRAY_MSAA:
- twm = TGSI_WRITEMASK_XYZ;
+ src_swizzle = ".xyz";
txfi = IVEC3;
break;
@@ -2713,20 +3124,57 @@ static void translate_tex(struct dump_ctx *ctx,
if (inst->Instruction.Opcode == TGSI_OPCODE_TG4 &&
inst->Texture.Texture != TGSI_TEXTURE_CUBE_ARRAY &&
inst->Texture.Texture != TGSI_TEXTURE_SHADOWCUBE_ARRAY)
- twm = TGSI_WRITEMASK_XYZ;
+ src_swizzle = ".xyz";
else
- twm = TGSI_WRITEMASK_NONE;
+ src_swizzle = "";
txfi = TYPE_CONVERSION_NONE;
break;
}
- if (inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_TEX2:
+ sampler_index = 2;
+ if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
+ strbuf_appendf(&bias_buf, ", %s.x", srcs[1]);
+ break;
+ case TGSI_OPCODE_TXB2:
+ case TGSI_OPCODE_TXL2:
+ sampler_index = 2;
+ strbuf_appendf(&bias_buf, ", %s.x", srcs[1]);
+ if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
+ strbuf_appendf(&bias_buf, ", %s.y", srcs[1]);
+ break;
+ case TGSI_OPCODE_TXB:
+ case TGSI_OPCODE_TXL:
+ /* On GLES we emulate the 1D array by using a 2D array, for this
+ * there is no shadow lookup with bias unless EXT_texture_shadow_lod is used.
+ * To avoid that compiling an invalid shader results in a crash we ignore
+ * the bias value */
+ if (!(ctx->cfg->use_gles && !ctx->cfg->has_texture_shadow_lod &&
+ TGSI_TEXTURE_SHADOW1D_ARRAY == inst->Texture.Texture))
+ strbuf_appendf(&bias_buf, ", %s.w", srcs[0]);
+ break;
+ case TGSI_OPCODE_TXF:
+ if (inst->Texture.Texture == TGSI_TEXTURE_1D ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA ||
+ inst->Texture.Texture == TGSI_TEXTURE_3D ||
+ inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY)
+ strbuf_appendf(&bias_buf, ", int(%s.w)", srcs[0]);
+ break;
+ case TGSI_OPCODE_TXD:
+ sampler_index = 3;
switch (inst->Texture.Texture) {
case TGSI_TEXTURE_1D:
case TGSI_TEXTURE_SHADOW1D:
case TGSI_TEXTURE_1D_ARRAY:
case TGSI_TEXTURE_SHADOW1D_ARRAY:
- gwm = TGSI_WRITEMASK_X;
+ if (ctx->cfg->use_gles)
+ strbuf_appendf(&bias_buf, ", vec2(%s.x, 0), vec2(%s.x, 0)", srcs[1], srcs[2]);
+ else
+ strbuf_appendf(&bias_buf, ", %s.x, %s.x", srcs[1], srcs[2]);
break;
case TGSI_TEXTURE_2D:
case TGSI_TEXTURE_SHADOW2D:
@@ -2734,53 +3182,18 @@ static void translate_tex(struct dump_ctx *ctx,
case TGSI_TEXTURE_SHADOW2D_ARRAY:
case TGSI_TEXTURE_RECT:
case TGSI_TEXTURE_SHADOWRECT:
- gwm = TGSI_WRITEMASK_XY;
+ strbuf_appendf(&bias_buf, ", %s.xy, %s.xy", srcs[1], srcs[2]);
break;
case TGSI_TEXTURE_3D:
case TGSI_TEXTURE_CUBE:
case TGSI_TEXTURE_SHADOWCUBE:
case TGSI_TEXTURE_CUBE_ARRAY:
- gwm = TGSI_WRITEMASK_XYZ;
+ strbuf_appendf(&bias_buf, ", %s.xyz, %s.xyz", srcs[1], srcs[2]);
break;
default:
- gwm = TGSI_WRITEMASK_NONE;
+ strbuf_appendf(&bias_buf, ", %s, %s", srcs[1], srcs[2]);
break;
}
- }
-
- switch (inst->Instruction.Opcode) {
- case TGSI_OPCODE_TXB2:
- case TGSI_OPCODE_TXL2:
- case TGSI_OPCODE_TEX2:
- sampler_index = 2;
- if (inst->Instruction.Opcode != TGSI_OPCODE_TEX2)
- strbuf_appendf(&bias_buf, ", %s.x", srcs[1]);
- else if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
- strbuf_appendf(&bias_buf, ", float(%s)", srcs[1]);
- break;
- case TGSI_OPCODE_TXB:
- case TGSI_OPCODE_TXL:
- strbuf_appendf(&bias_buf, ", %s.w", srcs[0]);
- break;
- case TGSI_OPCODE_TXF:
- if (inst->Texture.Texture == TGSI_TEXTURE_1D ||
- inst->Texture.Texture == TGSI_TEXTURE_2D ||
- inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
- inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA ||
- inst->Texture.Texture == TGSI_TEXTURE_3D ||
- inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
- inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY)
- strbuf_appendf(&bias_buf, ", int(%s.w)", srcs[0]);
- break;
- case TGSI_OPCODE_TXD:
- if (ctx->cfg->use_gles && (inst->Texture.Texture == TGSI_TEXTURE_1D ||
- inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
- inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
- inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY))
- strbuf_appendf(&bias_buf, ", vec2(%s%s, 0), vec2(%s%s, 0)", srcs[1], get_wm_string(gwm), srcs[2], get_wm_string(gwm));
- else
- strbuf_appendf(&bias_buf, ", %s%s, %s%s", srcs[1], get_wm_string(gwm), srcs[2], get_wm_string(gwm));
- sampler_index = 3;
break;
case TGSI_OPCODE_TG4:
sampler_index = 2;
@@ -2826,6 +3239,19 @@ static void translate_tex(struct dump_ctx *ctx,
const char *bias = bias_buf.buf;
const char *offset = offset_buf.buf;
+ // EXT_texture_shadow_lod defines a few more functions handling bias
+ if (bias &&
+ (inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY))
+ ctx->shader_req_bits |= SHADER_REQ_TEXTURE_SHADOW_LOD;
+
+ // EXT_texture_shadow_lod also adds the missing textureOffset for 2DArrayShadow in GLES
+ if ((bias || offset) && ctx->cfg->use_gles &&
+ (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY))
+ ctx->shader_req_bits |= SHADER_REQ_TEXTURE_SHADOW_LOD;
+
if (inst->Texture.NumOffsets == 1) {
if (inst->TexOffsets[0].Index >= (int)ARRAY_SIZE(ctx->imm)) {
vrend_printf( "Immediate exceeded, max is %lu\n", ARRAY_SIZE(ctx->imm));
@@ -2833,7 +3259,7 @@ static void translate_tex(struct dump_ctx *ctx,
goto cleanup;
}
- if (!fill_offset_buffer(ctx, inst, &offset_buf)) {
+ if (!fill_offset_buffer(ctx, inst, &offset_buf, &ctx->require_dummy_value)) {
set_buf_error(&ctx->glsl_strbufs);
goto cleanup;
}
@@ -2844,30 +3270,46 @@ static void translate_tex(struct dump_ctx *ctx,
}
}
- /* On GLES we have to normalized the coordinate for all but the texel fetch instruction */
- if (ctx->cfg->use_gles &&
- inst->Instruction.Opcode != TGSI_OPCODE_TXF &&
- (inst->Texture.Texture == TGSI_TEXTURE_RECT ||
- inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT)) {
+ char buf[255];
+ const char *new_srcs[4] = { buf, srcs[1], srcs[2], srcs[3] };
+
+ /* We have to unnormalize the coordinate for all but the texel fetch instruction */
+ if (inst->Instruction.Opcode != TGSI_OPCODE_TXF &&
+ vrend_shader_sampler_views_mask_get(ctx->key->sampler_views_emulated_rect_mask, sinfo->sreg_index)) {
- char buf[255];
- const char *new_srcs[4] = { buf, srcs[1], srcs[2], srcs[3] };
+ const char *bias = "";
+
+ /* No LOD for these texture types, but on GLES we emulate RECT by using
+ * a normal 2D texture, so we have to give LOD 0 */
+ switch (inst->Texture.Texture) {
+ case TGSI_TEXTURE_BUFFER:
+ case TGSI_TEXTURE_2D_MSAA:
+ case TGSI_TEXTURE_2D_ARRAY_MSAA:
+ break;
+ case TGSI_TEXTURE_RECT:
+ case TGSI_TEXTURE_SHADOWRECT:
+ if (!ctx->cfg->use_gles)
+ break;
+ /* fallthrough */
+ default:
+ bias = ", 0";
+ }
switch (inst->Instruction.Opcode) {
case TGSI_OPCODE_TXP:
- snprintf(buf, 255, "vec4(%s)/vec4(textureSize(%s, 0), 1, 1)", srcs[0], srcs[sampler_index]);
+ snprintf(buf, 255, "vec4(%s)/vec4(textureSize(%s%s), 1, 1)", srcs[0], srcs[sampler_index], bias);
break;
case TGSI_OPCODE_TG4:
- snprintf(buf, 255, "%s.xy/vec2(textureSize(%s, 0))", srcs[0], srcs[sampler_index]);
+ snprintf(buf, 255, "%s.xy/vec2(textureSize(%s%s))", srcs[0], srcs[sampler_index], bias);
break;
default:
/* Non TG4 ops have the compare value in the z components */
if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
- snprintf(buf, 255, "vec3(%s.xy/vec2(textureSize(%s, 0)), %s.z)", srcs[0], srcs[sampler_index], srcs[0]);
+ snprintf(buf, 255, "vec3(%s.xy/vec2(textureSize(%s%s)), %s.z)", srcs[0], srcs[sampler_index], bias, srcs[0]);
} else
- snprintf(buf, 255, "%s.xy/vec2(textureSize(%s, 0))", srcs[0], srcs[sampler_index]);
+ snprintf(buf, 255, "%s.xy/vec2(textureSize(%s%s))", srcs[0], srcs[sampler_index], bias);
}
srcs = new_srcs;
}
@@ -2880,31 +3322,76 @@ static void translate_tex(struct dump_ctx *ctx,
if (inst->Texture.Texture == TGSI_TEXTURE_1D)
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, ivec2(%s(%s%s), 0)%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
- tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
- get_wm_string(twm), bias, offset,
+ tex_ext, srcs[sampler_index], get_string(txfi),
+ srcs[0], src_swizzle, bias, offset,
dinfo->dst_override_no_wm[0] ? "" : writemask);
else if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY) {
/* the y coordinate must go into the z element and the y must be zero */
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, ivec3(%s(%s%s), 0).xzy%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
- tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
- get_wm_string(twm), bias, offset,
+ tex_ext, srcs[sampler_index], get_string(txfi),
+ srcs[0], src_swizzle, bias, offset,
dinfo->dst_override_no_wm[0] ? "" : writemask);
} else {
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, %s(%s%s), 0%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
- tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
- get_wm_string(twm), offset,
+ tex_ext, srcs[sampler_index], get_string(txfi),
+ srcs[0], src_swizzle, offset,
dinfo->dst_override_no_wm[0] ? "" : writemask);
}
} else {
- emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, %s(%s%s)%s%s)%s));\n",
- dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
- tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
- get_wm_string(twm), bias, offset,
- dinfo->dst_override_no_wm[0] ? "" : writemask);
+
+      /* To inject the swizzle for texture buffers with emulated formats do
+ *
+ * {
+ * vec4 val = texelFetch( )
+ * val = vec4(0/1/swizzle_x, ...);
+ * dest.writemask = val.writemask;
+ * }
+ *
+ */
+ emit_buff(&ctx->glsl_strbufs, "{\n vec4 val = %s(texelFetch%s(%s, %s(%s%s)%s%s));\n",
+ get_string(dtypeprefix),
+ tex_ext, srcs[sampler_index], get_string(txfi),
+ srcs[0], src_swizzle, bias, offset);
+
+ if (vrend_shader_sampler_views_mask_get(ctx->key->sampler_views_lower_swizzle_mask, sinfo->sreg_index)) {
+ int16_t packed_swizzles = ctx->key->tex_swizzle[sinfo->sreg_index];
+ emit_buff(&ctx->glsl_strbufs, " val = vec4(");
+
+ for (int i = 0; i < 4; ++i) {
+ if (i > 0)
+ emit_buff(&ctx->glsl_strbufs, ", ");
+
+ int swz = (packed_swizzles >> (i * 3)) & 7;
+ switch (swz) {
+ case PIPE_SWIZZLE_ZERO : emit_buf(&ctx->glsl_strbufs, "0.0"); break;
+ case PIPE_SWIZZLE_ONE :
+ switch (dtypeprefix) {
+ case UINT_BITS_TO_FLOAT:
+ emit_buf(&ctx->glsl_strbufs, "uintBitsToFloat(1u)");
+ break;
+ case INT_BITS_TO_FLOAT:
+ emit_buf(&ctx->glsl_strbufs, "intBitsToFloat(1)");
+ break;
+ default:
+ emit_buf(&ctx->glsl_strbufs, "1.0");
+ break;
+ }
+ break;
+ default:
+ emit_buff(&ctx->glsl_strbufs, "val%s", get_swizzle_string(swz));
+ }
+ }
+
+ emit_buff(&ctx->glsl_strbufs, ");\n");
+ }
+
+ emit_buff(&ctx->glsl_strbufs, " %s = val%s;\n}\n",
+ dst, dinfo->dst_override_no_wm[0] ? "" : writemask);
}
- } else if (ctx->cfg->glsl_version < 140 && (ctx->shader_req_bits & SHADER_REQ_SAMPLER_RECT)) {
+ } else if ((ctx->cfg->glsl_version < 140 && (ctx->shader_req_bits & SHADER_REQ_SAMPLER_RECT)) &&
+ !vrend_shader_sampler_views_mask_get(ctx->key->sampler_views_emulated_rect_mask, sinfo->sreg_index)) {
/* rect is special in GLSL 1.30 */
if (inst->Texture.Texture == TGSI_TEXTURE_RECT)
emit_buff(&ctx->glsl_strbufs, "%s = texture2DRect(%s, %s.xy)%s;\n",
@@ -2924,21 +3411,21 @@ static void translate_tex(struct dump_ctx *ctx,
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, vec4(%s%s.xzw, 0).xwyz %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
- srcs[0], get_wm_string(twm), offset, bias, cname,
- src->Register.Index, cname,
- src->Register.Index, writemask);
+ srcs[0], src_swizzle, offset, bias,
+ cname, src->Register.Index,
+ cname, src->Register.Index, writemask);
else
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, vec3(%s%s.xz, 0).xzy %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
- srcs[0], get_wm_string(twm), offset, bias, cname,
- src->Register.Index, cname,
- src->Register.Index, writemask);
+ srcs[0], src_swizzle, offset, bias,
+ cname, src->Register.Index,
+ cname, src->Register.Index, writemask);
} else if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, vec4(%s%s, 0).xwyz %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], srcs[0],
- get_wm_string(twm), offset, bias, cname,
+ src_swizzle, offset, bias, cname,
src->Register.Index, cname,
src->Register.Index, writemask);
}
@@ -2946,9 +3433,9 @@ static void translate_tex(struct dump_ctx *ctx,
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, %s%s%s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], srcs[0],
- get_wm_string(twm), offset, bias, cname,
- src->Register.Index, cname,
- src->Register.Index, writemask);
+ src_swizzle, offset, bias,
+ cname, src->Register.Index,
+ cname, src->Register.Index, writemask);
} else {
/* OpenGL ES do not support 1D texture
* so we use a 2D texture with a parameter set to 0.5
@@ -2967,7 +3454,7 @@ static void translate_tex(struct dump_ctx *ctx,
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, vec2(%s%s, 0.5) %s%s)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
- srcs[0], get_wm_string(twm), offset, bias,
+ srcs[0], src_swizzle, offset, bias,
dinfo->dst_override_no_wm[0] ? "" : writemask);
} else if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY) {
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
@@ -2980,13 +3467,13 @@ static void translate_tex(struct dump_ctx *ctx,
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, vec3(%s%s, 0).xzy %s%s)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
- srcs[0], get_wm_string(twm), offset, bias,
+ srcs[0], src_swizzle, offset, bias,
dinfo->dst_override_no_wm[0] ? "" : writemask);
}
} else {
emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, %s%s%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
- tex_ext, srcs[sampler_index], srcs[0], get_wm_string(twm),
+ tex_ext, srcs[sampler_index], srcs[0], src_swizzle,
offset, bias, dinfo->dst_override_no_wm[0] ? "" : writemask);
}
}
@@ -3011,8 +3498,12 @@ create_swizzled_clipdist(const struct dump_ctx *ctx,
char clip_indirect[32] = "";
bool has_prop = (ctx->num_cull_dist_prop + ctx->num_clip_dist_prop) > 0;
- int num_culls = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_cull;
- int num_clips = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_clip;
+ int num_culls = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_out_cull;
+ int num_clips = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_out_clip;
+
+ int num_clip_cull = num_culls + num_clips;
+ if (ctx->num_in_clip_dist && !num_clip_cull)
+ num_clips = ctx->num_in_clip_dist;
int base_idx = ctx->inputs[input_idx].sid * 4;
@@ -3058,7 +3549,6 @@ void load_clipdist_fs(const struct dump_ctx *ctx,
struct vrend_strbuf *result,
const struct tgsi_full_src_register *src,
int input_idx,
- bool gl_in,
const char *stypeprefix,
int offset)
{
@@ -3081,10 +3571,7 @@ void load_clipdist_fs(const struct dump_ctx *ctx,
else
snprintf(clip_indirect, 32, "%d + %d", src->Register.Index - offset, base_idx);
- if (gl_in)
- strbuf_fmt(result, "%s(clip_dist_temp[%s].%s)", stypeprefix, clip_indirect, swz);
- else
- strbuf_fmt(result, "%s(clip_dist_temp[%s].%s)", stypeprefix, clip_indirect, swz);
+ strbuf_fmt(result, "%s(clip_dist_temp[%s].%s)", stypeprefix, clip_indirect, swz);
}
@@ -3130,6 +3617,20 @@ static bool is_integer_memory(const struct dump_ctx *ctx, enum tgsi_file_type fi
return false;
}
+static void set_image_qualifier(struct vrend_shader_image images[],
+ uint32_t image_used_mask,
+ const struct tgsi_full_instruction *inst,
+ uint32_t reg_index, bool indirect)
+{
+ if (inst->Memory.Qualifier == TGSI_MEMORY_COHERENT) {
+ if (indirect) {
+ while (image_used_mask)
+ images[u_bit_scan(&image_used_mask)].coherent = true;
+ } else
+ images[reg_index].coherent = true;
+ }
+}
+
static void set_memory_qualifier(uint8_t ssbo_memory_qualifier[],
uint32_t ssbo_used_mask,
const struct tgsi_full_instruction *inst,
@@ -3160,14 +3661,24 @@ static void
translate_store(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
uint8_t ssbo_memory_qualifier[],
+ struct vrend_shader_image images[],
const struct tgsi_full_instruction *inst,
struct source_info *sinfo,
const char *srcs[4],
+ const struct dest_info *dinfo,
const char *dst)
{
const struct tgsi_full_dst_register *dst_reg = &inst->Dst[0];
+ assert(dinfo->dest_index >= 0);
if (dst_reg->Register.File == TGSI_FILE_IMAGE) {
+
+ /* bail out if we want to write to a non-existing image */
+ if (!((1 << dinfo->dest_index) & ctx->images_used_mask))
+ return;
+
+ set_image_qualifier(images, ctx->images_used_mask, inst, inst->Src[0].Register.Index, inst->Src[0].Register.Indirect);
+
bool is_ms = false;
enum vrend_type_qualifier coord_prefix = get_coord_prefix(ctx->images[dst_reg->Register.Index].decl.Resource, &is_ms, ctx->cfg->use_gles);
enum tgsi_return_type itype;
@@ -3259,8 +3770,7 @@ static void emit_load_mem(struct vrend_glsl_strbufs *glsl_strbufs, const char *d
}
}
-
-static void
+static bool
translate_load(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
uint8_t ssbo_memory_qualifier[],
@@ -3274,6 +3784,15 @@ translate_load(const struct dump_ctx *ctx,
{
const struct tgsi_full_src_register *src = &inst->Src[0];
if (src->Register.File == TGSI_FILE_IMAGE) {
+
+ /* Bail out if we want to load from an image that is not actually used */
+ assert(sinfo->sreg_index >= 0);
+ if (!((1 << sinfo->sreg_index) & ctx->images_used_mask))
+ return false;
+
+ set_image_qualifier(images, ctx->images_used_mask, inst, inst->Src[0].Register.Index, inst->Src[0].Register.Indirect);
+
+
bool is_ms = false;
enum vrend_type_qualifier coord_prefix = get_coord_prefix(ctx->images[sinfo->sreg_index].decl.Resource, &is_ms, ctx->cfg->use_gles);
enum vrend_type_qualifier dtypeprefix = TYPE_CONVERSION_NONE;
@@ -3339,11 +3858,13 @@ translate_load(const struct dump_ctx *ctx,
set_memory_qualifier(ssbo_memory_qualifier, ctx->ssbo_used_mask, inst, inst->Src[0].Register.Index, inst->Src[0].Register.Indirect);
- strcpy(mydst, dst);
- char *wmp = strchr(mydst, '.');
+ const char *d = dst;
+ char *md = mydst;
+ unsigned i = 0;
+ while ((i < sizeof(mydst) - 1) && *d && *d != '.')
+ *md++ = *d++;
+ *md = 0;
- if (wmp)
- wmp[0] = 0;
emit_buff(glsl_strbufs, "ssbo_addr_temp = uint(floatBitsToUint(%s)) >> 2;\n", srcs[1]);
atomic_op[0] = atomic_src[0] = '\0';
@@ -3379,6 +3900,7 @@ translate_load(const struct dump_ctx *ctx,
} else if (src->Register.File == TGSI_FILE_HW_ATOMIC) {
emit_buff(glsl_strbufs, "%s = uintBitsToFloat(atomicCounter(%s));\n", dst, srcs[0]);
}
+ return true;
}
static const char *get_atomic_opname(int tgsi_opcode, bool *is_cas)
@@ -3584,11 +4106,11 @@ static const char *reswizzle_dest(const struct vrend_shader_io *io, const struct
{
if (io->usage_mask != 0xf) {
if (io->num_components > 1) {
- int real_wm = dst_reg->Register.WriteMask >> io->swizzle_offset;
+ const int wm = dst_reg->Register.WriteMask;
int k = 1;
reswizzled[0] = '.';
for (int i = 0; i < io->num_components; ++i) {
- if (real_wm & (1 << i))
+ if (wm & (1 << i))
reswizzled[k++] = get_swiz_char(i);
}
reswizzled[k] = 0;
@@ -3601,37 +4123,39 @@ static const char *reswizzle_dest(const struct vrend_shader_io *io, const struct
static void get_destination_info_generic(const struct dump_ctx *ctx,
const struct tgsi_full_dst_register *dst_reg,
const struct vrend_shader_io *io,
- const char *writemask, char dsts[255])
+ const char *writemask,
+ struct vrend_strbuf *result)
{
const char *blkarray = (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL) ? "[gl_InvocationID]" : "";
const char *stage_prefix = get_stage_output_name_prefix(ctx->prog_type);
const char *wm = io->override_no_wm ? "" : writemask;
char reswizzled[6] = "";
+ char outvarname[64];
wm = reswizzle_dest(io, dst_reg, reswizzled, writemask);
- if (io->first == io->last)
- snprintf(dsts, 255, "%s%s%s", io->glsl_name, blkarray, wm);
- else {
- if (prefer_generic_io_block(ctx, io_out)) {
- char outvarname[64];
- get_blockvarname(outvarname, stage_prefix, io, blkarray);
+ strbuf_reset(result);
- if (dst_reg->Register.Indirect)
- snprintf(dsts, 255, "%s.%s[addr%d + %d]%s", outvarname, io->glsl_name,
- dst_reg->Indirect.Index, dst_reg->Register.Index - io->first, wm);
- else
- snprintf(dsts, 255, "%s.%s[%d]%s", outvarname, io->glsl_name,
- dst_reg->Register.Index - io->first, wm);
- } else {
- if (dst_reg->Register.Indirect)
- snprintf(dsts, 255, "%s%s[addr%d + %d]%s", io->glsl_name, blkarray,
- dst_reg->Indirect.Index, dst_reg->Register.Index - io->first, wm);
- else
- snprintf(dsts, 255, "%s%s[%d]%s", io->glsl_name, blkarray,
- dst_reg->Register.Index - io->first, wm);
+ enum io_decl_type decl_type = decl_plain;
+ if (io->first != io->last && prefer_generic_io_block(ctx, io_out)) {
+ get_blockvarname(outvarname, stage_prefix, io, blkarray);
+ blkarray = outvarname;
+ decl_type = decl_block;
+ }
+ vrend_shader_write_io_as_dst(result, blkarray, io, dst_reg, decl_type);
+ strbuf_appendf(result, "%s", wm);
+}
+
+static
+int find_io_index(int num_io, struct vrend_shader_io *io, int index)
+{
+ for (int j = 0; j < num_io; j++) {
+ if (io[j].first <= index &&
+ io[j].last >= index) {
+ return j;
}
}
+ return -1;
}
// TODO Consider exposing non-const ctx-> members as args to make *ctx const
@@ -3639,7 +4163,7 @@ static bool
get_destination_info(struct dump_ctx *ctx,
const struct tgsi_full_instruction *inst,
struct dest_info *dinfo,
- char dsts[3][255],
+ struct vrend_strbuf dst_bufs[3],
char fp64_dsts[3][255],
char *writemask)
{
@@ -3714,116 +4238,118 @@ get_destination_info(struct dump_ctx *ctx,
}
if (dst_reg->Register.File == TGSI_FILE_OUTPUT) {
- uint32_t j;
- for (j = 0; j < ctx->num_outputs; j++) {
- if (ctx->outputs[j].first <= dst_reg->Register.Index &&
- ctx->outputs[j].last >= dst_reg->Register.Index &&
- (ctx->outputs[j].usage_mask & dst_reg->Register.WriteMask)) {
- if (inst->Instruction.Precise) {
- if (!ctx->outputs[j].invariant && ctx->outputs[j].name != TGSI_SEMANTIC_CLIPVERTEX) {
- ctx->outputs[j].precise = true;
- ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
- }
- }
+ int j = find_io_index(ctx->num_outputs, ctx->outputs,
+ dst_reg->Register.Index);
- if (ctx->glsl_ver_required >= 140 && ctx->outputs[j].name == TGSI_SEMANTIC_CLIPVERTEX) {
- snprintf(dsts[i], 255, "clipv_tmp");
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_CLIPDIST) {
- char clip_indirect[32] = "";
- if (ctx->outputs[j].first != ctx->outputs[j].last) {
- if (dst_reg->Register.Indirect)
- snprintf(clip_indirect, sizeof(clip_indirect), "+ addr%d", dst_reg->Indirect.Index);
- else
- snprintf(clip_indirect, sizeof(clip_indirect), "+ %d", dst_reg->Register.Index - ctx->outputs[j].first);
- }
- snprintf(dsts[i], 255, "clip_dist_temp[%d %s]%s", ctx->outputs[j].sid, clip_indirect, writemask);
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_TESSOUTER ||
- ctx->outputs[j].name == TGSI_SEMANTIC_TESSINNER ||
- ctx->outputs[j].name == TGSI_SEMANTIC_SAMPLEMASK) {
- int idx;
- switch (dst_reg->Register.WriteMask) {
- case 0x1: idx = 0; break;
- case 0x2: idx = 1; break;
- case 0x4: idx = 2; break;
- case 0x8: idx = 3; break;
- default:
- idx = 0;
- break;
- }
- snprintf(dsts[i], 255, "%s[%d]", ctx->outputs[j].glsl_name, idx);
- if (ctx->outputs[j].is_int) {
- dinfo->dtypeprefix = FLOAT_BITS_TO_INT;
- dinfo->dstconv = INT;
- }
+ if (j < 0)
+ return false;
+
+ struct vrend_shader_io *output = &ctx->outputs[j];
+
+ if (inst->Instruction.Precise) {
+ if (!output->invariant && output->name != TGSI_SEMANTIC_CLIPVERTEX &&
+ ctx->cfg->has_gpu_shader5) {
+ output->precise = true;
+ ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
+ }
+ }
+
+ if (ctx->glsl_ver_required >= 140 && output->name == TGSI_SEMANTIC_CLIPVERTEX) {
+ if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL) {
+ strbuf_fmt(&dst_bufs[i], "%s[gl_InvocationID]", output->glsl_name);
+ } else {
+ strbuf_fmt(&dst_bufs[i], "%s", ctx->is_last_vertex_stage ? "clipv_tmp" : output->glsl_name);
+ }
+ } else if (output->name == TGSI_SEMANTIC_CLIPDIST) {
+ char clip_indirect[32] = "";
+ if (output->first != output->last) {
+ if (dst_reg->Register.Indirect)
+ snprintf(clip_indirect, sizeof(clip_indirect), "+ addr%d", dst_reg->Indirect.Index);
+ else
+ snprintf(clip_indirect, sizeof(clip_indirect), "+ %d", dst_reg->Register.Index - output->first);
+ }
+ strbuf_fmt(&dst_bufs[i], "clip_dist_temp[%d %s]%s", output->sid, clip_indirect, writemask);
+ } else if (output->name == TGSI_SEMANTIC_TESSOUTER ||
+ output->name == TGSI_SEMANTIC_TESSINNER ||
+ output->name == TGSI_SEMANTIC_SAMPLEMASK) {
+ int idx;
+ switch (dst_reg->Register.WriteMask) {
+ case 0x1: idx = 0; break;
+ case 0x2: idx = 1; break;
+ case 0x4: idx = 2; break;
+ case 0x8: idx = 3; break;
+ default:
+ idx = 0;
+ break;
+ }
+ strbuf_fmt(&dst_bufs[i], "%s[%d]", output->glsl_name, idx);
+ if (output->is_int) {
+ dinfo->dtypeprefix = FLOAT_BITS_TO_INT;
+ dinfo->dstconv = INT;
+ }
+ } else {
+ if (output->glsl_gl_block) {
+ strbuf_fmt(&dst_bufs[i], "gl_out[%s].%s%s",
+ ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ? "gl_InvocationID" : "0",
+ output->glsl_name,
+ output->override_no_wm ? "" : writemask);
+ } else if (output->name == TGSI_SEMANTIC_GENERIC) {
+ struct vrend_shader_io *io = ctx->generic_ios.output_range.used ? &ctx->generic_ios.output_range.io : output;
+ get_destination_info_generic(ctx, dst_reg, io, writemask, &dst_bufs[i]);
+ dinfo->dst_override_no_wm[i] = output->override_no_wm;
+ } else if (output->name == TGSI_SEMANTIC_TEXCOORD) {
+ get_destination_info_generic(ctx, dst_reg, output, writemask, &dst_bufs[i]);
+ dinfo->dst_override_no_wm[i] = output->override_no_wm;
+ } else if (output->name == TGSI_SEMANTIC_PATCH) {
+ struct vrend_shader_io *io = ctx->patch_ios.output_range.used ? &ctx->patch_ios.output_range.io : output;
+ char reswizzled[6] = "";
+ const char *wm = reswizzle_dest(io, dst_reg, reswizzled, writemask);
+ strbuf_reset(&dst_bufs[i]);
+ vrend_shader_write_io_as_dst(&dst_bufs[i], "", io, dst_reg, decl_plain);
+ if (!output->override_no_wm)
+ strbuf_appendf(&dst_bufs[i], "%s", wm);
+ dinfo->dst_override_no_wm[i] = output->override_no_wm;
+ } else {
+ if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL) {
+ strbuf_fmt(&dst_bufs[i], "%s[gl_InvocationID]%s", output->glsl_name, output->override_no_wm ? "" : writemask);
} else {
- if (ctx->outputs[j].glsl_gl_block) {
- snprintf(dsts[i], 255, "gl_out[%s].%s%s",
- ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ? "gl_InvocationID" : "0",
- ctx->outputs[j].glsl_name,
- ctx->outputs[j].override_no_wm ? "" : writemask);
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_GENERIC) {
- struct vrend_shader_io *io = ctx->generic_ios.output_range.used ? &ctx->generic_ios.output_range.io : &ctx->outputs[j];
- get_destination_info_generic(ctx, dst_reg, io, writemask, dsts[i]);
- dinfo->dst_override_no_wm[i] = ctx->outputs[j].override_no_wm;
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_PATCH) {
- struct vrend_shader_io *io = ctx->patch_ios.output_range.used ? &ctx->patch_ios.output_range.io : &ctx->outputs[j];
- char reswizzled[6] = "";
- const char *wm = reswizzle_dest(io, dst_reg, reswizzled, writemask);
- if (io->last != io->first) {
- if (dst_reg->Register.Indirect)
- snprintf(dsts[i], 255, "%s[addr%d + %d]%s",
- io->glsl_name, dst_reg->Indirect.Index,
- dst_reg->Register.Index - io->first,
- io->override_no_wm ? "" : wm);
- else
- snprintf(dsts[i], 255, "%s[%d]%s",
- io->glsl_name,
- dst_reg->Register.Index - io->first,
- io->override_no_wm ? "" : wm);
- } else {
- snprintf(dsts[i], 255, "%s%s", io->glsl_name, ctx->outputs[j].override_no_wm ? "" : wm);
- }
- dinfo->dst_override_no_wm[i] = ctx->outputs[j].override_no_wm;
- } else {
- if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL) {
- snprintf(dsts[i], 255, "%s[gl_InvocationID]%s", ctx->outputs[j].glsl_name, ctx->outputs[j].override_no_wm ? "" : writemask);
- } else {
- snprintf(dsts[i], 255, "%s%s", ctx->outputs[j].glsl_name, ctx->outputs[j].override_no_wm ? "" : writemask);
- }
- dinfo->dst_override_no_wm[i] = ctx->outputs[j].override_no_wm;
- }
- if (ctx->outputs[j].is_int) {
- if (dinfo->dtypeprefix == TYPE_CONVERSION_NONE)
- dinfo->dtypeprefix = FLOAT_BITS_TO_INT;
- dinfo->dstconv = INT;
- }
- else if (ctx->outputs[j].type == VEC_UINT) {
- if (dinfo->dtypeprefix == TYPE_CONVERSION_NONE)
- dinfo->dtypeprefix = FLOAT_BITS_TO_UINT;
- dinfo->dstconv = dinfo->udstconv;
- }
- else if (ctx->outputs[j].type == VEC_INT) {
- if (dinfo->dtypeprefix == TYPE_CONVERSION_NONE)
- dinfo->dtypeprefix = FLOAT_BITS_TO_INT;
- dinfo->dstconv = dinfo->idstconv;
- }
- if (ctx->outputs[j].name == TGSI_SEMANTIC_PSIZE) {
- dinfo->dstconv = FLOAT;
- break;
- }
+ strbuf_fmt(&dst_bufs[i], "%s%s", output->glsl_name, output->override_no_wm ? "" : writemask);
}
+ dinfo->dst_override_no_wm[i] = output->override_no_wm;
+ }
+ if (output->is_int) {
+ if (dinfo->dtypeprefix == TYPE_CONVERSION_NONE)
+ dinfo->dtypeprefix = FLOAT_BITS_TO_INT;
+ dinfo->dstconv = INT;
+ }
+ else if (output->type == VEC_UINT) {
+ if (dinfo->dtypeprefix == TYPE_CONVERSION_NONE)
+ dinfo->dtypeprefix = FLOAT_BITS_TO_UINT;
+ dinfo->dstconv = dinfo->udstconv;
+ }
+ else if (output->type == VEC_INT) {
+ if (dinfo->dtypeprefix == TYPE_CONVERSION_NONE)
+ dinfo->dtypeprefix = FLOAT_BITS_TO_INT;
+ dinfo->dstconv = dinfo->idstconv;
+ }
+ if (output->name == TGSI_SEMANTIC_PSIZE) {
+ dinfo->dstconv = FLOAT;
break;
}
}
}
else if (dst_reg->Register.File == TGSI_FILE_TEMPORARY) {
- struct vrend_temp_range *range = find_temp_range(ctx, dst_reg->Register.Index);
- if (!range)
- return false;
- if (dst_reg->Register.Indirect) {
- snprintf(dsts[i], 255, "temp%d[addr0 + %d]%s", range->first, dst_reg->Register.Index - range->first, writemask);
- } else
- snprintf(dsts[i], 255, "temp%d[%d]%s", range->first, dst_reg->Register.Index - range->first, writemask);
+ char temp_buf[64];
+ get_temp(ctx, dst_reg->Register.Indirect, 0, dst_reg->Register.Index,
+ temp_buf, &ctx->require_dummy_value);
+ strbuf_fmt(&dst_bufs[i], "%s%s", temp_buf, writemask);
+ if (inst->Instruction.Precise) {
+ struct vrend_temp_range *range = find_temp_range(ctx, dst_reg->Register.Index);
+ if (range && ctx->cfg->has_gpu_shader5) {
+ range->precise_result = true;
+ ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
+ }
+ }
}
else if (dst_reg->Register.File == TGSI_FILE_IMAGE) {
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
@@ -3831,11 +4357,12 @@ get_destination_info(struct dump_ctx *ctx,
int basearrayidx = lookup_image_array(ctx, dst_reg->Register.Index);
if (dst_reg->Register.Indirect) {
assert(dst_reg->Indirect.File == TGSI_FILE_ADDRESS);
- snprintf(dsts[i], 255, "%simg%d[addr%d + %d]", cname, basearrayidx, dst_reg->Indirect.Index, dst_reg->Register.Index - basearrayidx);
+ strbuf_fmt(&dst_bufs[i], "%simg%d[addr%d + %d]", cname, basearrayidx, dst_reg->Indirect.Index, dst_reg->Register.Index - basearrayidx);
} else
- snprintf(dsts[i], 255, "%simg%d[%d]", cname, basearrayidx, dst_reg->Register.Index - basearrayidx);
+ strbuf_fmt(&dst_bufs[i], "%simg%d[%d]", cname, basearrayidx, dst_reg->Register.Index - basearrayidx);
} else
- snprintf(dsts[i], 255, "%simg%d", cname, dst_reg->Register.Index);
+ strbuf_fmt(&dst_bufs[i], "%simg%d", cname, dst_reg->Register.Index);
+ dinfo->dest_index = dst_reg->Register.Index;
} else if (dst_reg->Register.File == TGSI_FILE_BUFFER) {
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
if (ctx->info.indirect_files & (1 << TGSI_FILE_BUFFER)) {
@@ -3843,20 +4370,21 @@ get_destination_info(struct dump_ctx *ctx,
const char *atomic_str = atomic_ssbo ? "atomic" : "";
int base = atomic_ssbo ? ctx->ssbo_atomic_array_base : ctx->ssbo_array_base;
if (dst_reg->Register.Indirect) {
- snprintf(dsts[i], 255, "%sssboarr%s[addr%d+%d].%sssbocontents%d", cname, atomic_str, dst_reg->Indirect.Index, dst_reg->Register.Index - base, cname, base);
+ strbuf_fmt(&dst_bufs[i], "%sssboarr%s[addr%d+%d].%sssbocontents%d", cname, atomic_str, dst_reg->Indirect.Index, dst_reg->Register.Index - base, cname, base);
} else
- snprintf(dsts[i], 255, "%sssboarr%s[%d].%sssbocontents%d", cname, atomic_str, dst_reg->Register.Index - base, cname, base);
+ strbuf_fmt(&dst_bufs[i], "%sssboarr%s[%d].%sssbocontents%d", cname, atomic_str, dst_reg->Register.Index - base, cname, base);
} else
- snprintf(dsts[i], 255, "%sssbocontents%d", cname, dst_reg->Register.Index);
+ strbuf_fmt(&dst_bufs[i], "%sssbocontents%d", cname, dst_reg->Register.Index);
+ dinfo->dest_index = dst_reg->Register.Index;
} else if (dst_reg->Register.File == TGSI_FILE_MEMORY) {
- snprintf(dsts[i], 255, "values");
+ strbuf_fmt(&dst_bufs[i], "values");
} else if (dst_reg->Register.File == TGSI_FILE_ADDRESS) {
- snprintf(dsts[i], 255, "addr%d", dst_reg->Register.Index);
+ strbuf_fmt(&dst_bufs[i], "addr%d", dst_reg->Register.Index);
}
if (dtype == TGSI_TYPE_DOUBLE) {
- strcpy(fp64_dsts[i], dsts[i]);
- snprintf(dsts[i], 255, "fp64_dst[%d]%s", i, fp64_writemask);
+ strcpy(fp64_dsts[i], dst_bufs[i].buf);
+ strbuf_fmt(&dst_bufs[i], "fp64_dst[%d]%s", i, fp64_writemask);
writemask[0] = 0;
}
@@ -3873,15 +4401,15 @@ static const char *shift_swizzles(const struct vrend_shader_io *io, const struct
swizzle_shifted[swz_offset++] = '.';
for (int i = 0; i < 4; ++i) {
switch (i) {
- case 0: swizzle_shifted[swz_offset++] = get_swiz_char(src->Register.SwizzleX - io->swizzle_offset);
+ case 0: swizzle_shifted[swz_offset++] = get_swiz_char(src->Register.SwizzleX);
break;
- case 1: swizzle_shifted[swz_offset++] = get_swiz_char(src->Register.SwizzleY - io->swizzle_offset);
+ case 1: swizzle_shifted[swz_offset++] = get_swiz_char(src->Register.SwizzleY);
break;
- case 2: swizzle_shifted[swz_offset++] = src->Register.SwizzleZ - io->swizzle_offset < io->num_components ?
- get_swiz_char(src->Register.SwizzleZ - io->swizzle_offset) : 'x';
+ case 2: swizzle_shifted[swz_offset++] = src->Register.SwizzleZ < io->num_components ?
+ get_swiz_char(src->Register.SwizzleZ) : 'x';
break;
- case 3: swizzle_shifted[swz_offset++] = src->Register.SwizzleW - io->swizzle_offset < io->num_components ?
- get_swiz_char(src->Register.SwizzleW - io->swizzle_offset) : 'x';
+ case 3: swizzle_shifted[swz_offset++] = src->Register.SwizzleW < io->num_components ?
+ get_swiz_char(src->Register.SwizzleW) : 'x';
}
}
swizzle_shifted[swz_offset] = 0;
@@ -3903,6 +4431,8 @@ static void get_source_info_generic(const struct dump_ctx *ctx,
{
int swz_offset = 0;
char swizzle_shifted[6] = "";
+ char outvarname[64];
+
if (swizzle[0] == ')') {
swizzle_shifted[swz_offset++] = ')';
swizzle_shifted[swz_offset] = 0;
@@ -3911,42 +4441,26 @@ static void get_source_info_generic(const struct dump_ctx *ctx,
/* This IO element is not using all vector elements, so we have to shift the swizzle names */
swizzle = shift_swizzles(io, src, swz_offset, swizzle_shifted, swizzle);
- if (io->first == io->last) {
- strbuf_fmt(result, "%s(%s%s%s%s)", get_string(srcstypeprefix),
- prefix, io->glsl_name, arrayname, io->is_int ? "" : swizzle);
- } else {
+ strbuf_fmt(result, "%s(%s", get_string(srcstypeprefix), prefix);
- if (prefer_generic_io_block(ctx, iot)) {
- char outvarname[64];
- const char *stage_prefix = iot == io_in ? get_stage_input_name_prefix(ctx, ctx->prog_type) :
- get_stage_output_name_prefix(ctx->prog_type);
+ enum io_decl_type decl_type = decl_plain;
- get_blockvarname(outvarname, stage_prefix, io, arrayname);
- if (src->Register.Indirect)
- strbuf_fmt(result, "%s(%s %s.%s[addr%d + %d] %s)", get_string(srcstypeprefix), prefix,
- outvarname, io->glsl_name, src->Indirect.Index, src->Register.Index - io->first,
- io->is_int ? "" : swizzle);
- else
- strbuf_fmt(result, "%s(%s %s.%s[%d] %s)", get_string(srcstypeprefix), prefix,
- outvarname, io->glsl_name, src->Register.Index - io->first,
- io->is_int ? "" : swizzle);
- } else {
- if (src->Register.Indirect)
- strbuf_fmt(result, "%s(%s %s%s[addr%d + %d] %s)", get_string(srcstypeprefix), prefix,
- io->glsl_name,
- arrayname,
- src->Indirect.Index,
- src->Register.Index - io->first,
- io->is_int ? "" : swizzle);
- else
- strbuf_fmt(result, "%s(%s %s%s[%d] %s)", get_string(srcstypeprefix), prefix,
- io->glsl_name,
- arrayname,
- src->Register.Index - io->first,
- io->is_int ? "" : swizzle);
- }
+ if ((io->first != io->last || io->overlapping_array) &&
+ prefer_generic_io_block(ctx, iot)) {
+
+ const struct vrend_shader_io *array = io->overlapping_array ?
+ io->overlapping_array : io;
+ const char *stage_prefix = iot == io_in ?
+ get_stage_input_name_prefix(ctx, ctx->prog_type) :
+ get_stage_output_name_prefix(ctx->prog_type);
+ get_blockvarname(outvarname, stage_prefix, array, arrayname);
+ arrayname = outvarname;
+ decl_type = decl_block;
}
+
+ vrend_shader_write_io_as_src(result, arrayname, io, src, decl_type);
+ strbuf_appendf(result, "%s)", io->is_int ? "" : swizzle);
}
static void get_source_info_patch(enum vrend_type_qualifier srcstypeprefix,
@@ -3965,20 +4479,10 @@ static void get_source_info_patch(enum vrend_type_qualifier srcstypeprefix,
}
swizzle = shift_swizzles(io, src, swz_offset, swizzle_shifted, swizzle);
- const char *wm = io->is_int ? "" : swizzle;
-
- if (io->last == io->first)
- strbuf_fmt(result, "%s(%s%s%s%s)", get_string(srcstypeprefix), prefix, io->glsl_name,
- arrayname, wm);
- else {
- if (src->Register.Indirect)
- strbuf_fmt(result, "%s(%s %s[addr%d + %d] %s)", get_string(srcstypeprefix), prefix,
- io->glsl_name, src->Indirect.Index, src->Register.Index - io->first, wm);
- else
- strbuf_fmt(result, "%s(%s %s[%d] %s)", get_string(srcstypeprefix), prefix,
- io->glsl_name, src->Register.Index - io->first, wm);
- }
+ strbuf_fmt(result, "%s(%s", get_string(srcstypeprefix), prefix);
+ vrend_shader_write_io_as_src(result, io->last == io->first ? arrayname : "", io, src, decl_plain);
+ strbuf_appendf(result, "%s)", io->is_int ? "" : swizzle);
}
static void get_tesslevel_as_source(struct vrend_strbuf *src_buf, const char *prefix,
@@ -3992,12 +4496,28 @@ static void get_tesslevel_as_source(struct vrend_strbuf *src_buf, const char *pr
name, reg->SwizzleW);
}
+static void get_source_swizzle(const struct tgsi_full_src_register *src, char swizzle[8])
+{
+ if (src->Register.SwizzleX != TGSI_SWIZZLE_X ||
+ src->Register.SwizzleY != TGSI_SWIZZLE_Y ||
+ src->Register.SwizzleZ != TGSI_SWIZZLE_Z ||
+ src->Register.SwizzleW != TGSI_SWIZZLE_W) {
+ *swizzle++ = '.';
+ *swizzle++ = get_swiz_char(src->Register.SwizzleX);
+ *swizzle++ = get_swiz_char(src->Register.SwizzleY);
+ *swizzle++ = get_swiz_char(src->Register.SwizzleZ);
+ *swizzle++ = get_swiz_char(src->Register.SwizzleW);
+ }
+
+ *swizzle++ = 0;
+}
+
// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static bool
get_source_info(struct dump_ctx *ctx,
const struct tgsi_full_instruction *inst,
struct source_info *sinfo,
- struct vrend_strbuf srcs[4], char src_swizzle0[10])
+ struct vrend_strbuf srcs[4], char src_swizzle0[16])
{
bool stprefix = false;
@@ -4032,8 +4552,7 @@ get_source_info(struct dump_ctx *ctx,
for (uint32_t i = 0; i < inst->Instruction.NumSrcRegs; i++) {
const struct tgsi_full_src_register *src = &inst->Src[i];
struct vrend_strbuf *src_buf = &srcs[i];
- char swizzle[8] = "";
- int usage_mask = 0;
+ char swizzle[16] = "";
char *swizzle_writer = swizzle;
char prefix[6] = "";
char arrayname[16] = "";
@@ -4043,8 +4562,6 @@ get_source_info(struct dump_ctx *ctx,
sinfo->override_no_wm[i] = false;
sinfo->override_no_cast[i] = false;
- if (isfloatabsolute)
- swizzle[swz_idx++] = ')';
if (src->Register.Negate)
prefix[pre_idx++] = '-';
@@ -4068,125 +4585,127 @@ get_source_info(struct dump_ctx *ctx,
swizzle_writer = src_swizzle0;
}
- usage_mask |= 1 << src->Register.SwizzleX;
- usage_mask |= 1 << src->Register.SwizzleY;
- usage_mask |= 1 << src->Register.SwizzleZ;
- usage_mask |= 1 << src->Register.SwizzleW;
+ if (isfloatabsolute)
+ swizzle_writer[swz_idx++] = ')';
- if (src->Register.SwizzleX != TGSI_SWIZZLE_X ||
- src->Register.SwizzleY != TGSI_SWIZZLE_Y ||
- src->Register.SwizzleZ != TGSI_SWIZZLE_Z ||
- src->Register.SwizzleW != TGSI_SWIZZLE_W) {
- swizzle_writer[swz_idx++] = '.';
- swizzle_writer[swz_idx++] = get_swiz_char(src->Register.SwizzleX);
- swizzle_writer[swz_idx++] = get_swiz_char(src->Register.SwizzleY);
- swizzle_writer[swz_idx++] = get_swiz_char(src->Register.SwizzleZ);
- swizzle_writer[swz_idx++] = get_swiz_char(src->Register.SwizzleW);
- }
- swizzle_writer[swz_idx] = 0;
+ get_source_swizzle(src, swizzle_writer + swz_idx);
if (src->Register.File == TGSI_FILE_INPUT) {
- for (uint32_t j = 0; j < ctx->num_inputs; j++)
- if (ctx->inputs[j].first <= src->Register.Index &&
- ctx->inputs[j].last >= src->Register.Index &&
- (ctx->inputs[j].usage_mask & usage_mask)) {
- if (ctx->key->color_two_side && ctx->inputs[j].name == TGSI_SEMANTIC_COLOR)
- strbuf_fmt(src_buf, "%s(%s%s%d%s%s)", get_string(stypeprefix), prefix, "realcolor", ctx->inputs[j].sid, arrayname, swizzle);
- else if (ctx->inputs[j].glsl_gl_block) {
- /* GS input clipdist requires a conversion */
- if (ctx->inputs[j].name == TGSI_SEMANTIC_CLIPDIST) {
- create_swizzled_clipdist(ctx, src_buf, src, j, true, get_string(stypeprefix), prefix, arrayname, ctx->inputs[j].first);
- } else {
- strbuf_fmt(src_buf, "%s(vec4(%sgl_in%s.%s)%s)", get_string(stypeprefix), prefix, arrayname, ctx->inputs[j].glsl_name, swizzle);
- }
- }
- else if (ctx->inputs[j].name == TGSI_SEMANTIC_PRIMID)
- strbuf_fmt(src_buf, "%s(vec4(intBitsToFloat(%s)))", get_string(stypeprefix), ctx->inputs[j].glsl_name);
- else if (ctx->inputs[j].name == TGSI_SEMANTIC_FACE)
- strbuf_fmt(src_buf, "%s(%s ? 1.0 : -1.0)", get_string(stypeprefix), ctx->inputs[j].glsl_name);
- else if (ctx->inputs[j].name == TGSI_SEMANTIC_CLIPDIST) {
- if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT)
- load_clipdist_fs(ctx, src_buf, src, j, false, get_string(stypeprefix), ctx->inputs[j].first);
- else
- create_swizzled_clipdist(ctx, src_buf, src, j, false, get_string(stypeprefix), prefix, arrayname, ctx->inputs[j].first);
- } else if (ctx->inputs[j].name == TGSI_SEMANTIC_TESSOUTER ||
- ctx->inputs[j].name == TGSI_SEMANTIC_TESSINNER) {
- get_tesslevel_as_source(src_buf, prefix, ctx->inputs[j].glsl_name, &src->Register);
- } else {
- enum vrend_type_qualifier srcstypeprefix = stypeprefix;
- if (ctx->inputs[j].type != VEC_FLOAT) {
- if (stype == TGSI_TYPE_UNSIGNED)
- srcstypeprefix = UVEC4;
- else if (stype == TGSI_TYPE_SIGNED)
- srcstypeprefix = IVEC4;
- else if (ctx->inputs[j].type == VEC_INT)
- srcstypeprefix = INT_BITS_TO_FLOAT;
- else // ctx->inputs[j].type == VEC_UINT
- srcstypeprefix = UINT_BITS_TO_FLOAT;
- }
+ int j = find_io_index(ctx->num_inputs, ctx->inputs, src->Register.Index);
+ if (j < 0)
+ return false;
- if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE && i == 1) {
- strbuf_fmt(src_buf, "floatBitsToInt(%s%s%s%s)", prefix, ctx->inputs[j].glsl_name, arrayname, swizzle);
- } else if (ctx->inputs[j].name == TGSI_SEMANTIC_GENERIC) {
- struct vrend_shader_io *io = ctx->generic_ios.input_range.used ? &ctx->generic_ios.input_range.io : &ctx->inputs[j];
- get_source_info_generic(ctx, io_in, srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
- } else if (ctx->inputs[j].name == TGSI_SEMANTIC_PATCH) {
- struct vrend_shader_io *io = ctx->patch_ios.input_range.used ? &ctx->patch_ios.input_range.io : &ctx->inputs[j];
- get_source_info_patch(srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
- } else if (ctx->inputs[j].name == TGSI_SEMANTIC_POSITION && ctx->prog_type == TGSI_PROCESSOR_VERTEX &&
- ctx->inputs[j].first != ctx->inputs[j].last) {
- if (src->Register.Indirect)
- strbuf_fmt(src_buf, "%s(%s%s%s[addr%d + %d]%s)", get_string(srcstypeprefix), prefix, ctx->inputs[j].glsl_name, arrayname,
- src->Indirect.Index, src->Register.Index, ctx->inputs[j].is_int ? "" : swizzle);
- else
- strbuf_fmt(src_buf, "%s(%s%s%s[%d]%s)", get_string(srcstypeprefix), prefix, ctx->inputs[j].glsl_name, arrayname,
- src->Register.Index, ctx->inputs[j].is_int ? "" : swizzle);
- } else
- strbuf_fmt(src_buf, "%s(%s%s%s%s)", get_string(srcstypeprefix), prefix, ctx->inputs[j].glsl_name, arrayname, ctx->inputs[j].is_int ? "" : swizzle);
- }
- sinfo->override_no_wm[i] = ctx->inputs[j].override_no_wm;
- break;
+ struct vrend_shader_io *input = &ctx->inputs[j];
+
+ if (ctx->prog_type == TGSI_PROCESSOR_VERTEX) {
+ if (ctx->key->vs.attrib_zyxw_bitmask & (1 << input->first)) {
+ swizzle_writer[swz_idx++] = '.';
+ swizzle_writer[swz_idx++] = 'z';
+ swizzle_writer[swz_idx++] = 'y';
+ swizzle_writer[swz_idx++] = 'x';
+ swizzle_writer[swz_idx++] = 'w';
+ }
+ get_source_swizzle(src, swizzle_writer + swz_idx);
+ }
+
+ if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT &&
+ ctx->key->color_two_side && input->name == TGSI_SEMANTIC_COLOR)
+ strbuf_fmt(src_buf, "%s(%s%s%d%s%s)", get_string(stypeprefix), prefix, "realcolor", input->sid, arrayname, swizzle);
+ else if (input->glsl_gl_block) {
+ /* GS input clipdist requires a conversion */
+ if (input->name == TGSI_SEMANTIC_CLIPDIST) {
+ create_swizzled_clipdist(ctx, src_buf, src, j, true, get_string(stypeprefix), prefix, arrayname, input->first);
+ } else {
+ strbuf_fmt(src_buf, "%s(vec4(%sgl_in%s.%s)%s)", get_string(stypeprefix), prefix, arrayname, input->glsl_name, swizzle);
+ }
+ }
+ else if (input->name == TGSI_SEMANTIC_PRIMID)
+ strbuf_fmt(src_buf, "%s(vec4(intBitsToFloat(%s)))", get_string(stypeprefix), input->glsl_name);
+ else if (input->name == TGSI_SEMANTIC_FACE)
+ strbuf_fmt(src_buf, "%s(%s ? 1.0 : -1.0)", get_string(stypeprefix), input->glsl_name);
+ else if (input->name == TGSI_SEMANTIC_CLIPDIST) {
+ if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT)
+ load_clipdist_fs(ctx, src_buf, src, j, get_string(stypeprefix), input->first);
+ else
+ create_swizzled_clipdist(ctx, src_buf, src, j, false, get_string(stypeprefix), prefix, arrayname, input->first);
+ } else if (input->name == TGSI_SEMANTIC_TESSOUTER ||
+ input->name == TGSI_SEMANTIC_TESSINNER) {
+ get_tesslevel_as_source(src_buf, prefix, input->glsl_name, &src->Register);
+ } else {
+ enum vrend_type_qualifier srcstypeprefix = stypeprefix;
+ if (input->type != VEC_FLOAT) {
+ if (stype == TGSI_TYPE_UNSIGNED)
+ srcstypeprefix = UVEC4;
+ else if (stype == TGSI_TYPE_SIGNED)
+ srcstypeprefix = IVEC4;
+ else if (input->type == VEC_INT)
+ srcstypeprefix = INT_BITS_TO_FLOAT;
+ else // input->type == VEC_UINT
+ srcstypeprefix = UINT_BITS_TO_FLOAT;
}
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE && i == 1) {
+ strbuf_fmt(src_buf, "floatBitsToInt(%s%s%s%s)", prefix, input->glsl_name, arrayname, swizzle);
+ } else if (input->name == TGSI_SEMANTIC_GENERIC) {
+ get_source_info_generic(ctx, io_in, srcstypeprefix, prefix, src,
+ &ctx->inputs[j], arrayname, swizzle, src_buf);
+ } else if (input->name == TGSI_SEMANTIC_TEXCOORD) {
+ get_source_info_generic(ctx, io_in, srcstypeprefix, prefix, src,
+ &ctx->inputs[j], arrayname, swizzle, src_buf);
+ } else if (input->name == TGSI_SEMANTIC_PATCH) {
+ get_source_info_patch(srcstypeprefix, prefix, src,
+ &ctx->inputs[j], arrayname, swizzle, src_buf);
+ } else if (input->name == TGSI_SEMANTIC_POSITION && ctx->prog_type == TGSI_PROCESSOR_VERTEX &&
+ input->first != input->last) {
+ if (src->Register.Indirect)
+ strbuf_fmt(src_buf, "%s(%s%s%s[addr%d + %d]%s)", get_string(srcstypeprefix), prefix, input->glsl_name, arrayname,
+ src->Indirect.Index, src->Register.Index, input->is_int ? "" : swizzle);
+ else
+ strbuf_fmt(src_buf, "%s(%s%s%s[%d]%s)", get_string(srcstypeprefix), prefix, input->glsl_name, arrayname,
+ src->Register.Index, input->is_int ? "" : swizzle);
+ } else
+ strbuf_fmt(src_buf, "%s(%s%s%s%s)", get_string(srcstypeprefix), prefix, input->glsl_name, arrayname, input->is_int ? "" : swizzle);
+ }
+ sinfo->override_no_wm[i] = input->override_no_wm;
} else if (src->Register.File == TGSI_FILE_OUTPUT) {
- for (uint32_t j = 0; j < ctx->num_outputs; j++) {
- if (ctx->outputs[j].first <= src->Register.Index &&
- ctx->outputs[j].last >= src->Register.Index &&
- (ctx->outputs[j].usage_mask & usage_mask)) {
- if (inst->Instruction.Opcode == TGSI_OPCODE_FBFETCH) {
- ctx->outputs[j].fbfetch_used = true;
- ctx->shader_req_bits |= SHADER_REQ_FBFETCH;
- }
+ int j = find_io_index(ctx->num_outputs, ctx->outputs, src->Register.Index);
+ if (j < 0)
+ return false;
- enum vrend_type_qualifier srcstypeprefix = stypeprefix;
- if (stype == TGSI_TYPE_UNSIGNED && ctx->outputs[j].is_int)
- srcstypeprefix = TYPE_CONVERSION_NONE;
- if (ctx->outputs[j].glsl_gl_block) {
- if (ctx->outputs[j].name == TGSI_SEMANTIC_CLIPDIST) {
- char clip_indirect[32] = "";
- if (ctx->outputs[j].first != ctx->outputs[j].last) {
- if (src->Register.Indirect)
- snprintf(clip_indirect, sizeof(clip_indirect), "+ addr%d", src->Indirect.Index);
- else
- snprintf(clip_indirect, sizeof(clip_indirect), "+ %d", src->Register.Index - ctx->outputs[j].first);
- }
- strbuf_fmt(src_buf, "clip_dist_temp[%d%s]", ctx->outputs[j].sid, clip_indirect);
- }
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_GENERIC) {
- struct vrend_shader_io *io = ctx->generic_ios.output_range.used ? &ctx->generic_ios.output_range.io : &ctx->outputs[j];
- get_source_info_generic(ctx, io_out, srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_PATCH) {
- struct vrend_shader_io *io = ctx->patch_ios.output_range.used ? &ctx->patch_ios.output_range.io : &ctx->outputs[j];
- get_source_info_patch(srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
- } else if (ctx->outputs[j].name == TGSI_SEMANTIC_TESSOUTER ||
- ctx->outputs[j].name == TGSI_SEMANTIC_TESSINNER) {
- get_tesslevel_as_source(src_buf, prefix, ctx->outputs[j].glsl_name, &src->Register);
- } else {
- strbuf_fmt(src_buf, "%s(%s%s%s%s)", get_string(srcstypeprefix), prefix, ctx->outputs[j].glsl_name, arrayname, ctx->outputs[j].is_int ? "" : swizzle);
+ struct vrend_shader_io *output = &ctx->outputs[j];
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_FBFETCH) {
+ output->fbfetch_used = true;
+ ctx->shader_req_bits |= SHADER_REQ_FBFETCH;
+ }
+
+ enum vrend_type_qualifier srcstypeprefix = stypeprefix;
+ if (stype == TGSI_TYPE_UNSIGNED && output->is_int)
+ srcstypeprefix = TYPE_CONVERSION_NONE;
+ if (output->glsl_gl_block) {
+ if (output->name == TGSI_SEMANTIC_CLIPDIST) {
+ char clip_indirect[32] = "";
+ if (output->first != output->last) {
+ if (src->Register.Indirect)
+ snprintf(clip_indirect, sizeof(clip_indirect), "+ addr%d", src->Indirect.Index);
+ else
+ snprintf(clip_indirect, sizeof(clip_indirect), "+ %d", src->Register.Index - output->first);
}
- sinfo->override_no_wm[i] = ctx->outputs[j].override_no_wm;
- break;
+ strbuf_fmt(src_buf, "clip_dist_temp[%d%s]", output->sid, clip_indirect);
}
+ } else if (output->name == TGSI_SEMANTIC_GENERIC) {
+ struct vrend_shader_io *io = ctx->generic_ios.output_range.used ? &ctx->generic_ios.output_range.io : &ctx->outputs[j];
+ get_source_info_generic(ctx, io_out, srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
+ } else if (output->name == TGSI_SEMANTIC_PATCH) {
+ struct vrend_shader_io *io = ctx->patch_ios.output_range.used ? &ctx->patch_ios.output_range.io : &ctx->outputs[j];
+ get_source_info_patch(srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
+ } else if (output->name == TGSI_SEMANTIC_TESSOUTER ||
+ output->name == TGSI_SEMANTIC_TESSINNER) {
+ get_tesslevel_as_source(src_buf, prefix, output->glsl_name, &src->Register);
+ } else {
+ strbuf_fmt(src_buf, "%s(%s%s%s%s)", get_string(srcstypeprefix), prefix, output->glsl_name, arrayname, output->is_int ? "" : swizzle);
}
+ sinfo->override_no_wm[i] = output->override_no_wm;
} else if (src->Register.File == TGSI_FILE_TEMPORARY) {
struct vrend_temp_range *range = find_temp_range(ctx, src->Register.Index);
if (!range)
@@ -4195,12 +4714,10 @@ get_source_info(struct dump_ctx *ctx,
stprefix = true;
stypeprefix = FLOAT_BITS_TO_INT;
}
-
- if (src->Register.Indirect) {
- assert(src->Indirect.File == TGSI_FILE_ADDRESS);
- strbuf_fmt(src_buf, "%s%c%stemp%d[addr%d + %d]%s%c", get_string(stypeprefix), stprefix ? '(' : ' ', prefix, range->first, src->Indirect.Index, src->Register.Index - range->first, swizzle, stprefix ? ')' : ' ');
- } else
- strbuf_fmt(src_buf, "%s%c%stemp%d[%d]%s%c", get_string(stypeprefix), stprefix ? '(' : ' ', prefix, range->first, src->Register.Index - range->first, swizzle, stprefix ? ')' : ' ');
+ char temp_buf[64];
+ get_temp(ctx, src->Register.Indirect, src->Indirect.Index, src->Register.Index,
+ temp_buf, &ctx->require_dummy_value);
+ strbuf_fmt(src_buf, "%s%c%s%s%s%c", get_string(stypeprefix), stprefix ? '(' : ' ', prefix, temp_buf, swizzle, stprefix ? ')' : ' ');
} else if (src->Register.File == TGSI_FILE_CONSTANT) {
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
int dim = 0;
@@ -4243,28 +4760,16 @@ get_source_info(struct dump_ctx *ctx,
strbuf_fmt(src_buf, "%s%s(%sconst%d[%d]%s)", prefix, get_string(csp), cname, dim, src->Register.Index, swizzle);
}
} else if (src->Register.File == TGSI_FILE_SAMPLER) {
- if (!ctx->cfg->use_gles ||
- !(inst->Instruction.Opcode == TGSI_OPCODE_TXQ) ||
- !(inst->Dst[0].Register.WriteMask & 0x8)) {
- const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
- if (ctx->info.indirect_files & (1 << TGSI_FILE_SAMPLER)) {
- int basearrayidx = lookup_sampler_array(ctx, src->Register.Index);
- if (src->Register.Indirect) {
- strbuf_fmt(src_buf, "%ssamp%d[addr%d+%d]%s", cname, basearrayidx, src->Indirect.Index, src->Register.Index - basearrayidx, swizzle);
- } else {
- strbuf_fmt(src_buf, "%ssamp%d[%d]%s", cname, basearrayidx, src->Register.Index - basearrayidx, swizzle);
- }
+ const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
+ if (ctx->info.indirect_files & (1 << TGSI_FILE_SAMPLER)) {
+ int basearrayidx = lookup_sampler_array(ctx, src->Register.Index);
+ if (src->Register.Indirect) {
+ strbuf_fmt(src_buf, "%ssamp%d[addr%d+%d]%s", cname, basearrayidx, src->Indirect.Index, src->Register.Index - basearrayidx, swizzle);
} else {
- strbuf_fmt(src_buf, "%ssamp%d%s", cname, src->Register.Index, swizzle);
+ strbuf_fmt(src_buf, "%ssamp%d[%d]%s", cname, basearrayidx, src->Register.Index - basearrayidx, swizzle);
}
} else {
- /* This is probably incorrect, we assume that the base-index is the sum of all arrays sizes up
- * to this array of samplers */
- if (ctx->info.indirect_files & (1 << TGSI_FILE_SAMPLER) && src->Register.Indirect) {
- strbuf_fmt(src_buf, "addr%d+%d", src->Indirect.Index, src->Register.Index);
- } else {
- strbuf_fmt(src_buf, "%d", src->Register.Index);
- }
+ strbuf_fmt(src_buf, "%ssamp%d%s", cname, src->Register.Index, swizzle);
}
sinfo->sreg_index = src->Register.Index;
} else if (src->Register.File == TGSI_FILE_IMAGE) {
@@ -4338,7 +4843,9 @@ get_source_info(struct dump_ctx *ctx,
}
/* build up a vec4 of immediates */
- strbuf_fmt(src_buf, "%s(%s%s(", get_string(imm_stypeprefix), prefix, get_string(vtype));
+ strbuf_fmt(src_buf, "%s%s(%s(", prefix,
+ get_string(imm_stypeprefix), get_string(vtype));
+
for (uint32_t j = 0; j < 4; j++) {
if (j == 0)
idx = src->Register.SwizzleX;
@@ -4445,12 +4952,15 @@ get_source_info(struct dump_ctx *ctx,
sinfo->override_no_cast[i] = true;
} else if (ctx->system_values[j].name == TGSI_SEMANTIC_SAMPLEMASK) {
const char *vec_type = "ivec4";
- if (ctx->cfg->use_gles &&
- (inst->Instruction.Opcode == TGSI_OPCODE_AND) &&
- (stype == TGSI_TYPE_UNSIGNED))
+ enum vrend_type_qualifier srcstypeprefix = TYPE_CONVERSION_NONE;
+ if (stypeprefix == TYPE_CONVERSION_NONE)
+ srcstypeprefix = INT_BITS_TO_FLOAT;
+ else if (stype == TGSI_TYPE_UNSIGNED)
vec_type = "uvec4";
+
ctx->shader_req_bits |= SHADER_REQ_SAMPLE_SHADING | SHADER_REQ_INTS;
- strbuf_fmt(src_buf, "%s(%s, %s, %s, %s)",
+ strbuf_fmt(src_buf, "%s(%s(%s, %s, %s, %s))",
+ get_string(srcstypeprefix),
vec_type,
src->Register.SwizzleX == TGSI_SWIZZLE_X ? ctx->system_values[j].glsl_name : "0",
src->Register.SwizzleY == TGSI_SWIZZLE_X ? ctx->system_values[j].glsl_name : "0",
@@ -4516,177 +5026,89 @@ static bool rewrite_1d_image_coordinate(struct vrend_strbuf *src, const struct t
}
return true;
}
+
/* We have indirect IO access, but the guest actually send separate values, so
- * now we have to emulate an array.
- */
-static
-void rewrite_io_ranged(struct dump_ctx *ctx)
-{
- if ((ctx->info.indirect_files & (1 << TGSI_FILE_INPUT)) ||
- ctx->key->input.num_indirect_generic ||
- ctx->key->input.num_indirect_patch) {
+ * now we have to emulate arrays by putting IO values into arrays according
+ * to semantic. Only join elements that are consecutive. */
+static int
+make_array_from_semantic(struct vrend_shader_io *io, int start_index,
+ int num_entries, enum tgsi_semantic semantic)
+{
+ struct vrend_shader_io *io_out_range = &io[start_index];
+
+ int last_sid = io_out_range->sid;
+ for (int i = start_index + 1; i < num_entries; ++i) {
+ if (io[i].name == semantic && (io[i].sid - last_sid == 1)) {
+ io[i].glsl_predefined_no_emit = true;
+ last_sid = io[i].sid;
+ io[i].array_offset = io[i].sid - io_out_range->sid;
+ io_out_range->last = io_out_range->first + io[i].array_offset;
+ io[i].overlapping_array = io_out_range;
+ } else {
+ break;
+ }
+ }
+ return io_out_range->last + 1;
+}
- for (uint i = 0; i < ctx->num_inputs; ++i) {
- if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH) {
- ctx->inputs[i].glsl_predefined_no_emit = true;
- if (ctx->inputs[i].sid < ctx->patch_ios.input_range.io.sid || ctx->patch_ios.input_range.used == false) {
- ctx->patch_ios.input_range.io.first = i;
- ctx->patch_ios.input_range.io.usage_mask = 0xf;
- ctx->patch_ios.input_range.io.name = TGSI_SEMANTIC_PATCH;
- ctx->patch_ios.input_range.io.sid = ctx->inputs[i].sid;
- ctx->patch_ios.input_range.used = true;
- if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
- ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
- }
- if (ctx->inputs[i].sid > ctx->patch_ios.input_range.io.last)
- ctx->patch_ios.input_range.io.last = ctx->inputs[i].sid;
- }
+static bool
+collapse_vars_to_arrays(struct vrend_shader_io *io,
+ int num_entries,
+ enum tgsi_semantic semantic)
+{
+
+ bool retval = 0;
+ int start_index = 0;
+ while (start_index < num_entries) {
+ if (io[start_index].name == semantic && !io[start_index].glsl_predefined_no_emit) {
+ int new_start_index = make_array_from_semantic(io, start_index, num_entries, semantic);
+ retval |= io[start_index].first != io[start_index].last;
+ start_index = new_start_index;
+ } else {
+ ++start_index;
+ }
+ }
- if (ctx->inputs[i].name == TGSI_SEMANTIC_GENERIC) {
- ctx->inputs[i].glsl_predefined_no_emit = true;
- if (ctx->inputs[i].sid < ctx->generic_ios.input_range.io.sid || ctx->generic_ios.input_range.used == false) {
- ctx->generic_ios.input_range.io.sid = ctx->inputs[i].sid;
- ctx->generic_ios.input_range.io.first = i;
- ctx->generic_ios.input_range.io.name = TGSI_SEMANTIC_GENERIC;
- ctx->generic_ios.input_range.io.num_components = 4;
- ctx->generic_ios.input_range.used = true;
- if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
- ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
- }
- if (ctx->inputs[i].sid > ctx->generic_ios.input_range.io.last)
- ctx->generic_ios.input_range.io.last = ctx->inputs[i].sid;
- }
+ io->num_components = 4;
+ io->usage_mask = 0xf;
+ return retval;
+}
- if (ctx->key->input.num_indirect_generic > 0)
- ctx->generic_ios.input_range.io.last = ctx->generic_ios.input_range.io.sid + ctx->key->input.num_indirect_generic - 1;
- if (ctx->key->input.num_indirect_patch > 0)
- ctx->patch_ios.input_range.io.last = ctx->patch_ios.input_range.io.sid + ctx->key->input.num_indirect_patch - 1;
- }
- snprintf(ctx->patch_ios.input_range.io.glsl_name, 64, "%s_p%d",
- get_stage_input_name_prefix(ctx, ctx->prog_type), ctx->patch_ios.input_range.io.sid);
- snprintf(ctx->generic_ios.input_range.io.glsl_name, 64, "%s_g%d",
- get_stage_input_name_prefix(ctx, ctx->prog_type), ctx->generic_ios.input_range.io.sid);
+static void
+rewrite_io_ranged(struct dump_ctx *ctx)
+{
+ if ((ctx->info.indirect_files & (1 << TGSI_FILE_INPUT)) ||
+ ctx->key->require_input_arrays) {
- ctx->generic_ios.input_range.io.num_components = 4;
- ctx->generic_ios.input_range.io.usage_mask = 0xf;
- ctx->generic_ios.input_range.io.swizzle_offset = 0;
+ bool generic_array = collapse_vars_to_arrays(ctx->inputs, ctx->num_inputs,
+ TGSI_SEMANTIC_GENERIC);
+ bool patch_array = collapse_vars_to_arrays(ctx->inputs, ctx->num_inputs,
+ TGSI_SEMANTIC_PATCH);
- ctx->patch_ios.input_range.io.num_components = 4;
- ctx->patch_ios.input_range.io.usage_mask = 0xf;
- ctx->patch_ios.input_range.io.swizzle_offset = 0;
+ ctx->has_input_arrays = generic_array || patch_array;
if (prefer_generic_io_block(ctx, io_in))
- ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
}
if ((ctx->info.indirect_files & (1 << TGSI_FILE_OUTPUT)) ||
- ctx->key->output.num_indirect_generic ||
- ctx->key->output.num_indirect_patch) {
+ ctx->key->require_output_arrays) {
- for (uint i = 0; i < ctx->num_outputs; ++i) {
- if (ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) {
- ctx->outputs[i].glsl_predefined_no_emit = true;
- if (ctx->outputs[i].sid < ctx->patch_ios.output_range.io.sid || ctx->patch_ios.output_range.used == false) {
- ctx->patch_ios.output_range.io.first = i;
- ctx->patch_ios.output_range.io.name = TGSI_SEMANTIC_PATCH;
- ctx->patch_ios.output_range.io.sid = ctx->outputs[i].sid;
- ctx->patch_ios.output_range.used = true;
- if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
- ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
- }
- if (ctx->outputs[i].sid > ctx->patch_ios.output_range.io.last) {
- ctx->patch_ios.output_range.io.last = ctx->outputs[i].sid;
- }
- }
-
- if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC) {
- ctx->outputs[i].glsl_predefined_no_emit = true;
- if (ctx->outputs[i].sid < ctx->generic_ios.output_range.io.sid || ctx->generic_ios.output_range.used == false) {
- ctx->generic_ios.output_range.io.sid = ctx->outputs[i].sid;
- ctx->generic_ios.output_range.io.first = i;
- ctx->generic_ios.output_range.io.name = TGSI_SEMANTIC_GENERIC;
- ctx->generic_ios.output_range.used = true;
- ctx->generic_ios.output_range.io.usage_mask = 0xf;
- ctx->generic_ios.output_range.io.num_components = 4;
- if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
- ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
- }
- if (ctx->outputs[i].sid > ctx->generic_ios.output_range.io.last) {
- ctx->generic_ios.output_range.io.last = ctx->outputs[i].sid;
- }
- }
- }
- snprintf(ctx->patch_ios.output_range.io.glsl_name, 64, "%s_p%d",
- get_stage_output_name_prefix(ctx->prog_type), ctx->patch_ios.output_range.io.sid);
- snprintf(ctx->generic_ios.output_range.io.glsl_name, 64, "%s_g%d",
- get_stage_output_name_prefix(ctx->prog_type), ctx->generic_ios.output_range.io.sid);
-
- ctx->generic_ios.output_range.io.num_components = 4;
- ctx->generic_ios.output_range.io.usage_mask = 0xf;
- ctx->generic_ios.output_range.io.swizzle_offset = 0;
-
- ctx->patch_ios.output_range.io.num_components = 4;
- ctx->patch_ios.output_range.io.usage_mask = 0xf;
- ctx->patch_ios.output_range.io.swizzle_offset = 0;
+ bool generic_array = collapse_vars_to_arrays(ctx->outputs, ctx->num_outputs,
+ TGSI_SEMANTIC_GENERIC);
+ bool patch_array = collapse_vars_to_arrays(ctx->outputs, ctx->num_outputs,
+ TGSI_SEMANTIC_PATCH);
+ ctx->has_output_arrays = generic_array || patch_array;
if (prefer_generic_io_block(ctx, io_out))
- ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
- }
-}
-
-
-static void rename_variables(unsigned nio, struct vrend_shader_io *io,
- const char *name_prefix, unsigned coord_replace)
-{
- /* Rename the generic and patch variables after applying all identifications */
- for (unsigned i = 0; i < nio; ++i) {
- if ((io[i].name != TGSI_SEMANTIC_GENERIC &&
- io[i].name != TGSI_SEMANTIC_PATCH) ||
- (coord_replace & (1 << io[i].sid)))
- continue;
- char io_type = io[i].name == TGSI_SEMANTIC_GENERIC ? 'g' : 'p';
- snprintf(io[i].glsl_name, 64, "%s_%c%dA%d_%x", name_prefix, io_type, io[i].sid, io[i].array_id, io[i].usage_mask);
- }
-}
-
-static
-void rewrite_components(unsigned nio, struct vrend_shader_io *io,
- const char *name_prefix, unsigned coord_replace,
- bool no_input_arrays)
-{
- if (!nio)
- return;
-
- for (unsigned i = 0; i < nio - 1; ++i) {
- if ((io[i].name != TGSI_SEMANTIC_GENERIC &&
- io[i].name != TGSI_SEMANTIC_PATCH) ||
- io[i].glsl_predefined_no_emit)
- continue;
-
- for (unsigned j = i + 1; j < nio; ++j) {
- if ((io[j].name != TGSI_SEMANTIC_GENERIC &&
- io[j].name != TGSI_SEMANTIC_PATCH) ||
- io[j].glsl_predefined_no_emit)
- continue;
- if (io[i].first == io[j].first)
- io[j].glsl_predefined_no_emit = true;
- }
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
}
- for (unsigned i = 0; i < nio; ++i) {
- if ((io[i].name != TGSI_SEMANTIC_GENERIC &&
- io[i].name != TGSI_SEMANTIC_PATCH) ||
- !no_input_arrays)
- continue;
-
- io[i].usage_mask = 0xf;
- io[i].num_components = 4;
- io[i].swizzle_offset = 0;
- io[i].override_no_wm = false;
- }
+ if ((ctx->has_output_arrays || ctx->has_input_arrays)
+ && ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
+ ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
- rename_variables(nio, io, name_prefix, coord_replace);
}
static
@@ -4726,11 +5148,11 @@ void emit_fs_clipdistance_load(const struct dump_ctx *ctx,
if (!ctx->fs_uses_clipdist_input)
return;
- int prev_num = ctx->key->num_clip + ctx->key->num_cull;
+ int prev_num = ctx->key->num_in_clip + ctx->key->num_in_cull;
int ndists;
const char *prefix="";
- if (ctx->prog_type == PIPE_SHADER_TESS_CTRL)
+ if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL)
prefix = "gl_out[gl_InvocationID].";
ndists = ctx->num_in_clip_dist;
@@ -4750,131 +5172,15 @@ void emit_fs_clipdistance_load(const struct dump_ctx *ctx,
}
bool is_cull = false;
if (prev_num > 0) {
- if (i >= ctx->key->num_clip && i < prev_num)
+ if (i >= ctx->key->num_in_clip && i < prev_num)
is_cull = true;
}
const char *clip_cull = is_cull ? "Cull" : "Clip";
emit_buff(glsl_strbufs, "clip_dist_temp[%d].%c = %sgl_%sDistance[%d];\n", clipidx, wm, prefix, clip_cull,
- is_cull ? i - ctx->key->num_clip : i);
+ is_cull ? i - ctx->key->num_in_clip : i);
}
}
-/* TGSI possibly emits VS, TES, TCS, and GEOM outputs with layouts (i.e.
- * it gives components), but it doesn't do so for the corresponding inputs from
- * TXS, GEOM, abd TES, so that we have to apply the output layouts from the
- * previous shader stage to the according inputs.
- */
-
-static bool apply_prev_layout(const struct vrend_shader_key *key,
- struct vrend_shader_io inputs[],
- uint32_t *num_inputs)
-{
- bool require_enhanced_layouts = false;
-
- /* Walk through all inputs and see whether we have a corresonding output from
- * the previous shader that uses a different layout. It may even be that one
- * input be the combination of two inputs. */
-
- for (unsigned i = 0; i < *num_inputs; ++i ) {
- unsigned i_input = i;
- struct vrend_shader_io *io = &inputs[i];
-
- if (io->name == TGSI_SEMANTIC_GENERIC || io->name == TGSI_SEMANTIC_PATCH) {
-
- const struct vrend_layout_info *layout = key->prev_stage_generic_and_patch_outputs_layout;
- for (unsigned generic_index = 0; generic_index < key->input.num_generic_and_patch; ++generic_index, ++layout) {
-
- bool already_found_one = false;
-
- /* Identify by sid and arrays_id */
- if (io->sid == layout->sid && (io->array_id == layout->array_id)) {
-
- /* We have already one IO with the same SID and arrays ID, so we need to duplicate it */
- if (already_found_one) {
- memmove(io + 1, io, (*num_inputs - i_input) * sizeof(struct vrend_shader_io));
- (*num_inputs)++;
- ++io;
- ++i_input;
-
- } else if ((io->usage_mask == 0xf) && (layout->usage_mask != 0xf)) {
- /* If we found the first input with all components, and a corresponding prev output that uses
- * less components */
- already_found_one = true;
- }
-
- if (already_found_one) {
- io->usage_mask = (uint8_t)layout->usage_mask;
- io->layout_location = layout->location;
- io->array_id = layout->array_id;
-
- get_swizzle_offset_and_num_components(io);
- require_enhanced_layouts |= io->swizzle_offset > 0;
- if (io->num_components == 1)
- io->override_no_wm = true;
- if (i_input < *num_inputs - 1) {
- already_found_one = (io[1].sid != layout->sid || io[1].array_id != layout->array_id);
- }
- }
- }
- }
- }
- ++io;
- ++i_input;
- }
- return require_enhanced_layouts;
-}
-
-static bool evaluate_layout_overlays(unsigned nio, struct vrend_shader_io *io,
- const char *name_prefix, unsigned coord_replace)
-{
- bool require_enhanced_layouts = 0;
- int next_loc = 1;
-
- /* IO elements may be emitted for the same location but with
- * non-overlapping swizzles, therefore, we modify the name of
- * the variable to include the swizzle mask.
- *
- * Since TGSI also emits inputs that have no masks but are still at the
- * same location, we also need to add an array ID.
- */
-
- for (unsigned i = 0; i < nio - 1; ++i) {
- if ((io[i].name != TGSI_SEMANTIC_GENERIC &&
- io[i].name != TGSI_SEMANTIC_PATCH) ||
- io[i].usage_mask == 0xf ||
- io[i].layout_location > 0)
- continue;
-
- for (unsigned j = i + 1; j < nio ; ++j) {
- if ((io[j].name != TGSI_SEMANTIC_GENERIC &&
- io[j].name != TGSI_SEMANTIC_PATCH) ||
- io[j].usage_mask == 0xf ||
- io[j].layout_location > 0)
- continue;
-
- /* Do the definition ranges overlap? */
- if (io[i].last < io[j].first || io[i].first > io[j].last)
- continue;
-
- /* Overlapping ranges require explicite layouts and if they start at the
- * same index thet location must be equal */
- if (io[i].first == io[j].first) {
- io[j].layout_location = io[i].layout_location = next_loc++;
- } else {
- io[i].layout_location = next_loc++;
- io[j].layout_location = next_loc++;
- }
- require_enhanced_layouts = true;
- }
- }
-
- rename_variables(nio, io, name_prefix, coord_replace);
-
- return require_enhanced_layouts;
-}
-
-
-
static
void renumber_io_arrays(unsigned nio, struct vrend_shader_io *io)
{
@@ -4891,8 +5197,6 @@ void renumber_io_arrays(unsigned nio, struct vrend_shader_io *io)
// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void handle_io_arrays(struct dump_ctx *ctx)
{
- bool require_enhanced_layouts = false;
-
/* If the guest sent real IO arrays then we declare them individually,
* and have to do some work to deal with overlapping values, regions and
* enhanced layouts */
@@ -4903,48 +5207,116 @@ static void handle_io_arrays(struct dump_ctx *ctx)
renumber_io_arrays(ctx->num_inputs, ctx->inputs);
renumber_io_arrays(ctx->num_outputs, ctx->outputs);
+ } else {
+ /* The guest didn't send real arrays, do we might have to add a big array
+ * for all generic and another for patch inputs */
+ rewrite_io_ranged(ctx);
}
+}
+static int
+compare_shader_io(const void *vlhs, const void *vrhs)
+{
+ struct vrend_shader_io *lhs = (struct vrend_shader_io *)vlhs;
+ struct vrend_shader_io *rhs = (struct vrend_shader_io *)vrhs;
- /* In these shaders the inputs don't have the layout component information
- * therefore, copy the info from the prev shaders output */
- if (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY ||
- ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ||
- ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)
- require_enhanced_layouts |= apply_prev_layout(ctx->key, ctx->inputs, &ctx->num_inputs);
+ if (lhs->name < rhs->name)
+ return -1;
+ if (lhs->name > rhs->name)
+ return 1;
+ return lhs->sid - rhs->sid;
+}
- if (ctx->guest_sent_io_arrays) {
- if (ctx->num_inputs > 0)
- if (evaluate_layout_overlays(ctx->num_inputs, ctx->inputs,
- get_stage_input_name_prefix(ctx, ctx->prog_type),
- ctx->key->fs.coord_replace)) {
- require_enhanced_layouts = true;
+static void
+add_missing_semantic_inputs(struct vrend_shader_io *inputs, int *num_inputs,
+ int *next_location, uint64_t sids_missing,
+ const char *prefix, char *type_prefix,
+ enum tgsi_semantic name,
+ const struct vrend_shader_key *key)
+{
+
+ while (sids_missing) {
+ int sid = u_bit_scan64(&sids_missing);
+ struct vrend_shader_io *io = &inputs[*num_inputs];
+ io->sid = sid;
+ io->last = io->first = *next_location;
+ io->name = name;
+ io->type = VEC_FLOAT;
+ uint32_t sids_added = 1 << sid;
+
+
+ for (uint32_t j = 0; j < key->in_arrays.num_arrays; j++) {
+ const struct vrend_shader_io_array *array = &key->in_arrays.layout[j];
+ if (array->name == name &&
+ array->sid <= sid &&
+ array->sid + array->size >= sid) {
+ io->last = io->first + array->size;
+ io->sid = array->sid;
+ sids_added = ((1u << array->size) - 1) << sid;
+ break;
}
+ }
- if (ctx->num_outputs > 0)
- if (evaluate_layout_overlays(ctx->num_outputs, ctx->outputs,
- get_stage_output_name_prefix(ctx->prog_type), 0)){
- require_enhanced_layouts = true;
- }
+ (*next_location) += io->last - io->first + 1;
- } else {
- /* The guest didn't send real arrays, do we might have to add a big array
- * for all generic and another ofr patch inputs */
- rewrite_io_ranged(ctx);
- rewrite_components(ctx->num_inputs, ctx->inputs,
- get_stage_input_name_prefix(ctx, ctx->prog_type),
- ctx->key->fs.coord_replace, true);
+ sids_missing &= ~sids_added;
- rewrite_components(ctx->num_outputs, ctx->outputs,
- get_stage_output_name_prefix(ctx->prog_type), 0, true);
+ snprintf(io->glsl_name, 128, "%s%s%d", prefix, type_prefix, sid);
+ (*num_inputs)++;
}
+}
+
+static int
+add_missing_inputs(const struct dump_ctx *ctx, struct vrend_shader_io *inputs,
+ int num_inputs)
+{
+ uint64_t generics_declared = 0;
+ uint64_t patches_declared = 0;
+ uint8_t texcoord_declared = 0;
- if (require_enhanced_layouts) {
- ctx->shader_req_bits |= SHADER_REQ_ENHANCED_LAYOUTS;
- ctx->shader_req_bits |= SHADER_REQ_SEPERATE_SHADER_OBJECTS;
+ int next_location = 0;
+ for (int i = 0; i < num_inputs; ++i) {
+ int offset = 0;
+ for (int k = inputs[i].first; k <= inputs[i].last; ++k, ++offset) {
+ int sid = inputs[i].sid + offset;
+ switch (inputs[i].name) {
+ case TGSI_SEMANTIC_GENERIC:
+ generics_declared |= 1ull << sid;
+ break;
+ case TGSI_SEMANTIC_PATCH:
+ patches_declared |= 1ull << sid;
+ break;
+ case TGSI_SEMANTIC_TEXCOORD:
+ texcoord_declared |= 1ull << sid;
+ break;
+ default:
+ ;
+ }
+ }
+ if (next_location < inputs[i].last)
+ next_location = inputs[i].last;
}
-}
+ ++next_location;
+ uint64_t generics_missing = ctx->key->in_generic_expected_mask & ~generics_declared;
+ uint64_t patches_missing = ctx->key->in_patch_expected_mask & ~patches_declared;
+ uint64_t texcoord_missing = ctx->key->in_texcoord_expected_mask & ~texcoord_declared;
+
+ const char *prefix = get_stage_input_name_prefix(ctx, ctx->prog_type);
+ add_missing_semantic_inputs(inputs, &num_inputs, &next_location,
+ generics_missing, prefix, "_g",
+ TGSI_SEMANTIC_GENERIC, ctx->key);
+ add_missing_semantic_inputs(inputs, &num_inputs, &next_location,
+ texcoord_missing, prefix, "_t",
+ TGSI_SEMANTIC_TEXCOORD, ctx->key);
+ add_missing_semantic_inputs(inputs, &num_inputs, &next_location,
+ patches_missing, "patch", "",
+ TGSI_SEMANTIC_PATCH, ctx->key);
+
+ qsort(inputs, num_inputs, sizeof(struct vrend_shader_io),
+ compare_shader_io);
+ return num_inputs;
+}
static boolean
iter_instruction(struct tgsi_iterate_context *iter,
@@ -4954,18 +5326,20 @@ iter_instruction(struct tgsi_iterate_context *iter,
struct dest_info dinfo = { 0 };
struct source_info sinfo = { 0 };
const char *srcs[4];
- char dsts[3][255];
+ char *dsts[3];
char fp64_dsts[3][255];
uint instno = ctx->instno++;
char writemask[6] = "";
- char src_swizzle0[10];
+ char src_swizzle0[16];
sinfo.svec4 = VEC4;
- if (ctx->prog_type == -1)
+ if (ctx->prog_type == (enum tgsi_processor_type) -1)
ctx->prog_type = iter->processor.Processor;
if (instno == 0) {
+ if (ctx->prog_type != TGSI_PROCESSOR_VERTEX)
+ ctx->num_inputs = add_missing_inputs(ctx, ctx->inputs, ctx->num_inputs);
handle_io_arrays(ctx);
/* Vertex shader inputs are not send as arrays, but the access may still be
@@ -4988,7 +5362,10 @@ iter_instruction(struct tgsi_iterate_context *iter,
* GLSL < 4.30 it is required to match the output of the previous stage */
if (!ctx->cfg->use_gles) {
for (unsigned i = 0; i < ctx->num_inputs; ++i) {
- if (ctx->key->force_invariant_inputs & (1ull << ctx->inputs[i].sid))
+ uint32_t bit_pos = varying_bit_from_semantic_and_index(ctx->inputs[i].name, ctx->inputs[i].sid);
+ uint32_t slot = bit_pos / 32;
+ uint32_t bit = 1u << (bit_pos & 0x1f);
+ if (ctx->key->force_invariant_inputs[slot] & bit)
ctx->inputs[i].invariant = 1;
else
ctx->inputs[i].invariant = 0;
@@ -4996,7 +5373,7 @@ iter_instruction(struct tgsi_iterate_context *iter,
}
}
- if (!get_destination_info(ctx, inst, &dinfo, dsts, fp64_dsts, writemask))
+ if (!get_destination_info(ctx, inst, &dinfo, ctx->dst_bufs, fp64_dsts, writemask))
return false;
if (!get_source_info(ctx, inst, &sinfo, ctx->src_bufs, src_swizzle0))
@@ -5005,6 +5382,9 @@ iter_instruction(struct tgsi_iterate_context *iter,
for (size_t i = 0; i < ARRAY_SIZE(srcs); ++i)
srcs[i] = ctx->src_bufs[i].buf;
+ for (size_t i = 0; i < ARRAY_SIZE(dsts); ++i)
+ dsts[i] = ctx->dst_bufs[i].buf;
+
switch (inst->Instruction.Opcode) {
case TGSI_OPCODE_SQRT:
case TGSI_OPCODE_DSQRT:
@@ -5112,9 +5492,11 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_buff(&ctx->glsl_strbufs, "%s = %s(1.0LF/(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_FLR:
+ case TGSI_OPCODE_DFLR:
emit_op1("floor");
break;
case TGSI_OPCODE_ROUND:
+ case TGSI_OPCODE_DROUND:
// There is no TGSI OPCODE for roundEven, prefer roundEven
// so roundEven in guest gets translated to roundEven.
if ((ctx->cfg->use_gles && ctx->cfg->glsl_version >= 300) ||
@@ -5127,6 +5509,7 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_op1("sign");
break;
case TGSI_OPCODE_CEIL:
+ case TGSI_OPCODE_DCEIL:
emit_op1("ceil");
break;
case TGSI_OPCODE_FRC:
@@ -5134,9 +5517,11 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_op1("fract");
break;
case TGSI_OPCODE_TRUNC:
+ case TGSI_OPCODE_DTRUNC:
emit_op1("trunc");
break;
case TGSI_OPCODE_SSG:
+ case TGSI_OPCODE_DSSG:
emit_op1("sign");
break;
case TGSI_OPCODE_RSQ:
@@ -5152,7 +5537,8 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_arit_op2("+");
break;
case TGSI_OPCODE_UADD:
- emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(ivec4((uvec4(%s) + uvec4(%s))))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(uvec4(%s) + uvec4(%s))%s);\n", dsts[0],
+ get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_SUB:
emit_arit_op2("-");
@@ -5213,9 +5599,11 @@ iter_instruction(struct tgsi_iterate_context *iter,
case TGSI_OPCODE_TXF:
case TGSI_OPCODE_TG4:
case TGSI_OPCODE_TXP:
- case TGSI_OPCODE_LODQ:
translate_tex(ctx, inst, &sinfo, &dinfo, srcs, dsts[0], writemask);
break;
+ case TGSI_OPCODE_LODQ:
+ emit_lodq(ctx, inst, &sinfo, &dinfo, srcs, dsts[0], writemask);
+ break;
case TGSI_OPCODE_TXQ:
emit_txq(ctx, inst, sinfo.sreg_index, srcs, dsts[0], writemask);
break;
@@ -5274,6 +5662,12 @@ iter_instruction(struct tgsi_iterate_context *iter,
case TGSI_OPCODE_SLT:
emit_compare("lessThan");
break;
+ case TGSI_OPCODE_SLE:
+ emit_compare("lessThanEqual");
+ break;
+ case TGSI_OPCODE_SGT:
+ emit_compare("greaterThan");
+ break;
case TGSI_OPCODE_ISLT:
case TGSI_OPCODE_USLT:
case TGSI_OPCODE_FSLT:
@@ -5315,9 +5709,9 @@ iter_instruction(struct tgsi_iterate_context *iter,
case TGSI_OPCODE_END:
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) {
handle_vertex_proc_exit(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
- } else if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) {
+ } else if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL && ctx->cfg->has_cull_distance) {
emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
- } else if (iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) {
+ } else if (iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL && ctx->cfg->has_cull_distance) {
if (ctx->so && !ctx->key->gs_present)
emit_so_movs(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
@@ -5361,7 +5755,8 @@ iter_instruction(struct tgsi_iterate_context *iter,
struct immed *imd = &ctx->imm[(inst->Src[0].Register.Index)];
if (ctx->so && ctx->key->gs_present)
emit_so_movs(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
- emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
+ if (ctx->cfg->has_cull_distance && ctx->key->gs.emit_clip_distance)
+ emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
emit_prescale(&ctx->glsl_strbufs);
if (imd->val[inst->Src[0].Register.SwizzleX].ui > 0) {
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
@@ -5483,8 +5878,10 @@ iter_instruction(struct tgsi_iterate_context *iter,
return false;
srcs[1] = ctx->src_bufs[1].buf;
}
- translate_store(ctx, &ctx->glsl_strbufs, ctx->ssbo_memory_qualifier,
- inst, &sinfo, srcs, dsts[0]);
+ /* Don't try to write to dest with a negative index. */
+ if (dinfo.dest_index >= 0)
+ translate_store(ctx, &ctx->glsl_strbufs, ctx->ssbo_memory_qualifier, ctx->images,
+ inst, &sinfo, srcs, &dinfo, dsts[0]);
break;
case TGSI_OPCODE_LOAD:
if (ctx->cfg->use_gles) {
@@ -5492,8 +5889,12 @@ iter_instruction(struct tgsi_iterate_context *iter,
return false;
srcs[1] = ctx->src_bufs[1].buf;
}
- translate_load(ctx, &ctx->glsl_strbufs, ctx->ssbo_memory_qualifier, ctx->images,
- inst, &sinfo, &dinfo, srcs, dsts[0], writemask);
+ /* Replace an obvious out-of-bounds load with loading zero. */
+ if (sinfo.sreg_index < 0 ||
+ !translate_load(ctx, &ctx->glsl_strbufs, ctx->ssbo_memory_qualifier, ctx->images,
+ inst, &sinfo, &dinfo, srcs, dsts[0], writemask)) {
+ emit_buff(&ctx->glsl_strbufs, "%s = vec4(0.0, 0.0, 0.0, 0.0)%s;\n", dsts[0], writemask);
+ }
break;
case TGSI_OPCODE_ATOMUADD:
case TGSI_OPCODE_ATOMXCHG:
@@ -5544,7 +5945,7 @@ prolog(struct tgsi_iterate_context *iter)
{
struct dump_ctx *ctx = (struct dump_ctx *)iter;
- if (ctx->prog_type == -1)
+ if (ctx->prog_type == (enum tgsi_processor_type) -1)
ctx->prog_type = iter->processor.Processor;
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX &&
@@ -5565,8 +5966,8 @@ static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *g
if (ctx->cfg->use_gles) {
emit_ver_extf(glsl_strbufs, "#version %d es\n", ctx->cfg->glsl_version);
- if ((ctx->shader_req_bits & SHADER_REQ_CLIP_DISTANCE)||
- (ctx->num_out_clip_dist == 0 && ctx->key->clip_plane_enable)) {
+ if ((ctx->shader_req_bits & SHADER_REQ_CLIP_DISTANCE) ||
+ (ctx->cfg->has_cull_distance && ctx->num_out_clip_dist == 0)) {
emit_ext(glsl_strbufs, "EXT_clip_cull_distance", "require");
}
@@ -5581,6 +5982,8 @@ static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *g
emit_ext(glsl_strbufs, "EXT_shader_framebuffer_fetch", "require");
if (ctx->shader_req_bits & SHADER_REQ_BLEND_EQUATION_ADVANCED)
emit_ext(glsl_strbufs, "KHR_blend_equation_advanced", "require");
+ if (ctx->cfg->has_dual_src_blend)
+ emit_ext(glsl_strbufs, "EXT_blend_func_extended", "require");
}
if (ctx->shader_req_bits & SHADER_REQ_VIEWPORT_IDX)
@@ -5595,6 +5998,9 @@ static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *g
if (ctx->shader_req_bits & SHADER_REQ_NV_IMAGE_FORMATS)
emit_ext(glsl_strbufs, "NV_image_formats", "require");
+ if (ctx->shader_req_bits & SHADER_REQ_SEPERATE_SHADER_OBJECTS)
+ emit_ext(glsl_strbufs, "EXT_separate_shader_objects", "require");
+
if ((ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ||
ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)) {
if (ctx->cfg->glsl_version < 320)
@@ -5634,9 +6040,15 @@ static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *g
}
+ if (ctx->shader_req_bits & SHADER_REQ_TEXTURE_SHADOW_LOD)
+ emit_ext(glsl_strbufs, "EXT_texture_shadow_lod", "require");
+
if (ctx->shader_req_bits & SHADER_REQ_LODQ)
emit_ext(glsl_strbufs, "EXT_texture_query_lod", "require");
+ if (ctx->shader_req_bits & SHADER_REQ_SHADER_NOPERSPECTIVE_INTERPOLATION)
+ emit_ext(glsl_strbufs, "NV_shader_noperspective_interpolation", "require");
+
emit_hdr(glsl_strbufs, "precision highp float;\n");
emit_hdr(glsl_strbufs, "precision highp int;\n");
} else {
@@ -5663,6 +6075,9 @@ static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *g
if (ctx->shader_req_bits & SHADER_REQ_SEPERATE_SHADER_OBJECTS)
emit_ext(glsl_strbufs, "ARB_separate_shader_objects", "require");
+ if (ctx->shader_req_bits & SHADER_REQ_EXPLICIT_ATTRIB_LOCATION)
+ emit_ext(glsl_strbufs, "ARB_explicit_attrib_location", "require");
+
if (ctx->shader_req_bits & SHADER_REQ_ARRAYS_OF_ARRAYS)
emit_ext(glsl_strbufs, "ARB_arrays_of_arrays", "require");
@@ -5678,7 +6093,7 @@ static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *g
if (ctx->ubo_used_mask)
emit_ext(glsl_strbufs, "ARB_uniform_buffer_object", "require");
- if (ctx->num_cull_dist_prop || ctx->key->num_cull)
+ if (ctx->num_cull_dist_prop || ctx->key->num_in_cull || ctx->key->num_out_cull)
emit_ext(glsl_strbufs, "ARB_cull_distance", "require");
if (ctx->ssbo_used_mask)
emit_ext(glsl_strbufs, "ARB_shader_storage_buffer_object", "require");
@@ -5751,11 +6166,11 @@ const char *vrend_shader_samplertypeconv(bool use_gles, int sampler_type)
}
}
-static const char *get_interp_string(const struct vrend_shader_cfg *cfg, int interpolate, bool flatshade)
+static const char *get_interp_string(const struct vrend_shader_cfg *cfg, enum tgsi_interpolate_mode interpolate, bool flatshade)
{
switch (interpolate) {
case TGSI_INTERPOLATE_LINEAR:
- if (!cfg->use_gles)
+ if (cfg->has_nopersective)
return "noperspective ";
else
return "";
@@ -5768,11 +6183,11 @@ static const char *get_interp_string(const struct vrend_shader_cfg *cfg, int int
return "flat ";
/* fallthrough */
default:
- return NULL;
+ return "";
}
}
-static const char *get_aux_string(unsigned location)
+static const char *get_aux_string(enum tgsi_interpolate_loc location)
{
switch (location) {
case TGSI_INTERPOLATE_LOC_CENTER:
@@ -5954,6 +6369,7 @@ static void emit_image_decl(const struct dump_ctx *ctx,
const char *sname, *stc, *formatstr;
enum tgsi_return_type itype;
const char *volatile_str = image->vflag ? "volatile " : "";
+ const char *coherent_str = image->coherent ? "coherent " : "";
const char *precision = ctx->cfg->use_gles ? "highp " : "";
const char *access = "";
formatstr = get_internalformat_string(image->decl.Format, &itype);
@@ -5961,28 +6377,40 @@ static void emit_image_decl(const struct dump_ctx *ctx,
sname = tgsi_proc_to_prefix(ctx->prog_type);
stc = vrend_shader_samplertypeconv(ctx->cfg->use_gles, image->decl.Resource);
- if (!image->decl.Writable)
+
+ /* From ARB_shader_image_load_store:
+ Any image variable used for shader loads or atomic memory operations must
+ be declared with a format layout qualifier matching the format of its
+ associated image unit, ... Otherwise, the access is considered to
+ involve a format mismatch, ... Image variables used exclusively for
+ image stores need not include a format layout qualifier, but any declared
+ qualifier must match the image unit format to avoid a format mismatch. */
+ bool require_format_specifer = true;
+ if (!image->decl.Writable) {
access = "readonly ";
- else if (!image->decl.Format ||
+ } else if (!image->decl.Format ||
(ctx->cfg->use_gles &&
(image->decl.Format != PIPE_FORMAT_R32_FLOAT) &&
(image->decl.Format != PIPE_FORMAT_R32_SINT) &&
- (image->decl.Format != PIPE_FORMAT_R32_UINT)))
+ (image->decl.Format != PIPE_FORMAT_R32_UINT))) {
access = "writeonly ";
+ require_format_specifer = formatstr[0] != '\0';
+ }
if (ctx->cfg->use_gles) { /* TODO: enable on OpenGL 4.2 and up also */
emit_hdrf(glsl_strbufs, "layout(binding=%d%s%s) ",
i, formatstr[0] != '\0' ? ", " : ", rgba32f", formatstr);
- } else if (formatstr[0] != '\0') {
- emit_hdrf(glsl_strbufs, "layout(%s) ", formatstr);
+ } else if (require_format_specifer) {
+ emit_hdrf(glsl_strbufs, "layout(%s) ",
+ formatstr[0] != '\0' ? formatstr : "rgba32f");
}
if (range)
- emit_hdrf(glsl_strbufs, "%s%suniform %s%cimage%s %simg%d[%d];\n",
- access, volatile_str, precision, ptc, stc, sname, i, range);
+ emit_hdrf(glsl_strbufs, "%s%s%suniform %s%cimage%s %simg%d[%d];\n",
+ access, volatile_str, coherent_str, precision, ptc, stc, sname, i, range);
else
- emit_hdrf(glsl_strbufs, "%s%suniform %s%cimage%s %simg%d;\n",
- access, volatile_str, precision, ptc, stc, sname, i);
+ emit_hdrf(glsl_strbufs, "%s%s%suniform %s%cimage%s %simg%d;\n",
+ access, volatile_str, coherent_str, precision, ptc, stc, sname, i);
}
static int emit_ios_common(const struct dump_ctx *ctx,
@@ -5994,9 +6422,18 @@ static int emit_ios_common(const struct dump_ctx *ctx,
int glsl_ver_required = ctx->glsl_ver_required;
for (i = 0; i < ctx->num_temp_ranges; i++) {
- emit_hdrf(glsl_strbufs, "vec4 temp%d[%d];\n", ctx->temp_ranges[i].first, ctx->temp_ranges[i].last - ctx->temp_ranges[i].first + 1);
+ const char *precise = ctx->temp_ranges[i].precise_result ? "precise" : "";
+ if (ctx->temp_ranges[i].array_id > 0) {
+ emit_hdrf(glsl_strbufs, "%s vec4 temp%d[%d];\n", precise, ctx->temp_ranges[i].first,
+ ctx->temp_ranges[i].last - ctx->temp_ranges[i].first + 1);
+ } else {
+ emit_hdrf(glsl_strbufs, "%s vec4 temp%d;\n", precise, ctx->temp_ranges[i].first);
+ }
}
+ if (ctx->require_dummy_value)
+ emit_hdr(glsl_strbufs, "vec4 dummy_value = vec4(0.0, 0.0, 0.0, 0.0);\n");
+
if (ctx->write_mul_utemp) {
emit_hdr(glsl_strbufs, "uvec4 mul_utemp;\n");
emit_hdr(glsl_strbufs, "uvec4 umul_temp;\n");
@@ -6041,14 +6478,12 @@ static int emit_ios_common(const struct dump_ctx *ctx,
}
}
- unsigned n_samplers = 0;
if (ctx->info.indirect_files & (1 << TGSI_FILE_SAMPLER)) {
for (i = 0; i < ctx->num_sampler_arrays; i++) {
uint32_t first = ctx->sampler_arrays[i].first;
uint32_t range = ctx->sampler_arrays[i].array_size;
emit_sampler_decl(ctx, glsl_strbufs, shadow_samp_mask, first, range, ctx->samplers + first);
- n_samplers += range;
}
} else {
uint nsamp = util_last_bit(ctx->samplers_used);
@@ -6058,12 +6493,12 @@ static int emit_ios_common(const struct dump_ctx *ctx,
continue;
emit_sampler_decl(ctx, glsl_strbufs, shadow_samp_mask, i, 0, ctx->samplers + i);
- ++n_samplers;
}
}
if (ctx->cfg->use_gles && ctx->gles_use_tex_query_level)
- emit_hdrf(glsl_strbufs, "uniform int %s_texlod[%d];\n", tgsi_proc_to_prefix(ctx->info.processor), n_samplers);
+ emit_hdrf(glsl_strbufs, "uniform int %s_texlod[%d];\n", tgsi_proc_to_prefix(ctx->info.processor),
+ util_bitcount(ctx->samplers_used));
if (ctx->info.indirect_files & (1 << TGSI_FILE_IMAGE)) {
for (i = 0; i < ctx->num_image_arrays; i++) {
@@ -6127,7 +6562,7 @@ static void emit_ios_streamout(const struct dump_ctx *ctx,
const struct vrend_shader_io *output = get_io_slot(&ctx->outputs[0], ctx->num_outputs,
ctx->so->output[i].register_index);
if (ctx->so->output[i].need_temp || output->name == TGSI_SEMANTIC_CLIPDIST ||
- output->glsl_predefined_no_emit) {
+ ctx->prog_type == TGSI_PROCESSOR_GEOMETRY || output->glsl_predefined_no_emit) {
if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL)
emit_hdrf(glsl_strbufs, "out %s tfout%d[];\n", outtype, i);
@@ -6139,17 +6574,17 @@ static void emit_ios_streamout(const struct dump_ctx *ctx,
}
}
-static inline void emit_winsys_correction(struct vrend_glsl_strbufs *glsl_strbufs)
-{
- emit_hdr(glsl_strbufs, "uniform float winsys_adjust_y;\n");
-}
-
static void emit_ios_indirect_generics_output(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
const char *postfix)
{
if (ctx->generic_ios.output_range.used) {
- int size = ctx->generic_ios.output_range.io.last - ctx->generic_ios.output_range.io.sid + 1;
+ int size = ctx->generic_ios.output_range.io.last -
+ ctx->generic_ios.output_range.io.first + 1;
+ char array_handle[32] = "";
+ if (size > 1)
+ snprintf(array_handle, sizeof(array_handle), "[%d]", size);
+
if (prefer_generic_io_block(ctx, io_out)) {
char blockname[64];
const char *stage_prefix = get_stage_output_name_prefix(ctx->prog_type);
@@ -6158,13 +6593,13 @@ static void emit_ios_indirect_generics_output(const struct dump_ctx *ctx,
char blockvarame[64];
get_blockvarname(blockvarame, stage_prefix, &ctx->generic_ios.output_range.io, postfix);
- emit_hdrf(glsl_strbufs, "out %s {\n vec4 %s[%d]; \n} %s;\n", blockname,
- ctx->generic_ios.output_range.io.glsl_name, size, blockvarame);
+ emit_hdrf(glsl_strbufs, "out %s {\n vec4 %s%s; \n} %s;\n", blockname,
+ ctx->generic_ios.output_range.io.glsl_name, array_handle, blockvarame);
} else
- emit_hdrf(glsl_strbufs, "out vec4 %s%s[%d];\n",
+ emit_hdrf(glsl_strbufs, "out vec4 %s%s%s;\n",
ctx->generic_ios.output_range.io.glsl_name,
postfix,
- size);
+ array_handle);
}
}
@@ -6173,13 +6608,13 @@ static void emit_ios_indirect_generics_input(const struct dump_ctx *ctx,
const char *postfix)
{
if (ctx->generic_ios.input_range.used) {
- int size = ctx->generic_ios.input_range.io.last - ctx->generic_ios.input_range.io.sid + 1;
+ int size = ctx->generic_ios.input_range.io.last -
+ ctx->generic_ios.input_range.io.first + 1;
+ char array_handle[32] = "";
+ if (size > 1)
+ snprintf(array_handle, sizeof(array_handle), "[%d]", size);
+
assert(size < 256 && size >= 0);
- if (size < ctx->key->input.num_indirect_generic) {
- VREND_DEBUG(dbg_shader, NULL, "WARNING: shader key indicates less indirect inputs"
- " (%d) then are actually used (%d)\n",
- ctx->key->input.num_indirect_generic, size);
- }
if (prefer_generic_io_block(ctx, io_in)) {
@@ -6191,14 +6626,14 @@ static void emit_ios_indirect_generics_input(const struct dump_ctx *ctx,
get_blockvarname(blockvarame, stage_prefix, &ctx->generic_ios.input_range.io,
postfix);
- emit_hdrf(glsl_strbufs, "in %s {\n vec4 %s[%d]; \n} %s;\n",
+ emit_hdrf(glsl_strbufs, "in %s {\n vec4 %s%s; \n} %s;\n",
blockname, ctx->generic_ios.input_range.io.glsl_name,
- size, blockvarame);
+ array_handle, blockvarame);
} else
- emit_hdrf(glsl_strbufs, "in vec4 %s%s[%d];\n",
+ emit_hdrf(glsl_strbufs, "in vec4 %s%s%s;\n",
ctx->generic_ios.input_range.io.glsl_name,
postfix,
- size);
+ array_handle);
}
}
@@ -6206,36 +6641,31 @@ static void
emit_ios_generic(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
enum io_type iot, const char *prefix,
const struct vrend_shader_io *io, const char *inout,
const char *postfix)
{
- const char *atype[3][4] = {
- {"float", " vec2", " vec3", " vec4"},
- {" int", "ivec2", "ivec3", "ivec4"},
- {" uint", "uvec2", "uvec3", "uvec4"},
+ const char *atype[3] = {
+ " vec4", "ivec4", "uvec4"
};
- const char **type = atype[io->type];
- const char *t = type[3];
+
+ const char *t = atype[io->type];
char layout[128] = "";
- if (io->layout_location > 0) {
- /* we need to define a layout here because interleaved arrays might be emited */
- if (io->swizzle_offset)
- snprintf(layout, sizeof(layout), "layout(location = %d, component = %d)\n",
- io->layout_location - 1, io->swizzle_offset);
- else
- snprintf(layout, sizeof(layout), "layout(location = %d)\n", io->layout_location - 1);
- }
+ if (io->overlapping_array)
+ return;
- if (io->usage_mask != 0xf && io->name == TGSI_SEMANTIC_GENERIC)
- t = type[io->num_components - 1];
+ if (ctx->separable_program && io->name == TGSI_SEMANTIC_GENERIC &&
+ !(ctx->prog_type == TGSI_PROCESSOR_FRAGMENT && strcmp(inout, "in") != 0)) {
+ snprintf(layout, sizeof(layout), "layout(location = %d) ", 31 - io->sid);
+ }
if (io->first == io->last) {
emit_hdr(glsl_strbufs, layout);
/* ugly leave spaces to patch interp in later */
- emit_hdrf(glsl_strbufs, "%s%s\n%s %s %s %s%s;\n",
+ emit_hdrf(glsl_strbufs, "%s%s %s %s %s %s%s;\n",
io->precise ? "precise" : "",
io->invariant ? "invariant" : "",
prefix,
@@ -6247,13 +6677,21 @@ emit_ios_generic(const struct dump_ctx *ctx,
if (io->name == TGSI_SEMANTIC_GENERIC) {
assert(io->sid < 64);
if (iot == io_in) {
- generic_ios->inputs_emitted_mask |= 1ull << io->sid;
+ generic_ios->match.inputs_emitted_mask |= 1ull << io->sid;
} else {
- generic_ios->outputs_emitted_mask |= 1ull << io->sid;
+ generic_ios->match.outputs_emitted_mask |= 1ull << io->sid;
+ }
+ } else if (io->name == TGSI_SEMANTIC_TEXCOORD) {
+ assert(io->sid < 8);
+ if (iot == io_in) {
+ texcoord_ios->match.inputs_emitted_mask |= 1ull << io->sid;
+ } else {
+ texcoord_ios->match.outputs_emitted_mask |= 1ull << io->sid;
}
}
} else {
+ int array_size = io->last - io->first + 1;
if (prefer_generic_io_block(ctx, iot)) {
const char *stage_prefix = iot == io_in ? get_stage_input_name_prefix(ctx, ctx->prog_type):
get_stage_output_name_prefix(ctx->prog_type);
@@ -6272,7 +6710,7 @@ emit_ios_generic(const struct dump_ctx *ctx,
prefix,
t,
io->glsl_name,
- io->last - io->first +1,
+ array_size,
blockvarame);
} else {
emit_hdr(glsl_strbufs, layout);
@@ -6284,18 +6722,75 @@ emit_ios_generic(const struct dump_ctx *ctx,
t,
io->glsl_name,
postfix,
- io->last - io->first +1);
+ array_size);
+ uint64_t mask = ((1ull << array_size) - 1) << io->sid;
+ if (io->name == TGSI_SEMANTIC_GENERIC) {
+ assert(io->sid + array_size < 64);
+ if (iot == io_in) {
+ generic_ios->match.inputs_emitted_mask |= mask;
+ } else {
+ generic_ios->match.outputs_emitted_mask |= mask;
+ }
+ } else if (io->name == TGSI_SEMANTIC_TEXCOORD) {
+ assert(io->sid + array_size < 8);
+ if (iot == io_in) {
+ texcoord_ios->match.inputs_emitted_mask |= mask;
+ } else {
+ texcoord_ios->match.outputs_emitted_mask |= mask;
+ }
+ }
}
}
}
typedef bool (*can_emit_generic_callback)(const struct vrend_shader_io *io);
+/* Front and back color of the same semantic ID must have the same interpolator
+ * specifiers, and it may happen, that one or the other shader doesn't define
+ * both, front and back color, so always compare these two as COLOR. */
+static inline
+enum tgsi_semantic get_semantic_to_compare(enum tgsi_semantic name)
+{
+ switch (name) {
+ case TGSI_SEMANTIC_COLOR:
+ case TGSI_SEMANTIC_BCOLOR:
+ return TGSI_SEMANTIC_COLOR;
+ default:
+ return name;
+ }
+}
+
+static const char *
+get_interpolator_prefix(struct vrend_strbuf *buf, uint32_t *num_interps,
+ const struct vrend_shader_cfg *cfg, const struct vrend_shader_io *io,
+ const struct vrend_fs_shader_info *fs_info, bool flatshade)
+{
+ if (io->name == TGSI_SEMANTIC_GENERIC ||
+ io->name == TGSI_SEMANTIC_TEXCOORD ||
+ io->name == TGSI_SEMANTIC_COLOR ||
+ io->name == TGSI_SEMANTIC_BCOLOR) {
+ (*num_interps)++;
+ enum tgsi_semantic name = get_semantic_to_compare(io->name);
+
+ for (int j = 0; j < fs_info->num_interps; ++j) {
+ if (get_semantic_to_compare(fs_info->interpinfo[j].semantic_name) == name &&
+ fs_info->interpinfo[j].semantic_index == io->sid) {
+ strbuf_fmt(buf, "%s %s",
+ get_interp_string(cfg, fs_info->interpinfo[j].interpolate, flatshade),
+ get_aux_string(fs_info->interpinfo[j].location));
+ return buf->buf;
+ }
+ }
+ }
+ return "";
+}
+
static void
emit_ios_generic_outputs(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
uint8_t front_back_color_emitted_flags[],
bool *force_color_two_side,
uint32_t *num_interps,
@@ -6305,6 +6800,10 @@ emit_ios_generic_outputs(const struct dump_ctx *ctx,
uint64_t fc_emitted = 0;
uint64_t bc_emitted = 0;
+ char buffer[64];
+ struct vrend_strbuf buf;
+ strbuf_alloc_fixed(&buf, buffer, sizeof(buffer));
+
for (i = 0; i < ctx->num_outputs; i++) {
if (!ctx->outputs[i].glsl_predefined_no_emit) {
@@ -6312,14 +6811,10 @@ emit_ios_generic_outputs(const struct dump_ctx *ctx,
if (!can_emit_generic(&ctx->outputs[i]))
continue;
- const char *prefix = "";
- if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC ||
- ctx->outputs[i].name == TGSI_SEMANTIC_COLOR ||
- ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR) {
- (*num_interps)++;
- /* ugly leave spaces to patch interp in later */
- prefix = INTERP_PREFIX;
- }
+ /* It is save to use buf here even though it is declared outside the loop, because
+ * when written it is reset, and the content is used within the iteration */
+ const char *prefix = get_interpolator_prefix(&buf, num_interps, ctx->cfg, &ctx->outputs[i],
+ &ctx->key->fs_info, ctx->key->flatshade);
if (ctx->outputs[i].name == TGSI_SEMANTIC_COLOR) {
front_back_color_emitted_flags[ctx->outputs[i].sid] |= FRONT_COLOR_EMITTED;
@@ -6331,7 +6826,7 @@ emit_ios_generic_outputs(const struct dump_ctx *ctx,
bc_emitted |= 1ull << ctx->outputs[i].sid;
}
- emit_ios_generic(ctx, glsl_strbufs, generic_ios,
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios,
io_out, prefix, &ctx->outputs[i],
ctx->outputs[i].fbfetch_used ? "inout" : "out", "");
} else if (ctx->outputs[i].invariant || ctx->outputs[i].precise) {
@@ -6349,31 +6844,28 @@ emit_ios_generic_outputs(const struct dump_ctx *ctx,
*force_color_two_side = 1;
}
-static void
+static uint64_t
emit_ios_patch(struct vrend_glsl_strbufs *glsl_strbufs,
const char *prefix, const struct vrend_shader_io *io,
- const char *inout, int size)
+ const char *inout, int size, bool emit_location)
{
- const char type[4][6] = {"float", " vec2", " vec3", " vec4"};
- const char *t = " vec4";
-
- if (io->layout_location > 0) {
- /* we need to define a layout here because interleaved arrays might be emited */
- if (io->swizzle_offset)
- emit_hdrf(glsl_strbufs, "layout(location = %d, component = %d)\n",
- io->layout_location - 1, io->swizzle_offset);
- else
- emit_hdrf(glsl_strbufs, "layout(location = %d)\n", io->layout_location - 1);
- }
+ uint64_t emitted_patches = 0;
- if (io->usage_mask != 0xf)
- t = type[io->num_components - 1];
+ /* We start these locations from 32 and proceed downwards, to avoid
+ * conflicting with generic IO locations. */
+ if (emit_location)
+ emit_hdrf(glsl_strbufs, "layout(location = %d) ", io->sid);
- if (io->last == io->first)
- emit_hdrf(glsl_strbufs, "%s %s %s %s;\n", prefix, inout, t, io->glsl_name);
- else
- emit_hdrf(glsl_strbufs, "%s %s %s %s[%d];\n", prefix, inout, t,
+ if (io->last == io->first) {
+ emit_hdrf(glsl_strbufs, "%s %s vec4 %s;\n", prefix, inout, io->glsl_name);
+ emitted_patches |= 1ul << io->sid;
+ } else {
+ emit_hdrf(glsl_strbufs, "%s %s vec4 %s[%d];\n", prefix, inout,
io->glsl_name, size);
+ uint64_t mask = (1ul << size) - 1;
+ emitted_patches |= mask << io->sid;
+ }
+ return emitted_patches;
}
static bool
@@ -6385,6 +6877,7 @@ can_emit_generic_default(UNUSED const struct vrend_shader_io *io)
static void emit_ios_vs(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
uint32_t *num_interps,
uint8_t front_back_color_emitted_flags[],
bool *force_color_two_side)
@@ -6407,12 +6900,24 @@ static void emit_ios_vs(const struct dump_ctx *ctx,
emit_ios_indirect_generics_output(ctx, glsl_strbufs, "");
- emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags,
- force_color_two_side, num_interps, can_emit_generic_default);
+ emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, texcoord_ios,
+ front_back_color_emitted_flags, force_color_two_side,
+ num_interps, can_emit_generic_default);
if (ctx->key->color_two_side || ctx->force_color_two_side) {
bool fcolor_emitted, bcolor_emitted;
+ enum tgsi_interpolate_mode interpolators[2] = {TGSI_INTERPOLATE_COLOR, TGSI_INTERPOLATE_COLOR};
+ enum tgsi_interpolate_loc interp_loc[2] = { TGSI_INTERPOLATE_LOC_CENTER, TGSI_INTERPOLATE_LOC_CENTER};
+ for (int k = 0; k < ctx->key->fs_info.num_interps; k++) {
+ const struct vrend_interp_info *interp_info = &ctx->key->fs_info.interpinfo[k];
+ if (interp_info->semantic_name == TGSI_SEMANTIC_COLOR ||
+ interp_info->semantic_name == TGSI_SEMANTIC_BCOLOR) {
+ interpolators[interp_info->semantic_index] = interp_info->interpolate;
+ interp_loc[interp_info->semantic_index] = interp_info->location;
+ }
+ }
+
for (i = 0; i < ctx->num_outputs; i++) {
if (ctx->outputs[i].sid >= 2)
continue;
@@ -6423,11 +6928,17 @@ static void emit_ios_vs(const struct dump_ctx *ctx,
bcolor_emitted = front_back_color_emitted_flags[ctx->outputs[i].sid] & BACK_COLOR_EMITTED;
if (fcolor_emitted && !bcolor_emitted) {
- emit_hdrf(glsl_strbufs, "%sout vec4 ex_bc%d;\n", INTERP_PREFIX, ctx->outputs[i].sid);
+ emit_hdrf(glsl_strbufs, "%s %s out vec4 vso_bc%d;\n",
+ get_interp_string(ctx->cfg, interpolators[ctx->outputs[i].sid], ctx->key->flatshade),
+ get_aux_string(interp_loc[ctx->outputs[i].sid]),
+ ctx->outputs[i].sid);
front_back_color_emitted_flags[ctx->outputs[i].sid] |= BACK_COLOR_EMITTED;
}
if (bcolor_emitted && !fcolor_emitted) {
- emit_hdrf(glsl_strbufs, "%sout vec4 ex_c%d;\n", INTERP_PREFIX, ctx->outputs[i].sid);
+ emit_hdrf(glsl_strbufs, "%s %s out vec4 vso_c%d;\n",
+ get_interp_string(ctx->cfg, interpolators[ctx->outputs[i].sid], ctx->key->flatshade),
+ get_aux_string(interp_loc[ctx->outputs[i].sid]),
+ ctx->outputs[i].sid);
front_back_color_emitted_flags[ctx->outputs[i].sid] |= FRONT_COLOR_EMITTED;
}
}
@@ -6436,36 +6947,37 @@ static void emit_ios_vs(const struct dump_ctx *ctx,
if (ctx->key->vs.fog_fixup_mask)
emit_fog_fixup_hdr(ctx, glsl_strbufs);
- emit_winsys_correction(glsl_strbufs);
-
- if (ctx->has_clipvertex) {
+ if (ctx->has_clipvertex && ctx->is_last_vertex_stage) {
emit_hdrf(glsl_strbufs, "%svec4 clipv_tmp;\n", ctx->has_clipvertex_so ? "out " : "");
}
- if (ctx->num_out_clip_dist || ctx->key->clip_plane_enable) {
- bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
- int num_clip_dists = ctx->num_out_clip_dist ? ctx->num_out_clip_dist : 8;
- int num_cull_dists = 0;
- char cull_buf[64] = "";
- char clip_buf[64] = "";
- if (has_prop) {
- num_clip_dists = ctx->num_clip_dist_prop;
- num_cull_dists = ctx->num_cull_dist_prop;
- if (num_clip_dists)
- snprintf(clip_buf, 64, "out float gl_ClipDistance[%d];\n", num_clip_dists);
- if (num_cull_dists)
- snprintf(cull_buf, 64, "out float gl_CullDistance[%d];\n", num_cull_dists);
- } else
+
+ char cull_buf[64] = "";
+ char clip_buf[64] = "";
+
+ if (ctx->cfg->has_cull_distance && (ctx->num_out_clip_dist || ctx->is_last_vertex_stage)) {
+ int num_clip_dists = ctx->num_clip_dist_prop ? ctx->num_clip_dist_prop : 0;
+ int num_cull_dists = ctx->num_cull_dist_prop ? ctx->num_cull_dist_prop : 0;
+
+ int num_clip_cull = num_clip_dists + num_cull_dists;
+ if (ctx->num_out_clip_dist && !num_clip_cull)
+ num_clip_dists = ctx->num_out_clip_dist;
+
+ if (num_clip_dists)
snprintf(clip_buf, 64, "out float gl_ClipDistance[%d];\n", num_clip_dists);
- if (ctx->key->clip_plane_enable) {
- emit_hdr(glsl_strbufs, "uniform vec4 clipp[8];\n");
- }
+ if (num_cull_dists)
+ snprintf(cull_buf, 64, "out float gl_CullDistance[%d];\n", num_cull_dists);
- if (ctx->key->gs_present || ctx->key->tes_present) {
- emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position;\n %s%s};\n", clip_buf, cull_buf);
- } else {
+ if (ctx->is_last_vertex_stage) {
emit_hdrf(glsl_strbufs, "%s%s", clip_buf, cull_buf);
}
- emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
+
+ emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
+ }
+
+ const char *psize_buf = ctx->has_pointsize_output ? "out float gl_PointSize;\n" : "";
+
+ if (!ctx->is_last_vertex_stage && ctx->key->use_pervertex_in) {
+ emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position;\n %s%s%s};\n", clip_buf, cull_buf, psize_buf);
}
}
@@ -6486,21 +6998,21 @@ static const char *get_depth_layout(int depth_layout)
static void emit_ios_fs(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
- uint32_t *num_interps,
- bool *winsys_adjust_y_emitted
+ struct vrend_texcoord_ios *texcoord_ios,
+ uint32_t *num_interps
)
{
uint32_t i;
if (fs_emit_layout(ctx)) {
- bool upper_left = !(ctx->fs_coord_origin ^ ctx->key->fs.invert_origin);
- char comma = (upper_left && ctx->fs_pixel_center) ? ',' : ' ';
+ bool upper_left = ctx->fs_lower_left_origin == ctx->key->fs.lower_left_origin;
+ char comma = (upper_left && ctx->fs_integer_pixel_center) ? ',' : ' ';
if (!ctx->cfg->use_gles)
emit_hdrf(glsl_strbufs, "layout(%s%c%s) in vec4 gl_FragCoord;\n",
upper_left ? "origin_upper_left" : "",
comma,
- ctx->fs_pixel_center ? "pixel_center_integer" : "");
+ ctx->fs_integer_pixel_center ? "pixel_center_integer" : "");
}
if (ctx->early_depth_stencil) {
emit_hdr(glsl_strbufs, "layout(early_fragment_tests) in;\n");
@@ -6513,6 +7025,24 @@ static void emit_ios_fs(const struct dump_ctx *ctx,
const char *prefix = "";
const char *auxprefix = "";
+ if (ctx->cfg->use_gles) {
+ if (ctx->inputs[i].name == TGSI_SEMANTIC_COLOR) {
+ if (!(ctx->key->fs.available_color_in_bits & (1 << ctx->inputs[i].sid))) {
+ emit_hdrf(glsl_strbufs, "vec4 %s = vec4(0.0, 0.0, 0.0, 0.0);\n",
+ ctx->inputs[i].glsl_name);
+ continue;
+ }
+ }
+
+ if (ctx->inputs[i].name == TGSI_SEMANTIC_BCOLOR) {
+ if (!(ctx->key->fs.available_color_in_bits & (1 << ctx->inputs[i].sid) << 2)) {
+ emit_hdrf(glsl_strbufs, "vec4 %s = vec4(0.0, 0.0, 0.0, 0.0);\n",
+ ctx->inputs[i].glsl_name);
+ continue;
+ }
+ }
+ }
+
if (ctx->inputs[i].name == TGSI_SEMANTIC_GENERIC ||
ctx->inputs[i].name == TGSI_SEMANTIC_COLOR ||
ctx->inputs[i].name == TGSI_SEMANTIC_BCOLOR) {
@@ -6525,20 +7055,10 @@ static void emit_ios_fs(const struct dump_ctx *ctx,
char prefixes[64];
snprintf(prefixes, sizeof(prefixes), "%s %s", prefix, auxprefix);
- emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_in, prefixes, &ctx->inputs[i], "in", "");
- }
-
- if (ctx->cfg->use_gles && !ctx->winsys_adjust_y_emitted &&
- (ctx->key->fs.coord_replace & (1 << ctx->inputs[i].sid))) {
- *winsys_adjust_y_emitted = true;
- emit_hdr(glsl_strbufs, "uniform float winsys_adjust_y;\n");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios, io_in, prefixes, &ctx->inputs[i], "in", "");
}
}
- if (vrend_shader_needs_alpha_func(ctx->key)) {
- emit_hdr(glsl_strbufs, "uniform float alpha_ref_val;\n");
- }
-
if (ctx->key->color_two_side) {
if (ctx->color_in_mask & 1)
emit_hdr(glsl_strbufs, "vec4 realcolor0;\n");
@@ -6584,7 +7104,7 @@ static void emit_ios_fs(const struct dump_ctx *ctx,
!ctx->cfg->has_dual_src_blend)
sprintf(prefix, "layout(location = %d)", ctx->outputs[i].sid);
- emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_out, prefix, &ctx->outputs[i],
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios, io_out, prefix, &ctx->outputs[i],
ctx->outputs[i].fbfetch_used ? "inout" : "out", "");
} else if (ctx->outputs[i].invariant || ctx->outputs[i].precise) {
@@ -6603,14 +7123,14 @@ static void emit_ios_fs(const struct dump_ctx *ctx,
}
if (ctx->num_in_clip_dist) {
- if (ctx->key->num_clip) {
- emit_hdrf(glsl_strbufs, "in float gl_ClipDistance[%d];\n", ctx->key->num_clip);
- } else if (ctx->num_in_clip_dist > 4 && !ctx->key->num_cull) {
+ if (ctx->key->num_in_clip) {
+ emit_hdrf(glsl_strbufs, "in float gl_ClipDistance[%d];\n", ctx->key->num_in_clip);
+ } else if (ctx->num_in_clip_dist > 4 && !ctx->key->num_in_cull) {
emit_hdrf(glsl_strbufs, "in float gl_ClipDistance[%d];\n", ctx->num_in_clip_dist);
}
- if (ctx->key->num_cull) {
- emit_hdrf(glsl_strbufs, "in float gl_CullDistance[%d];\n", ctx->key->num_cull);
+ if (ctx->key->num_in_cull) {
+ emit_hdrf(glsl_strbufs, "in float gl_CullDistance[%d];\n", ctx->key->num_in_cull);
}
if(ctx->fs_uses_clipdist_input)
emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
@@ -6627,13 +7147,18 @@ static void emit_ios_per_vertex_in(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
bool *has_pervertex)
{
- if (ctx->num_in_clip_dist || ctx->key->clip_plane_enable) {
+ char clip_var[64] = "";
+ char cull_var[64] = "";
+
+ if (ctx->num_in_clip_dist) {
int clip_dist, cull_dist;
- char clip_var[64] = "";
- char cull_var[64] = "";
- clip_dist = ctx->num_clip_dist_prop ? ctx->num_clip_dist_prop : ctx->key->num_clip;
- cull_dist = ctx->num_cull_dist_prop ? ctx->num_cull_dist_prop : ctx->key->num_cull;
+ clip_dist = ctx->key->num_in_clip;
+ cull_dist = ctx->key->num_in_cull;
+
+ int num_clip_cull = clip_dist + cull_dist;
+ if (ctx->num_in_clip_dist && !num_clip_cull)
+ clip_dist = ctx->num_in_clip_dist;
if (clip_dist)
snprintf(clip_var, 64, "float gl_ClipDistance[%d];\n", clip_dist);
@@ -6641,35 +7166,47 @@ static void emit_ios_per_vertex_in(const struct dump_ctx *ctx,
snprintf(cull_var, 64, "float gl_CullDistance[%d];\n", cull_dist);
(*has_pervertex) = true;
- emit_hdrf(glsl_strbufs, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s\n} gl_in[];\n", clip_var, cull_var);
+ emit_hdrf(glsl_strbufs, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s%s\n} gl_in[];\n",
+ clip_var, cull_var, ctx->has_pointsize_input ? "float gl_PointSize;\n" : "");
+
}
+
}
static void emit_ios_per_vertex_out(const struct dump_ctx *ctx,
- struct vrend_glsl_strbufs *glsl_strbufs)
+ struct vrend_glsl_strbufs *glsl_strbufs, const char *instance_var)
{
- if (ctx->num_out_clip_dist || ctx->num_cull_dist_prop) {
- if (ctx->key->output.use_pervertex) {
+ int clip_dist = ctx->num_clip_dist_prop ? ctx->num_clip_dist_prop : ctx->key->num_out_clip;
+ int cull_dist = ctx->num_cull_dist_prop ? ctx->num_cull_dist_prop : ctx->key->num_out_cull;
+ int num_clip_cull = clip_dist + cull_dist;
- int clip_dist = ctx->num_clip_dist_prop ? ctx->num_clip_dist_prop : ctx->key->num_clip;
- int cull_dist = ctx->num_cull_dist_prop ? ctx->num_cull_dist_prop : ctx->key->num_cull;
+ if (ctx->num_out_clip_dist && !num_clip_cull)
+ clip_dist = ctx->num_out_clip_dist;
- char clip_var[64] = "", cull_var[64] = "";
- if (cull_dist)
- snprintf(cull_var, 64, "float gl_CullDistance[%d];\n", cull_dist);
+ if (ctx->key->use_pervertex_in) {
+ char clip_var[64] = "", cull_var[64] = "";
+ if (cull_dist)
+ snprintf(cull_var, 64, "float gl_CullDistance[%d];\n", cull_dist);
- if (clip_dist)
- snprintf(clip_var, 64, "float gl_ClipDistance[%d];\n", clip_dist);
- emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position;\n %s%s\n} gl_out[];\n", clip_var, cull_var);
- }
- emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
+ if (clip_dist)
+ snprintf(clip_var, 64, "float gl_ClipDistance[%d];\n", clip_dist);
+
+ emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position; \n %s%s%s\n} %s;\n",
+ clip_var, cull_var,
+ ctx->has_pointsize_output ? "float gl_PointSize;\n" : "",
+ instance_var);
}
+
+ if (clip_dist + cull_dist > 0)
+ emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
+
}
static void emit_ios_geom(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
uint8_t front_back_color_emitted_flags[],
uint32_t *num_interps,
bool *has_pervertex,
@@ -6690,7 +7227,7 @@ static void emit_ios_geom(const struct dump_ctx *ctx,
if (!ctx->inputs[i].glsl_predefined_no_emit) {
char postfix[64];
snprintf(postfix, sizeof(postfix), "[%d]", gs_input_prim_to_size(ctx->gs_in_prim));
- emit_ios_generic(ctx, glsl_strbufs, generic_ios,
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios,
io_in, "", &ctx->inputs[i], "in", postfix);
}
}
@@ -6705,8 +7242,6 @@ static void emit_ios_geom(const struct dump_ctx *ctx,
ctx->outputs[i].name == TGSI_SEMANTIC_COLOR ||
ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR) {
(*num_interps)++;
- /* ugly leave spaces to patch interp in later */
- prefix = INTERP_PREFIX;
}
emit_hdrf(glsl_strbufs, "layout (stream = %d) %s%s%sout vec4 %s;\n", ctx->outputs[i].stream, prefix,
@@ -6716,28 +7251,33 @@ static void emit_ios_geom(const struct dump_ctx *ctx,
}
}
- emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags,
- force_color_two_side, num_interps, can_emit_generic_geom);
+ emit_ios_indirect_generics_output(ctx, glsl_strbufs, "");
- emit_winsys_correction(glsl_strbufs);
+ emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, texcoord_ios,
+ front_back_color_emitted_flags, force_color_two_side,
+ num_interps, can_emit_generic_geom);
emit_ios_per_vertex_in(ctx, glsl_strbufs, has_pervertex);
+ if (ctx->has_clipvertex) {
+ emit_hdrf(glsl_strbufs, "%svec4 clipv_tmp;\n", ctx->has_clipvertex_so ? "out " : "");
+ }
+
if (ctx->num_out_clip_dist) {
- bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
- int num_clip_dists = ctx->num_out_clip_dist ? ctx->num_out_clip_dist : 8;
- int num_cull_dists = 0;
+ bool has_clip_or_cull_prop = ctx->num_clip_dist_prop + ctx->num_cull_dist_prop > 0;
+
+ int num_clip_dists = has_clip_or_cull_prop ? ctx->num_clip_dist_prop :
+ (ctx->num_out_clip_dist ? ctx->num_out_clip_dist : 8);
+ int num_cull_dists = has_clip_or_cull_prop ? ctx->num_cull_dist_prop : 0;
+
char cull_buf[64] = "";
char clip_buf[64] = "";
- if (has_prop) {
- num_clip_dists = ctx->num_clip_dist_prop;
- num_cull_dists = ctx->num_cull_dist_prop;
- if (num_clip_dists)
- snprintf(clip_buf, 64, "out float gl_ClipDistance[%d];\n", num_clip_dists);
- if (num_cull_dists)
- snprintf(cull_buf, 64, "out float gl_CullDistance[%d];\n", num_cull_dists);
- } else
+
+ if (num_clip_dists)
snprintf(clip_buf, 64, "out float gl_ClipDistance[%d];\n", num_clip_dists);
+ if (num_cull_dists)
+ snprintf(cull_buf, 64, "out float gl_CullDistance[%d];\n", num_cull_dists);
+
emit_hdrf(glsl_strbufs, "%s%s\n", clip_buf, cull_buf);
emit_hdrf(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
}
@@ -6747,6 +7287,8 @@ static void emit_ios_geom(const struct dump_ctx *ctx,
static void emit_ios_tcs(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
+ uint64_t *emitted_out_patches_mask,
bool *has_pervertex)
{
uint32_t i;
@@ -6755,28 +7297,33 @@ static void emit_ios_tcs(const struct dump_ctx *ctx,
for (i = 0; i < ctx->num_inputs; i++) {
if (!ctx->inputs[i].glsl_predefined_no_emit) {
- if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH)
- emit_ios_patch(glsl_strbufs, "", &ctx->inputs[i], "in", ctx->inputs[i].last - ctx->inputs[i].first + 1);
- else
- emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_in, "", &ctx->inputs[i], "in", "[]");
+ if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH) {
+ emit_ios_patch(glsl_strbufs, "", &ctx->inputs[i], "in",
+ ctx->inputs[i].last - ctx->inputs[i].first + 1,
+ ctx->separable_program);
+ } else
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios, io_in, "", &ctx->inputs[i], "in", "[]");
}
}
- emit_hdrf(glsl_strbufs, "layout(vertices = %d) out;\n", ctx->tcs_vertices_out);
+ uint64_t emitted_patches = 0;
- emit_ios_indirect_generics_output(ctx, glsl_strbufs, "[]");
+ emit_hdrf(glsl_strbufs, "layout(vertices = %d) out;\n", ctx->tcs_vertices_out);
if (ctx->patch_ios.output_range.used)
- emit_ios_patch(glsl_strbufs, "patch", &ctx->patch_ios.output_range.io, "out",
- ctx->patch_ios.output_range.io.last - ctx->patch_ios.output_range.io.sid + 1);
+ emitted_patches |= emit_ios_patch(glsl_strbufs, "patch", &ctx->patch_ios.output_range.io, "out",
+ ctx->patch_ios.output_range.io.last - ctx->patch_ios.output_range.io.first + 1,
+ ctx->separable_program);
for (i = 0; i < ctx->num_outputs; i++) {
if (!ctx->outputs[i].glsl_predefined_no_emit) {
if (ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) {
- emit_ios_patch(glsl_strbufs, "patch", &ctx->outputs[i], "out",
- ctx->outputs[i].last - ctx->outputs[i].first + 1);
+
+ emitted_patches |= emit_ios_patch(glsl_strbufs, "patch", &ctx->outputs[i], "out",
+ ctx->outputs[i].last - ctx->outputs[i].first + 1,
+ ctx->separable_program);
} else
- emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_out, "", &ctx->outputs[i], "out", "[]");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios, io_out, "", &ctx->outputs[i], "out", "[]");
} else if (ctx->outputs[i].invariant || ctx->outputs[i].precise) {
emit_hdrf(glsl_strbufs, "%s%s;\n",
ctx->outputs[i].precise ? "precise " :
@@ -6786,12 +7333,15 @@ static void emit_ios_tcs(const struct dump_ctx *ctx,
}
emit_ios_per_vertex_in(ctx, glsl_strbufs, has_pervertex);
- emit_ios_per_vertex_out(ctx, glsl_strbufs);
+ emit_ios_per_vertex_out(ctx, glsl_strbufs, " gl_out[]");
+
+ *emitted_out_patches_mask = emitted_patches;
}
static void emit_ios_tes(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
uint8_t front_back_color_emitted_flags[],
uint32_t *num_interps,
bool *has_pervertex,
@@ -6801,7 +7351,9 @@ static void emit_ios_tes(const struct dump_ctx *ctx,
if (ctx->patch_ios.input_range.used)
emit_ios_patch(glsl_strbufs, "patch", &ctx->patch_ios.input_range.io, "in",
- ctx->patch_ios.input_range.io.last - ctx->patch_ios.input_range.io.sid + 1);
+ ctx->patch_ios.input_range.io.last -
+ ctx->patch_ios.input_range.io.first + 1,
+ ctx->separable_program);
if (generic_ios->input_range.used)
emit_ios_indirect_generics_input(ctx, glsl_strbufs, "[]");
@@ -6810,9 +7362,10 @@ static void emit_ios_tes(const struct dump_ctx *ctx,
if (!ctx->inputs[i].glsl_predefined_no_emit) {
if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH)
emit_ios_patch(glsl_strbufs, "patch", &ctx->inputs[i], "in",
- ctx->inputs[i].last - ctx->inputs[i].first + 1);
+ ctx->inputs[i].last - ctx->inputs[i].first + 1,
+ ctx->separable_program);
else
- emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_in, "", &ctx->inputs[i], "in", "[]");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, texcoord_ios, io_in, "", &ctx->inputs[i], "in", "[]");
}
}
@@ -6822,13 +7375,19 @@ static void emit_ios_tes(const struct dump_ctx *ctx,
ctx->tes_vertex_order ? "cw" : "ccw",
ctx->tes_point_mode ? ", point_mode" : "");
- emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags,
- force_color_two_side, num_interps, can_emit_generic_default);
+ emit_ios_indirect_generics_output(ctx, glsl_strbufs, "");
- emit_winsys_correction(glsl_strbufs);
+ emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, texcoord_ios,
+ front_back_color_emitted_flags, force_color_two_side,
+ num_interps, can_emit_generic_default);
emit_ios_per_vertex_in(ctx, glsl_strbufs, has_pervertex);
- emit_ios_per_vertex_out(ctx, glsl_strbufs);
+ emit_ios_per_vertex_out(ctx, glsl_strbufs, "");
+
+ if (ctx->has_clipvertex && !ctx->key->gs_present) {
+ emit_hdrf(glsl_strbufs, "%svec4 clipv_tmp;\n", ctx->has_clipvertex_so ? "out " : "");
+ }
+
}
@@ -6844,14 +7403,59 @@ static void emit_ios_cs(const struct dump_ctx *ctx,
}
}
+static void emit_interp_info(struct vrend_glsl_strbufs *glsl_strbufs,
+ const struct vrend_shader_cfg *cfg,
+ const struct vrend_fs_shader_info *fs_info,
+ enum tgsi_semantic semantic, int sid, bool flatshade)
+{
+ for (int j = 0; j < fs_info->num_interps; ++j) {
+ if (fs_info->interpinfo[j].semantic_name == semantic &&
+ fs_info->interpinfo[j].semantic_index == sid) {
+ emit_hdrf(glsl_strbufs, "%s %s ",
+ get_interp_string(cfg, fs_info->interpinfo[j].interpolate, flatshade),
+ get_aux_string(fs_info->interpinfo[j].location));
+ break;
+ }
+ }
+}
+
+struct sematic_info {
+ enum tgsi_semantic name;
+ const char prefix;
+};
+
+static void emit_match_interfaces(struct vrend_glsl_strbufs *glsl_strbufs,
+ const struct dump_ctx *ctx,
+ const struct vrend_interface_bits *match,
+ const struct sematic_info *semantic)
+{
+ uint64_t mask = (match->outputs_expected_mask | match->outputs_emitted_mask)
+ ^ match->outputs_emitted_mask;
+
+ while (mask) {
+ int i = u_bit_scan64(&mask);
+ emit_interp_info(glsl_strbufs, ctx->cfg, &ctx->key->fs_info,
+ semantic->name, i, ctx->key->flatshade);
+
+ if (semantic->name == TGSI_SEMANTIC_GENERIC && ctx->separable_program)
+ emit_hdrf(glsl_strbufs, "layout(location=%d) ", i);
+
+ emit_hdrf(glsl_strbufs, "out vec4 %s_%c%d%s;\n",
+ get_stage_output_name_prefix(ctx->prog_type),
+ semantic->prefix, i,
+ ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ? "[]" : "");
+ }
+}
+
static int emit_ios(const struct dump_ctx *ctx,
struct vrend_glsl_strbufs *glsl_strbufs,
struct vrend_generic_ios *generic_ios,
+ struct vrend_texcoord_ios *texcoord_ios,
+ uint64_t *patches_emitted_mask,
uint8_t front_back_color_emitted_flags[],
uint32_t *num_interps,
bool *has_pervertex,
bool *force_color_two_side,
- bool *winsys_adjust_y_emitted,
uint32_t *shadow_samp_mask)
{
*num_interps = 0;
@@ -6865,19 +7469,19 @@ static int emit_ios(const struct dump_ctx *ctx,
switch (ctx->prog_type) {
case TGSI_PROCESSOR_VERTEX:
- emit_ios_vs(ctx, glsl_strbufs, generic_ios, num_interps, front_back_color_emitted_flags, force_color_two_side);
+ emit_ios_vs(ctx, glsl_strbufs, generic_ios, texcoord_ios, num_interps, front_back_color_emitted_flags, force_color_two_side);
break;
case TGSI_PROCESSOR_FRAGMENT:
- emit_ios_fs(ctx, glsl_strbufs, generic_ios, num_interps, winsys_adjust_y_emitted);
+ emit_ios_fs(ctx, glsl_strbufs, generic_ios, texcoord_ios, num_interps);
break;
case TGSI_PROCESSOR_GEOMETRY:
- emit_ios_geom(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags, num_interps, has_pervertex, force_color_two_side);
+ emit_ios_geom(ctx, glsl_strbufs, generic_ios, texcoord_ios, front_back_color_emitted_flags, num_interps, has_pervertex, force_color_two_side);
break;
case TGSI_PROCESSOR_TESS_CTRL:
- emit_ios_tcs(ctx, glsl_strbufs, generic_ios, has_pervertex);
+ emit_ios_tcs(ctx, glsl_strbufs, generic_ios, texcoord_ios, patches_emitted_mask, has_pervertex);
break;
case TGSI_PROCESSOR_TESS_EVAL:
- emit_ios_tes(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags, num_interps, has_pervertex, force_color_two_side);
+ emit_ios_tes(ctx, glsl_strbufs, generic_ios, texcoord_ios, front_back_color_emitted_flags, num_interps, has_pervertex, force_color_two_side);
break;
case TGSI_PROCESSOR_COMPUTE:
emit_ios_cs(ctx, glsl_strbufs);
@@ -6888,24 +7492,18 @@ static int emit_ios(const struct dump_ctx *ctx,
return glsl_ver_required;
}
- if (generic_ios->outputs_expected_mask &&
- (generic_ios->outputs_expected_mask != generic_ios->outputs_emitted_mask)) {
- for (int i = 0; i < 64; ++i) {
- uint64_t mask = 1ull << i;
- bool expecting = generic_ios->outputs_expected_mask & mask;
- if (expecting & !(generic_ios->outputs_emitted_mask & mask))
- emit_hdrf(glsl_strbufs, " out vec4 %s_g%dA0_f%s;\n",
- get_stage_output_name_prefix(ctx->prog_type), i,
- ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ? "[]" : "");
- }
- }
+ const struct sematic_info generic = {TGSI_SEMANTIC_GENERIC, 'g'};
+ const struct sematic_info texcoord = {TGSI_SEMANTIC_TEXCOORD, 't'};
+
+ emit_match_interfaces(glsl_strbufs, ctx, &generic_ios->match, &generic);
+ emit_match_interfaces(glsl_strbufs, ctx, &texcoord_ios->match, &texcoord);
emit_ios_streamout(ctx, glsl_strbufs);
glsl_ver_required = emit_ios_common(ctx, glsl_strbufs, shadow_samp_mask);
if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT &&
- ctx->key->pstipple_tex == true) {
- emit_hdr(glsl_strbufs, "uniform sampler2D pstipple_sampler;\nfloat stip_temp;\n");
+ ctx->key->pstipple_enabled) {
+ emit_hdr(glsl_strbufs, "uint stip_temp;\n");
}
return glsl_ver_required;
@@ -6940,7 +7538,7 @@ static boolean fill_interpolants(const struct dump_ctx *ctx, struct vrend_variab
{
if (!ctx->num_interps)
return true;
- if (ctx->prog_type == TGSI_PROCESSOR_VERTEX || ctx->prog_type == TGSI_PROCESSOR_GEOMETRY)
+ if (ctx->prog_type != TGSI_PROCESSOR_FRAGMENT)
return true;
return fill_fragment_interpolants(ctx, &sinfo->fs_info);
@@ -6980,19 +7578,23 @@ static boolean analyze_instruction(struct tgsi_iterate_context *iter,
static void fill_var_sinfo(const struct dump_ctx *ctx, struct vrend_variable_shader_info *sinfo)
{
- sinfo->num_ucp = ctx->key->clip_plane_enable ? 8 : 0;
+ sinfo->num_ucp = ctx->is_last_vertex_stage ? VIRGL_NUM_CLIP_PLANES : 0;
sinfo->fs_info.has_sample_input = ctx->has_sample_input;
+ sinfo->fs_info.has_noperspective = ctx->has_noperspective;
sinfo->fs_info.num_interps = ctx->num_interps;
sinfo->fs_info.glsl_ver = ctx->glsl_ver_required;
bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
- sinfo->num_clip = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_clip;
- sinfo->num_cull = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_cull;
+ sinfo->num_in_clip = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_in_clip;
+ sinfo->num_in_cull = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_in_cull;
+ sinfo->num_out_clip = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_out_clip;
+ sinfo->num_out_cull = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_out_cull;
+ sinfo->legacy_color_bits = ctx->color_out_mask;
}
static void fill_sinfo(const struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
{
- sinfo->in.use_pervertex = ctx->has_pervertex;
+ sinfo->use_pervertex_in = ctx->has_pervertex;
sinfo->samplers_used_mask = ctx->samplers_used;
sinfo->images_used_mask = ctx->images_used_mask;
sinfo->num_consts = ctx->num_consts;
@@ -7004,15 +7606,12 @@ static void fill_sinfo(const struct dump_ctx *ctx, struct vrend_shader_info *sin
sinfo->ubo_indirect = !!(ctx->info.dimension_indirect_files & (1 << TGSI_FILE_CONSTANT));
- if (ctx->generic_ios.input_range.used)
- sinfo->in.num_indirect_generic = ctx->generic_ios.input_range.io.last - ctx->generic_ios.input_range.io.sid + 1;
- if (ctx->patch_ios.input_range.used)
- sinfo->in.num_indirect_patch = ctx->patch_ios.input_range.io.last - ctx->patch_ios.input_range.io.sid + 1;
+ sinfo->has_output_arrays = ctx->has_output_arrays;
+ sinfo->has_input_arrays = ctx->has_input_arrays;
- if (ctx->generic_ios.output_range.used)
- sinfo->out.num_indirect_generic = ctx->generic_ios.output_range.io.last - ctx->generic_ios.output_range.io.sid + 1;
- if (ctx->patch_ios.output_range.used)
- sinfo->out.num_indirect_patch = ctx->patch_ios.output_range.io.last - ctx->patch_ios.output_range.io.sid + 1;
+ sinfo->out_generic_emitted_mask = ctx->generic_ios.match.outputs_emitted_mask;
+ sinfo->out_texcoord_emitted_mask = ctx->texcoord_ios.match.outputs_emitted_mask;
+ sinfo->out_patch_emitted_mask = ctx->patches_emitted_mask;
sinfo->num_inputs = ctx->num_inputs;
sinfo->num_outputs = ctx->num_outputs;
@@ -7021,6 +7620,7 @@ static void fill_sinfo(const struct dump_ctx *ctx, struct vrend_shader_info *sin
sinfo->tes_prim = ctx->tes_prim_mode;
sinfo->tes_point_mode = ctx->tes_point_mode;
sinfo->fs_blend_equation_advanced = ctx->fs_blend_equation_advanced;
+ sinfo->separable_program = ctx->separable_program;
if (sinfo->so_names || ctx->so_names) {
if (sinfo->so_names) {
@@ -7034,16 +7634,12 @@ static void fill_sinfo(const struct dump_ctx *ctx, struct vrend_shader_info *sin
* to the next shader stage. mesa/tgsi doesn't provide this information for
* TCS, TES, and GEOM shaders.
*/
- sinfo->out.guest_sent_io_arrays = ctx->guest_sent_io_arrays;
- sinfo->out.num_generic_and_patch = 0;
for(unsigned i = 0; i < ctx->num_outputs; i++) {
- if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC || ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) {
- sinfo->generic_outputs_layout[sinfo->out.num_generic_and_patch].name = ctx->outputs[i].name;
- sinfo->generic_outputs_layout[sinfo->out.num_generic_and_patch].sid = ctx->outputs[i].sid;
- sinfo->generic_outputs_layout[sinfo->out.num_generic_and_patch].location = ctx->outputs[i].layout_location;
- sinfo->generic_outputs_layout[sinfo->out.num_generic_and_patch].array_id = ctx->outputs[i].array_id;
- sinfo->generic_outputs_layout[sinfo->out.num_generic_and_patch].usage_mask = ctx->outputs[i].usage_mask;
- sinfo->out.num_generic_and_patch++;
+ if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT) {
+ if (ctx->outputs[i].name == TGSI_SEMANTIC_COLOR)
+ sinfo->fs_output_layout[i] = ctx->outputs[i].sid;
+ else
+ sinfo->fs_output_layout[i] = -1;
}
}
@@ -7057,13 +7653,34 @@ static void fill_sinfo(const struct dump_ctx *ctx, struct vrend_shader_info *sin
free(sinfo->image_arrays);
sinfo->image_arrays = ctx->image_arrays;
sinfo->num_image_arrays = ctx->num_image_arrays;
- sinfo->in.generic_emitted_mask = ctx->generic_ios.inputs_emitted_mask;
+ sinfo->in_generic_emitted_mask = ctx->generic_ios.match.inputs_emitted_mask;
+ sinfo->in_texcoord_emitted_mask = ctx->texcoord_ios.match.inputs_emitted_mask;
for (unsigned i = 0; i < ctx->num_outputs; ++i) {
- if (ctx->outputs[i].invariant)
- sinfo->invariant_outputs |= 1ull << ctx->outputs[i].sid;
+ if (ctx->outputs[i].invariant) {
+ uint32_t bit_pos = varying_bit_from_semantic_and_index(ctx->outputs[i].name, ctx->outputs[i].sid);
+ uint32_t slot = bit_pos / 32;
+ uint32_t bit = 1u << (bit_pos & 0x1f);
+ sinfo->invariant_outputs[slot] |= bit;
+ }
}
sinfo->gles_use_tex_query_level = ctx->gles_use_tex_query_level;
+
+ if (ctx->guest_sent_io_arrays) {
+ sinfo->output_arrays.num_arrays = 0;
+ for (unsigned i = 0; i < ctx->num_outputs; ++i) {
+ const struct vrend_shader_io *io = &ctx->outputs[i];
+ if (io->array_id > 0) {
+ struct vrend_shader_io_array *array =
+ &sinfo->output_arrays.layout[sinfo->output_arrays.num_arrays];
+ array->sid = io->sid;
+ array->size = io->last - io->first;
+ array->name = io->name;
+ array->array_id = io->array_id;
+ ++sinfo->output_arrays.num_arrays;
+ }
+ }
+ }
}
static bool allocate_strbuffers(struct vrend_glsl_strbufs* glsl_strbufs)
@@ -7083,7 +7700,7 @@ static bool allocate_strbuffers(struct vrend_glsl_strbufs* glsl_strbufs)
return true;
}
-static void set_strbuffers(MAYBE_UNUSED const struct vrend_context *rctx, const struct vrend_glsl_strbufs* glsl_strbufs,
+static void set_strbuffers(const struct vrend_glsl_strbufs* glsl_strbufs,
struct vrend_strarray *shader)
{
strarray_addstrbuf(shader, &glsl_strbufs->glsl_ver_ext);
@@ -7091,13 +7708,127 @@ static void set_strbuffers(MAYBE_UNUSED const struct vrend_context *rctx, const
strarray_addstrbuf(shader, &glsl_strbufs->glsl_main);
}
-static bool vrend_patch_vertex_shader_interpolants(MAYBE_UNUSED const struct vrend_context *rctx,
- const struct vrend_shader_cfg *cfg,
- struct vrend_strarray *prog_strings,
- const struct vrend_shader_info *vs_info,
- const struct vrend_fs_shader_info *fs_info,
- const char *oprefix,
- bool flatshade);
+static void emit_required_sysval_uniforms(struct vrend_strbuf *block, uint32_t mask)
+{
+ if (!mask)
+ return;
+
+ strbuf_append(block, "layout (std140) uniform VirglBlock {\n");
+ strbuf_append(block, "\tvec4 clipp[8];\n");
+ strbuf_appendf(block, "\tuint stipple_pattern[%d];\n", VREND_POLYGON_STIPPLE_SIZE);
+ strbuf_append(block, "\tfloat winsys_adjust_y;\n");
+ strbuf_append(block, "\tfloat alpha_ref_val;\n");
+ strbuf_append(block, "\tbool clip_plane_enabled;\n");
+ strbuf_append(block, "};\n");
+
+}
+
+static int compare_sid(const void *lhs, const void *rhs)
+{
+ const struct vrend_shader_io *l = (struct vrend_shader_io *)lhs;
+ const struct vrend_shader_io *r = (struct vrend_shader_io *)rhs;
+
+ if (l->name != r->name)
+ return l->name - r->name;
+
+ return l->sid - r->sid;
+}
+
+struct sso_scan_ctx {
+ struct tgsi_iterate_context iter;
+ const struct vrend_shader_cfg *cfg;
+ uint8_t max_generic_in_sid;
+ uint8_t max_patch_in_sid;
+ uint8_t max_generic_out_sid;
+ uint8_t max_patch_out_sid;
+ bool separable_program;
+ bool unsupported_io;
+};
+
+static boolean
+iter_prop_for_separable(struct tgsi_iterate_context *iter,
+ struct tgsi_full_property *prop)
+{
+ struct sso_scan_ctx *ctx = (struct sso_scan_ctx *) iter;
+
+ if (prop->Property.PropertyName == TGSI_PROPERTY_SEPARABLE_PROGRAM)
+ ctx->separable_program = prop->u[0].Data != 0;
+ return true;
+}
+
+static boolean
+iter_decl_for_overlap(struct tgsi_iterate_context *iter,
+ struct tgsi_full_declaration *decl)
+{
+ struct sso_scan_ctx *ctx = (struct sso_scan_ctx *) iter;
+
+ /* VS inputs and FS outputs are of no interest
+ * when it comes to IO matching */
+ if (decl->Declaration.File == TGSI_FILE_INPUT &&
+ iter->processor.Processor == TGSI_PROCESSOR_VERTEX)
+ return true;
+
+ if (decl->Declaration.File == TGSI_FILE_OUTPUT &&
+ iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT)
+ return true;
+
+ switch (decl->Semantic.Name) {
+ case TGSI_SEMANTIC_PATCH:
+ if (decl->Declaration.File == TGSI_FILE_INPUT) {
+ if (ctx->max_patch_in_sid < decl->Semantic.Index)
+ ctx->max_patch_in_sid = decl->Semantic.Index;
+ } else {
+ if (ctx->max_patch_out_sid < decl->Semantic.Index)
+ ctx->max_patch_out_sid = decl->Semantic.Index;
+ }
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ if (decl->Declaration.File == TGSI_FILE_INPUT) {
+ if (ctx->max_generic_in_sid < decl->Semantic.Index)
+ ctx->max_generic_in_sid = decl->Semantic.Index;
+ } else {
+ if (ctx->max_generic_out_sid < decl->Semantic.Index)
+ ctx->max_generic_out_sid = decl->Semantic.Index;
+ }
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ case TGSI_SEMANTIC_CLIPVERTEX:
+ case TGSI_SEMANTIC_BCOLOR:
+ case TGSI_SEMANTIC_TEXCOORD:
+ case TGSI_SEMANTIC_FOG:
+ /* These are semantics that need to be matched by name and since we can't
+ * guarantee that they exist in all the stages of separable shaders
+ * we can't emit the shader as SSO */
+ ctx->unsupported_io = true;
+ break;
+ default:
+ ;
+ }
+ return true;
+}
+
+
+bool vrend_shader_query_separable_program(const struct tgsi_token *tokens,
+ const struct vrend_shader_cfg *cfg)
+{
+ struct sso_scan_ctx ctx = {0};
+ ctx.cfg = cfg;
+ ctx.iter.iterate_property = iter_prop_for_separable;
+ ctx.iter.iterate_declaration = iter_decl_for_overlap;
+ tgsi_iterate_shader(tokens, &ctx.iter);
+
+ /* Since we have to match by location, and have to handle generics and patches
+ * at in the limited range of 32 locations, we have to make sure that the
+ * the generics range and the patch range don't overlap. In addition, to
+ * work around that radeonsi doesn't support patch locations above 30 we have
+ * to check that limit too. */
+ bool supports_separable = !ctx.unsupported_io &&
+ (ctx.max_generic_in_sid + ctx.max_patch_in_sid < MAX_VARYING) &&
+ (ctx.max_generic_out_sid + ctx.max_patch_out_sid < MAX_VARYING) &&
+ (ctx.max_patch_in_sid < ctx.cfg->max_shader_patch_varyings) &&
+ (ctx.max_patch_out_sid < ctx.cfg->max_shader_patch_varyings);
+ return ctx.separable_program && supports_separable;
+}
bool vrend_convert_shader(const struct vrend_context *rctx,
const struct vrend_shader_cfg *cfg,
@@ -7112,18 +7843,21 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
boolean bret;
memset(&ctx, 0, sizeof(struct dump_ctx));
+ ctx.cfg = cfg;
/* First pass to deal with edge cases. */
- if (ctx.prog_type == TGSI_PROCESSOR_FRAGMENT ||
- ctx.prog_type == TGSI_PROCESSOR_VERTEX)
- ctx.iter.iterate_declaration = iter_decls;
+ ctx.iter.iterate_declaration = iter_decls;
ctx.iter.iterate_instruction = analyze_instruction;
bret = tgsi_iterate_shader(tokens, &ctx.iter);
if (bret == false)
return false;
- ctx.num_inputs = 0;
+ ctx.is_last_vertex_stage =
+ (ctx.iter.processor.Processor == TGSI_PROCESSOR_GEOMETRY) ||
+ (ctx.iter.processor.Processor == TGSI_PROCESSOR_TESS_EVAL && !key->gs_present) ||
+ (ctx.iter.processor.Processor == TGSI_PROCESSOR_VERTEX && !key->gs_present && !key->tes_present);
+ ctx.num_inputs = 0;
ctx.iter.prolog = prolog;
ctx.iter.iterate_instruction = iter_instruction;
ctx.iter.iterate_declaration = iter_declaration;
@@ -7141,8 +7875,9 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
ctx.ssbo_atomic_array_base = 0xffffffff;
ctx.has_sample_input = false;
ctx.req_local_mem = req_local_mem;
- ctx.guest_sent_io_arrays = key->input.guest_sent_io_arrays;
- ctx.generic_ios.outputs_expected_mask = key->output.generic_emitted_mask;
+ ctx.guest_sent_io_arrays = false;
+ ctx.generic_ios.match.outputs_expected_mask = key->out_generic_expected_mask;
+ ctx.texcoord_ios.match.outputs_expected_mask = key->out_texcoord_expected_mask;
tgsi_scan_shader(tokens, &ctx.info);
/* if we are in core profile mode we should use GLSL 1.40 */
@@ -7175,15 +7910,46 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
if (bret == false)
goto fail;
+ /* If we need a sysvalue UBO then we require GLSL 1.40 */
+ if (ctx.glsl_strbufs.required_sysval_uniform_decls)
+ ctx.glsl_ver_required = require_glsl_ver(&ctx, 140);
+
+ if (!ctx.cfg->use_gles &&
+ ( key->in_arrays.num_arrays > 0 ) &&
+ (ctx.prog_type == TGSI_PROCESSOR_GEOMETRY ||
+ ctx.prog_type == TGSI_PROCESSOR_TESS_CTRL ||
+ ctx.prog_type == TGSI_PROCESSOR_TESS_EVAL)) {
+ ctx.shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
+ }
+
for (size_t i = 0; i < ARRAY_SIZE(ctx.src_bufs); ++i)
strbuf_free(ctx.src_bufs + i);
+ for (size_t i = 0; i < ARRAY_SIZE(ctx.dst_bufs); ++i)
+ strbuf_free(ctx.dst_bufs + i);
+
+ if (ctx.prog_type == TGSI_PROCESSOR_FRAGMENT)
+ qsort(ctx.outputs, ctx.num_outputs, sizeof(struct vrend_shader_io), compare_sid);
+
+ const struct vrend_fs_shader_info *fs_info = &key->fs_info;
+
+ if (fs_info->num_interps && fs_info->has_sample_input &&
+ ((cfg->use_gles && cfg->glsl_version < 320) ||
+ cfg->glsl_version >= 320)) {
+ ctx.shader_req_bits |= SHADER_REQ_GPU_SHADER5;
+ }
+
+ if (fs_info->num_interps && fs_info->has_noperspective &&
+ cfg->use_gles) {
+ ctx.shader_req_bits |= SHADER_REQ_SHADER_NOPERSPECTIVE_INTERPOLATION;
+ }
+
emit_header(&ctx, &ctx.glsl_strbufs);
ctx.glsl_ver_required = emit_ios(&ctx, &ctx.glsl_strbufs, &ctx.generic_ios,
+ &ctx.texcoord_ios, &ctx.patches_emitted_mask,
ctx.front_back_color_emitted_flags,
&ctx.num_interps, &ctx.has_pervertex,
&ctx.force_color_two_side,
- &ctx.winsys_adjust_y_emitted,
&ctx.shadow_samp_mask);
if (strbuf_get_error(&ctx.glsl_strbufs.glsl_hdr))
@@ -7198,32 +7964,9 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
fill_sinfo(&ctx, sinfo);
fill_var_sinfo(&ctx, var_sinfo);
- set_strbuffers(rctx, &ctx.glsl_strbufs, shader);
-
- if (ctx.prog_type == TGSI_PROCESSOR_GEOMETRY) {
- vrend_patch_vertex_shader_interpolants(rctx,
- cfg,
- shader,
- sinfo,
- key->fs_info, "gso",
- key->flatshade);
- } else if (!key->gs_present &&
- ctx.prog_type == TGSI_PROCESSOR_TESS_EVAL) {
- vrend_patch_vertex_shader_interpolants(rctx,
- cfg,
- shader,
- sinfo,
- key->fs_info, "teo",
- key->flatshade);
- } else if (!key->gs_present && !key->tes_present &&
- ctx.prog_type == TGSI_PROCESSOR_VERTEX) {
- vrend_patch_vertex_shader_interpolants(rctx,
- cfg,
- shader,
- sinfo,
- key->fs_info, "vso",
- key->flatshade);
- }
+ emit_required_sysval_uniforms (&ctx.glsl_strbufs.glsl_hdr,
+ ctx.glsl_strbufs.required_sysval_uniform_decls);
+ set_strbuffers(&ctx.glsl_strbufs, shader);
VREND_DEBUG(dbg_shader_glsl, rctx, "GLSL:");
VREND_DEBUG_EXT(dbg_shader_glsl, rctx, strarray_dump(shader));
@@ -7239,107 +7982,6 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
return false;
}
-static void replace_interp(struct vrend_strarray *program,
- const char *var_name,
- const char *pstring, const char *auxstring)
-{
- int mylen = strlen(INTERP_PREFIX) + strlen("out float ");
-
- char *ptr = program->strings[SHADER_STRING_HDR].buf;
- do {
- char *p = strstr(ptr, var_name);
- if (!p)
- break;
-
- ptr = p - mylen;
-
- memset(ptr, ' ', strlen(INTERP_PREFIX));
- memcpy(ptr, pstring, strlen(pstring));
- memcpy(ptr + strlen(pstring), auxstring, strlen(auxstring));
-
- ptr = p + strlen(var_name);
- } while (1);
-}
-
-static const char *gpu_shader5_string = "#extension GL_ARB_gpu_shader5 : require\n";
-
-static void require_gpu_shader5(struct vrend_strarray *program)
-{
- strbuf_append(&program->strings[SHADER_STRING_VER_EXT], gpu_shader5_string);
-}
-
-static const char *gpu_shader5_and_msinterp_string =
- "#extension GL_OES_gpu_shader5 : require\n"
- "#extension GL_OES_shader_multisample_interpolation : require\n";
-
-static void require_gpu_shader5_and_msinterp(struct vrend_strarray *program)
-{
- strbuf_append(&program->strings[SHADER_STRING_VER_EXT], gpu_shader5_and_msinterp_string);
-}
-
-static bool vrend_patch_vertex_shader_interpolants(MAYBE_UNUSED const struct vrend_context *rctx,
- const struct vrend_shader_cfg *cfg,
- struct vrend_strarray *prog_strings,
- const struct vrend_shader_info *vs_info,
- const struct vrend_fs_shader_info *fs_info,
- const char *oprefix, bool flatshade)
-{
- int i;
- const char *pstring, *auxstring;
- char glsl_name[64];
- if (!vs_info || !fs_info)
- return true;
-
- if (!fs_info->num_interps)
- return true;
-
- if (fs_info->has_sample_input) {
- if (!cfg->use_gles && (cfg->glsl_version >= 320))
- require_gpu_shader5(prog_strings);
-
- if (cfg->use_gles && (cfg->glsl_version < 320))
- require_gpu_shader5_and_msinterp(prog_strings);
- }
-
- for (i = 0; i < fs_info->num_interps; i++) {
- pstring = get_interp_string(cfg, fs_info->interpinfo[i].interpolate, flatshade);
- if (!pstring)
- continue;
-
- auxstring = get_aux_string(fs_info->interpinfo[i].location);
-
- switch (fs_info->interpinfo[i].semantic_name) {
- case TGSI_SEMANTIC_COLOR:
- case TGSI_SEMANTIC_BCOLOR:
- /* color is a bit trickier */
- if (fs_info->glsl_ver < 140) {
- if (fs_info->interpinfo[i].semantic_index == 1) {
- replace_interp(prog_strings, "gl_FrontSecondaryColor", pstring, auxstring);
- replace_interp(prog_strings, "gl_BackSecondaryColor", pstring, auxstring);
- } else {
- replace_interp(prog_strings, "gl_FrontColor", pstring, auxstring);
- replace_interp(prog_strings, "gl_BackColor", pstring, auxstring);
- }
- } else {
- snprintf(glsl_name, 64, "ex_c%d", fs_info->interpinfo[i].semantic_index);
- replace_interp(prog_strings, glsl_name, pstring, auxstring);
- snprintf(glsl_name, 64, "ex_bc%d", fs_info->interpinfo[i].semantic_index);
- replace_interp(prog_strings, glsl_name, pstring, auxstring);
- }
- break;
- case TGSI_SEMANTIC_GENERIC:
- snprintf(glsl_name, 64, "%s_g%d", oprefix, fs_info->interpinfo[i].semantic_index);
- replace_interp(prog_strings, glsl_name, pstring, auxstring);
- break;
- default:
- vrend_printf("unhandled semantic: %x\n", fs_info->interpinfo[i].semantic_name);
- return false;
- }
- }
-
- return true;
-}
-
static boolean
iter_vs_declaration(struct tgsi_iterate_context *iter,
struct tgsi_full_declaration *decl)
@@ -7350,7 +7992,6 @@ iter_vs_declaration(struct tgsi_iterate_context *iter,
const char *shader_out_prefix = "tco";
const char *name_prefix = "";
unsigned i;
- unsigned mask_temp;
// Generate a shader that passes through all VS outputs
if (decl->Declaration.File == TGSI_FILE_OUTPUT) {
@@ -7370,12 +8011,10 @@ iter_vs_declaration(struct tgsi_iterate_context *iter,
ctx->inputs[i].interpolate = decl->Interp.Interpolate;
ctx->inputs[i].location = decl->Interp.Location;
ctx->inputs[i].first = decl->Range.First;
- ctx->inputs[i].layout_location = 0;
ctx->inputs[i].last = decl->Range.Last;
ctx->inputs[i].array_id = decl->Declaration.Array ? decl->Array.ArrayID : 0;
- ctx->inputs[i].usage_mask = mask_temp = decl->Declaration.UsageMask;
- get_swizzle_offset_and_num_components(&ctx->inputs[i]);
-
+ ctx->inputs[i].usage_mask = decl->Declaration.UsageMask;
+ ctx->inputs[i].num_components = 4;
ctx->inputs[i].glsl_predefined_no_emit = false;
ctx->inputs[i].glsl_no_index = false;
ctx->inputs[i].override_no_wm = ctx->inputs[i].num_components == 1;
@@ -7431,7 +8070,6 @@ iter_vs_declaration(struct tgsi_iterate_context *iter,
if (ctx->inputs[i].name == TGSI_SEMANTIC_FOG){
ctx->inputs[i].usage_mask = 0xf;
ctx->inputs[i].num_components = 4;
- ctx->inputs[i].swizzle_offset = 0;
ctx->inputs[i].override_no_wm = false;
snprintf(ctx->inputs[i].glsl_name, 64, "%s_f%d", shader_in_prefix, ctx->inputs[i].sid);
snprintf(ctx->outputs[i].glsl_name, 64, "%s_f%d", shader_out_prefix, ctx->inputs[i].sid);
@@ -7439,19 +8077,8 @@ iter_vs_declaration(struct tgsi_iterate_context *iter,
snprintf(ctx->inputs[i].glsl_name, 64, "%s_c%d", shader_in_prefix, ctx->inputs[i].sid);
snprintf(ctx->outputs[i].glsl_name, 64, "%s_c%d", shader_out_prefix, ctx->inputs[i].sid);
} else if (ctx->inputs[i].name == TGSI_SEMANTIC_GENERIC) {
- snprintf(ctx->inputs[i].glsl_name, 64, "%s_g%dA%d_%x",
- shader_in_prefix, ctx->inputs[i].sid,
- ctx->inputs[i].array_id, ctx->inputs[i].usage_mask);
- snprintf(ctx->outputs[i].glsl_name, 64, "%s_g%dA%d_%x",
- shader_out_prefix, ctx->inputs[i].sid,
- ctx->inputs[i].array_id, ctx->inputs[i].usage_mask);
- } else if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH) {
- snprintf(ctx->inputs[i].glsl_name, 64, "%s_p%dA%d_%x",
- shader_in_prefix, ctx->inputs[i].sid,
- ctx->inputs[i].array_id, ctx->inputs[i].usage_mask);
- snprintf(ctx->outputs[i].glsl_name, 64, "%s_p%dA%d_%x",
- shader_out_prefix, ctx->inputs[i].sid,
- ctx->inputs[i].array_id, ctx->inputs[i].usage_mask);
+ snprintf(ctx->inputs[i].glsl_name, 64, "%s_g%d", shader_in_prefix, ctx->inputs[i].sid);
+ snprintf(ctx->outputs[i].glsl_name, 64, "%s_g%d", shader_out_prefix, ctx->inputs[i].sid);
} else {
snprintf(ctx->outputs[i].glsl_name, 64, "%s_%d", shader_in_prefix, ctx->inputs[i].first);
snprintf(ctx->inputs[i].glsl_name, 64, "%s_%d", shader_out_prefix, ctx->inputs[i].first);
@@ -7496,10 +8123,10 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *rctx,
emit_header(&ctx, &ctx.glsl_strbufs);
ctx.glsl_ver_required = emit_ios(&ctx, &ctx.glsl_strbufs, &ctx.generic_ios,
+ &ctx.texcoord_ios, &ctx.patches_emitted_mask,
ctx.front_back_color_emitted_flags,
&ctx.num_interps, &ctx.has_pervertex,
&ctx.force_color_two_side,
- &ctx.winsys_adjust_y_emitted,
&ctx.shadow_samp_mask);
emit_buf(&ctx.glsl_strbufs, "void main() {\n");
@@ -7540,7 +8167,9 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *rctx,
emit_buf(&ctx.glsl_strbufs, "}\n");
fill_sinfo(&ctx, sinfo);
- set_strbuffers(rctx, &ctx.glsl_strbufs, shader);
+ emit_required_sysval_uniforms (&ctx.glsl_strbufs.glsl_hdr,
+ ctx.glsl_strbufs.required_sysval_uniform_decls);
+ set_strbuffers(&ctx.glsl_strbufs, shader);
VREND_DEBUG(dbg_shader_glsl, rctx, "GLSL:");
VREND_DEBUG_EXT(dbg_shader_glsl, rctx, strarray_dump(shader));
@@ -7556,6 +8185,73 @@ fail:
return false;
}
+static
+void vrend_shader_write_io_as_src(struct vrend_strbuf *result,
+ const char *array_or_varname,
+ const struct vrend_shader_io *io,
+ const struct tgsi_full_src_register *src,
+ enum io_decl_type decl_type)
+{
+
+
+ if (io->first == io->last && !io->overlapping_array) {
+ strbuf_appendf(result, "%s%s", io->glsl_name, array_or_varname);
+ } else {
+ const struct vrend_shader_io *base = io->overlapping_array ? io->overlapping_array : io;
+ const int offset = src->Register.Index - io->first + io->array_offset;
+
+ if (decl_type == decl_block) {
+ if (src->Register.Indirect)
+ strbuf_appendf(result, "%s.%s[addr%d + %d]", array_or_varname, base->glsl_name,
+ src->Indirect.Index, offset);
+ else
+ strbuf_appendf(result, "%s.%s[%d]", array_or_varname, base->glsl_name, offset);
+ } else {
+ if (src->Register.Indirect)
+ strbuf_appendf(result, "%s%s[addr%d + %d]", base->glsl_name,
+ array_or_varname, src->Indirect.Index, offset);
+ else
+ strbuf_appendf(result, "%s%s[%d]", base->glsl_name,
+ array_or_varname, offset);
+ }
+ }
+}
+
+static
+void vrend_shader_write_io_as_dst(struct vrend_strbuf *result,
+ const char *array_or_varname,
+ const struct vrend_shader_io *io,
+ const struct tgsi_full_dst_register *src,
+ enum io_decl_type decl_type)
+{
+
+ if (io->first == io->last) {
+ if (io->overlapping_array)
+ strbuf_appendf(result, "%s%s[%d]", io->overlapping_array->glsl_name,
+ array_or_varname, io->array_offset);
+ else
+ strbuf_appendf(result, "%s%s", io->glsl_name, array_or_varname);
+ } else {
+ const struct vrend_shader_io *base = io->overlapping_array ? io->overlapping_array : io;
+ const int offset = src->Register.Index - io->first + io->array_offset;
+
+ if (decl_type == decl_block) {
+ if (src->Register.Indirect)
+ strbuf_appendf(result, "%s.%s[addr%d + %d]", array_or_varname, base->glsl_name,
+ src->Indirect.Index, offset);
+ else
+ strbuf_appendf(result, "%s.%s[%d]", array_or_varname, base->glsl_name, offset);
+ } else {
+ if (src->Register.Indirect)
+ strbuf_appendf(result, "%s%s[addr%d + %d]", base->glsl_name,
+ array_or_varname, src->Indirect.Index, offset);
+ else
+ strbuf_appendf(result, "%s%s[%d]", base->glsl_name,
+ array_or_varname, offset);
+ }
+ }
+}
+
bool vrend_shader_needs_alpha_func(const struct vrend_shader_key *key) {
if (!key->add_alpha_test)
return false;
@@ -7571,3 +8267,4 @@ bool vrend_shader_needs_alpha_func(const struct vrend_shader_key *key) {
return true;
}
}
+
diff --git a/src/vrend_shader.h b/src/vrend_shader.h
index de84ca46..849acd9b 100644
--- a/src/vrend_shader.h
+++ b/src/vrend_shader.h
@@ -30,6 +30,13 @@
#include "vrend_strbuf.h"
+#define VIRGL_NUM_CLIP_PLANES 8
+
+#define VREND_POLYGON_STIPPLE_SIZE 32
+
+#define VREND_SHADER_SAMPLER_VIEWS_MASK_LENGTH \
+ ((PIPE_MAX_SHADER_SAMPLER_VIEWS + 63) / 64)
+
enum gl_advanced_blend_mode
{
BLEND_NONE = 0,
@@ -70,42 +77,61 @@ struct vrend_layout_info {
unsigned sid : 16 ;
unsigned location : 16 ;
unsigned array_id : 16 ;
- unsigned usage_mask : 5;
};
struct vrend_fs_shader_info {
int num_interps;
int glsl_ver;
bool has_sample_input;
+ bool has_noperspective;
struct vrend_interp_info interpinfo[PIPE_MAX_SHADER_INPUTS];
};
struct vrend_shader_info_out {
- uint64_t num_indirect_generic : 8;
- uint64_t num_indirect_patch : 8;
- uint64_t num_generic_and_patch : 8;
- uint64_t guest_sent_io_arrays : 1;
+ uint8_t num_generic_and_patch;
+ uint8_t num_indirect_generic;
+ uint8_t num_indirect_patch;
+ bool guest_sent_io_arrays;
};
struct vrend_shader_info_in {
uint64_t generic_emitted_mask;
- uint32_t num_indirect_generic : 8;
- uint32_t num_indirect_patch : 8;
- uint32_t use_pervertex : 1;
+ uint64_t texcoord_emitted_mask;
+ bool indirect_generic_or_patch : 1;
+ bool use_pervertex : 1;
+};
+
+struct vrend_shader_io_array {
+ enum tgsi_semantic name : 6;
+ uint32_t sid : 6;
+ uint32_t size : 6;
+ uint32_t array_id : 6;
+ uint32_t padding : 8;
};
+struct vrend_shader_io_array_info {
+ uint32_t num_arrays;
+ struct vrend_shader_io_array layout[16];
+};
struct vrend_shader_info {
- uint64_t invariant_outputs;
- struct vrend_shader_info_out out;
- struct vrend_shader_info_in in;
+ uint32_t invariant_outputs[4];
+ uint64_t in_generic_emitted_mask;
+ uint64_t in_texcoord_emitted_mask;
+
+ uint64_t out_generic_emitted_mask;
+ uint64_t out_patch_emitted_mask;
+
+ struct vrend_shader_io_array_info output_arrays;
- struct vrend_layout_info generic_outputs_layout[64];
struct vrend_array *sampler_arrays;
struct vrend_array *image_arrays;
char **so_names;
struct pipe_stream_output_info so_info;
+ /* 8 cbufs + depth + stencil + samplemask */
+ int8_t fs_output_layout[12];
+
uint32_t samplers_used_mask;
uint32_t images_used_mask;
uint32_t ubo_used_mask;
@@ -124,67 +150,92 @@ struct vrend_shader_info {
int num_sampler_arrays;
int num_image_arrays;
+ uint8_t out_texcoord_emitted_mask;
uint8_t ubo_indirect : 1;
uint8_t tes_point_mode : 1;
uint8_t gles_use_tex_query_level : 1;
+ uint8_t separable_program : 1;
+ uint8_t has_input_arrays : 1;
+ uint8_t has_output_arrays : 1;
+ uint8_t use_pervertex_in : 1;
};
struct vrend_variable_shader_info {
struct vrend_fs_shader_info fs_info;
+ uint32_t num_in_clip:4;
+ uint32_t num_in_cull:4;
+ uint32_t num_out_clip:4;
+ uint32_t num_out_cull:4;
int num_ucp;
- int num_clip;
- int num_cull;
+ int legacy_color_bits;
};
struct vrend_shader_key {
- uint64_t force_invariant_inputs;
+ uint64_t out_generic_expected_mask;
+ uint64_t out_texcoord_expected_mask;
+
+ uint64_t in_generic_expected_mask;
+ uint64_t in_texcoord_expected_mask;
+ uint64_t in_patch_expected_mask;
- struct vrend_fs_shader_info *fs_info;
- struct vrend_shader_info_out input;
- struct vrend_shader_info_in output;
- struct vrend_layout_info prev_stage_generic_and_patch_outputs_layout[64];
+ uint32_t force_invariant_inputs[4];
+
+ struct vrend_fs_shader_info fs_info;
+ struct vrend_shader_io_array_info in_arrays;
union {
struct {
uint8_t surface_component_bits[PIPE_MAX_COLOR_BUFS];
uint32_t coord_replace;
uint8_t swizzle_output_rgb_to_bgr;
- uint8_t convert_linear_to_srgb_on_write;
+ uint8_t needs_manual_srgb_encode_bitmask;
uint8_t cbufs_are_a8_bitmask;
uint8_t cbufs_signed_int_bitmask;
uint8_t cbufs_unsigned_int_bitmask;
uint32_t logicop_func : 4;
uint32_t logicop_enabled : 1;
uint32_t prim_is_points : 1;
- uint32_t invert_origin : 1;
+ uint32_t lower_left_origin : 1;
+ uint32_t available_color_in_bits : 4;
} fs;
struct {
uint32_t attrib_signed_int_bitmask;
uint32_t attrib_unsigned_int_bitmask;
+ uint32_t attrib_zyxw_bitmask;
uint32_t fog_fixup_mask;
} vs;
+
+ struct {
+ uint32_t emit_clip_distance : 1;
+ } gs;
};
- uint32_t compiled_fs_uid;
+ uint64_t sampler_views_lower_swizzle_mask[VREND_SHADER_SAMPLER_VIEWS_MASK_LENGTH];
+ uint64_t sampler_views_emulated_rect_mask[VREND_SHADER_SAMPLER_VIEWS_MASK_LENGTH];
+ uint16_t tex_swizzle[PIPE_MAX_SHADER_SAMPLER_VIEWS];
uint8_t alpha_test;
- uint8_t clip_plane_enable;
- uint8_t num_cull : 4;
- uint8_t num_clip : 4;
- uint8_t pstipple_tex : 1;
+ uint8_t num_in_cull : 4;
+ uint8_t num_in_clip : 4;
+ uint8_t num_out_cull : 4;
+ uint8_t num_out_clip : 4;
+ uint8_t pstipple_enabled : 1;
uint8_t add_alpha_test : 1;
uint8_t color_two_side : 1;
uint8_t gs_present : 1;
uint8_t tcs_present : 1;
uint8_t tes_present : 1;
uint8_t flatshade : 1;
-
+ uint8_t require_input_arrays : 1;
+ uint8_t require_output_arrays : 1;
+ uint8_t use_pervertex_in : 1;
};
struct vrend_shader_cfg {
uint32_t glsl_version : 12;
uint32_t max_draw_buffers : 4;
+ uint32_t max_shader_patch_varyings : 6;
uint32_t use_gles : 1;
uint32_t use_core_profile : 1;
uint32_t use_explicit_locations : 1;
@@ -195,6 +246,9 @@ struct vrend_shader_cfg {
uint32_t use_integer : 1;
uint32_t has_dual_src_blend : 1;
uint32_t has_fbfetch_coherent : 1;
+ uint32_t has_cull_distance : 1;
+ uint32_t has_nopersective : 1;
+ uint32_t has_texture_shadow_lod : 1;
};
struct vrend_context;
@@ -229,4 +283,21 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *ctx,
bool vrend_shader_needs_alpha_func(const struct vrend_shader_key *key);
+bool vrend_shader_query_separable_program(const struct tgsi_token *tokens,
+ const struct vrend_shader_cfg *cfg);
+
+static inline bool vrend_shader_sampler_views_mask_get(
+ const uint64_t mask[static VREND_SHADER_SAMPLER_VIEWS_MASK_LENGTH],
+ int index)
+{
+ return (mask[index / 64] >> (index % 64)) & 1;
+}
+
+static inline void vrend_shader_sampler_views_mask_set(
+ uint64_t mask[static VREND_SHADER_SAMPLER_VIEWS_MASK_LENGTH],
+ int index)
+{
+ mask[index / 64] |= 1ull << (index % 64);
+}
+
#endif
diff --git a/src/vrend_strbuf.h b/src/vrend_strbuf.h
index 6fd4e929..8568d010 100644
--- a/src/vrend_strbuf.h
+++ b/src/vrend_strbuf.h
@@ -42,6 +42,7 @@ struct vrend_strbuf {
/* size of string stored without terminating NULL */
size_t size;
bool error_state;
+ bool external_buffer;
};
static inline void strbuf_set_error(struct vrend_strbuf *sb)
@@ -59,9 +60,16 @@ static inline size_t strbuf_get_len(struct vrend_strbuf *sb)
return sb->size;
}
+static inline void strbuf_reset(struct vrend_strbuf *sb)
+{
+ sb->size = 0;
+}
+
+
static inline void strbuf_free(struct vrend_strbuf *sb)
{
- free(sb->buf);
+ if (!sb->external_buffer)
+ free(sb->buf);
}
static inline bool strbuf_alloc(struct vrend_strbuf *sb, int initial_size)
@@ -72,16 +80,36 @@ static inline bool strbuf_alloc(struct vrend_strbuf *sb, int initial_size)
sb->alloc_size = initial_size;
sb->buf[0] = 0;
sb->error_state = false;
+ sb->external_buffer = false;
sb->size = 0;
return true;
}
+static inline bool strbuf_alloc_fixed(struct vrend_strbuf *sb, char *buf, int size)
+{
+ assert(buf);
+ sb->buf = buf;
+ sb->alloc_size = size;
+ sb->buf[0] = 0;
+ sb->error_state = false;
+ sb->external_buffer = true;
+ sb->size = 0;
+ return true;
+}
+
+
/* this might need tuning */
#define STRBUF_MIN_MALLOC 1024
static inline bool strbuf_grow(struct vrend_strbuf *sb, int len)
{
if (sb->size + len + 1 > sb->alloc_size) {
+
+ /* We can't grow an external buffer */
+ if (sb->external_buffer) {
+ strbuf_set_error(sb);
+ return false;
+ }
/* Reallocate to the larger size of current alloc + min realloc,
* or the resulting string size if larger.
*/
@@ -120,11 +148,14 @@ static inline void strbuf_vappendf(struct vrend_strbuf *sb, const char *fmt, va_
int len = vsnprintf(sb->buf + sb->size, sb->alloc_size - sb->size, fmt, ap);
if (len >= (int)(sb->alloc_size - sb->size)) {
- if (!strbuf_grow(sb, len))
- return;
+ if (!strbuf_grow(sb, len)) {
+ goto end;
+ }
vsnprintf(sb->buf + sb->size, sb->alloc_size - sb->size, fmt, cp);
}
sb->size += len;
+end:
+ va_end(ap);
}
__attribute__((format(printf, 2, 3)))
@@ -144,10 +175,12 @@ static inline void strbuf_vfmt(struct vrend_strbuf *sb, const char *fmt, va_list
int len = vsnprintf(sb->buf, sb->alloc_size, fmt, ap);
if (len >= (int)(sb->alloc_size)) {
if (!strbuf_grow(sb, len))
- return;
+ goto end;
vsnprintf(sb->buf, sb->alloc_size, fmt, cp);
}
sb->size = len;
+end:
+ va_end(ap);
}
__attribute__((format(printf, 2, 3)))
diff --git a/src/vrend_video.c b/src/vrend_video.c
new file mode 100644
index 00000000..38da8ff9
--- /dev/null
+++ b/src/vrend_video.c
@@ -0,0 +1,771 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Kylin Software Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * The video implementation of the vrend renderer.
+ *
+ * It is based on the general virgl video submodule and handles data transfer
+ * and synchronization between host and guest.
+ *
+ * The relationship between vaSurface and video buffer objects:
+ *
+ * GUEST (Mesa) | HOST (Virglrenderer)
+ * |
+ * +------------+ | +------------+
+ * | vaSurface | | | vaSurface | <------+
+ * +------------+ | +------------+ |
+ * | | |
+ * +---------------------------+ | +-------------------------+ |
+ * | virgl_video_buffer | | | vrend_video_buffer | |
+ * | +-----------------------+ | | | +-------------------+ | |
+ * | | vl_video_buffer | | | | | vrend_resource(s) | | |
+ * | | +-------------------+ | |<--+-->| +-------------------+ | |
+ * | | | virgl_resource(s) | | | | | +--------------------+ | |
+ * | | +-------------------+ | | | | | virgl_video_buffer |-+--+
+ * | +-----------------------+ | | | +--------------------+ |
+ * +---------------------------+ | +-------------------------+
+ *
+ * The relationship between vaContext and video codec objects:
+ *
+ * GUEST (Mesa) | HOST (Virglrenderer)
+ * |
+ * +------------+ | +------------+
+ * | vaContext | | | vaContext | <-------+
+ * +------------+ | +------------+ |
+ * | | |
+ * +------------------------+ | +--------------------------+ |
+ * | virgl_video_codec | <--+--> | vrend_video_codec | |
+ * +------------------------+ | | +--------------------+ | |
+ * | | | virgl_video_codec | -+--+
+ * | | +--------------------+ |
+ * | +--------------------------+
+ *
+ * @author Feng Jiang <jiangfeng@kylinos.cn>
+ */
+
+
+#include <sys/param.h>
+
+#include "virgl_video.h"
+#include "virgl_video_hw.h"
+
+#include "vrend_debug.h"
+#include "vrend_winsys.h"
+#include "vrend_renderer.h"
+#include "vrend_video.h"
+
+struct vrend_context;
+
+/* Per-vrend-context video state: owns every codec and buffer object
+ * created through this context. */
+struct vrend_video_context {
+   struct vrend_context *ctx;
+   struct list_head codecs;
+   struct list_head buffers;
+};
+
+/* Host-side codec object wrapping a virgl_video_codec. */
+struct vrend_video_codec {
+   struct virgl_video_codec *codec;
+   uint32_t handle;                 /* guest object handle */
+   struct vrend_resource *feed_res; /* encoding feedback */
+   struct vrend_resource *dest_res; /* encoding coded buffer */
+   struct vrend_video_context *ctx;
+   struct list_head head;           /* link in vrend_video_context.codecs */
+};
+
+/* One plane of a video buffer and the GL/EGL scratch objects used to
+ * blit between the driver dmabuf and the guest-visible resource. */
+struct vrend_video_plane {
+   uint32_t res_handle;   /* guest resource backing this plane */
+   GLuint texture;        /* texture for temporary use */
+   GLuint framebuffer;    /* framebuffer for temporary use */
+   EGLImageKHR egl_image; /* egl image for temporary use */
+};
+
+/* Host-side video buffer wrapping a virgl_video_buffer and its planes. */
+struct vrend_video_buffer {
+   struct virgl_video_buffer *buffer;
+
+   uint32_t handle;       /* guest object handle */
+   struct vrend_video_context *ctx;
+   struct list_head head; /* link in vrend_video_context.buffers */
+
+   uint32_t num_planes;
+   struct vrend_video_plane planes[3];
+};
+
+/* Recover the vrend wrapper stored as opaque data on a virgl codec. */
+static struct vrend_video_codec *vrend_video_codec(
+        struct virgl_video_codec *codec)
+{
+   return virgl_video_codec_opaque_data(codec);
+}
+
+/* Recover the vrend wrapper stored as opaque data on a virgl buffer. */
+static struct vrend_video_buffer *vrend_video_buffer(
+        struct virgl_video_buffer *buffer)
+{
+   return virgl_video_buffer_opaque_data(buffer);
+}
+
+/* Look up a codec object by its guest handle; NULL when not found. */
+static struct vrend_video_codec *get_video_codec(
+        struct vrend_video_context *ctx,
+        uint32_t cdc_handle)
+{
+   struct vrend_video_codec *iter;
+
+   LIST_FOR_EACH_ENTRY(iter, &ctx->codecs, head) {
+      if (iter->handle != cdc_handle)
+         continue;
+      return iter;
+   }
+
+   return NULL;
+}
+
+/* Look up a video buffer object by its guest handle; NULL when not found. */
+static struct vrend_video_buffer *get_video_buffer(
+        struct vrend_video_context *ctx,
+        uint32_t buf_handle)
+{
+   struct vrend_video_buffer *iter;
+
+   LIST_FOR_EACH_ENTRY(iter, &ctx->buffers, head) {
+      if (iter->handle != buf_handle)
+         continue;
+      return iter;
+   }
+
+   return NULL;
+}
+
+
+/**
+ * Copy decoded picture data from the driver dmabuf into the guest-visible
+ * video buffer planes.  Path per plane:
+ *   dmabuf fd -> EGLImage -> scratch texture -> read FBO -> plane resource.
+ *
+ * Returns 0 on success, -1 if the dmabuf is not flagged readable.
+ * Unresolvable plane resources are skipped (best effort).
+ */
+static int sync_dmabuf_to_video_buffer(struct vrend_video_buffer *buf,
+                                       const struct virgl_video_dma_buf *dmabuf)
+{
+   if (!(dmabuf->flags & VIRGL_VIDEO_DMABUF_READ_ONLY)) {
+      vrend_printf("%s: dmabuf is not readable\n", __func__);
+      return -1;
+   }
+
+   for (unsigned i = 0; i < dmabuf->num_planes && i < buf->num_planes; i++) {
+      struct vrend_video_plane *plane = &buf->planes[i];
+      struct vrend_resource *res;
+
+      res = vrend_renderer_ctx_res_lookup(buf->ctx->ctx, plane->res_handle);
+      if (!res) {
+         vrend_printf("%s: res %d not found\n", __func__, plane->res_handle);
+         continue;
+      }
+
+      /* dmabuf -> eglimage (created lazily, cached on the plane) */
+      if (EGL_NO_IMAGE_KHR == plane->egl_image) {
+         /* NOTE(review): plane dimensions are scaled as width/(i+1),
+          * height/(i+1) -- right for a 2-plane NV12 layout, but a 3rd
+          * plane (i==2) would get /3 instead of the /2 a planar YUV420
+          * format needs.  Confirm against the supported buffer formats. */
+         EGLint img_attrs[16] = {
+            EGL_LINUX_DRM_FOURCC_EXT, dmabuf->planes[i].drm_format,
+            EGL_WIDTH, dmabuf->width / (i + 1),
+            EGL_HEIGHT, dmabuf->height / (i + 1),
+            EGL_DMA_BUF_PLANE0_FD_EXT, dmabuf->planes[i].fd,
+            EGL_DMA_BUF_PLANE0_OFFSET_EXT, dmabuf->planes[i].offset,
+            EGL_DMA_BUF_PLANE0_PITCH_EXT, dmabuf->planes[i].pitch,
+            EGL_NONE
+         };
+
+         plane->egl_image = eglCreateImageKHR(eglGetCurrentDisplay(),
+               EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, img_attrs);
+      }
+
+      if (EGL_NO_IMAGE_KHR == plane->egl_image) {
+         vrend_printf("%s: create egl image failed\n", __func__);
+         continue;
+      }
+
+      /* eglimage -> texture */
+      glBindTexture(GL_TEXTURE_2D, plane->texture);
+      glEGLImageTargetTexture2DOES(GL_TEXTURE_2D,
+                                   (GLeglImageOES)(plane->egl_image));
+
+      /* texture -> framebuffer */
+      glBindFramebuffer(GL_READ_FRAMEBUFFER, plane->framebuffer);
+      glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+                             GL_TEXTURE_2D, plane->texture, 0);
+
+      /* framebuffer -> vrend_video_buffer.planes[i] */
+      glBindTexture(GL_TEXTURE_2D, res->id);
+      glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0,
+                          res->base.width0, res->base.height0);
+   }
+
+   glBindTexture(GL_TEXTURE_2D, 0);
+   glBindFramebuffer(GL_FRAMEBUFFER, 0);
+
+   return 0;
+}
+
+/**
+ * Copy the guest source picture into the driver dmabuf before encoding.
+ * Path per plane (the reverse of sync_dmabuf_to_video_buffer):
+ *   plane resource -> read FBO -> scratch texture backed by the EGLImage.
+ *
+ * Returns 0 on success, -1 if the dmabuf is not flagged writable.
+ * Unresolvable plane resources are skipped (best effort).
+ */
+static int sync_video_buffer_to_dmabuf(struct vrend_video_buffer *buf,
+                                       const struct virgl_video_dma_buf *dmabuf)
+{
+   if (!(dmabuf->flags & VIRGL_VIDEO_DMABUF_WRITE_ONLY)) {
+      vrend_printf("%s: dmabuf is not writable\n", __func__);
+      return -1;
+   }
+
+   for (unsigned i = 0; i < dmabuf->num_planes && i < buf->num_planes; i++) {
+      struct vrend_video_plane *plane = &buf->planes[i];
+      struct vrend_resource *res;
+
+      res = vrend_renderer_ctx_res_lookup(buf->ctx->ctx, plane->res_handle);
+      if (!res) {
+         vrend_printf("%s: res %d not found\n", __func__, plane->res_handle);
+         continue;
+      }
+
+      /* dmabuf -> eglimage (created lazily, cached on the plane) */
+      if (EGL_NO_IMAGE_KHR == plane->egl_image) {
+         /* NOTE(review): same width/(i+1), height/(i+1) scaling caveat as
+          * in sync_dmabuf_to_video_buffer -- confirm for 3-plane formats. */
+         EGLint img_attrs[16] = {
+            EGL_LINUX_DRM_FOURCC_EXT, dmabuf->planes[i].drm_format,
+            EGL_WIDTH, dmabuf->width / (i + 1),
+            EGL_HEIGHT, dmabuf->height / (i + 1),
+            EGL_DMA_BUF_PLANE0_FD_EXT, dmabuf->planes[i].fd,
+            EGL_DMA_BUF_PLANE0_OFFSET_EXT, dmabuf->planes[i].offset,
+            EGL_DMA_BUF_PLANE0_PITCH_EXT, dmabuf->planes[i].pitch,
+            EGL_NONE
+         };
+
+         plane->egl_image = eglCreateImageKHR(eglGetCurrentDisplay(),
+               EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, img_attrs);
+      }
+
+      if (EGL_NO_IMAGE_KHR == plane->egl_image) {
+         vrend_printf("%s: create egl image failed\n", __func__);
+         continue;
+      }
+
+      /* eglimage -> texture */
+      glBindTexture(GL_TEXTURE_2D, plane->texture);
+      glEGLImageTargetTexture2DOES(GL_TEXTURE_2D,
+                                   (GLeglImageOES)(plane->egl_image));
+
+      /* vrend_video_buffer.planes[i] -> framebuffer */
+      glBindFramebuffer(GL_READ_FRAMEBUFFER, plane->framebuffer);
+      glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+                             GL_TEXTURE_2D, res->id, 0);
+
+      /* framebuffer -> texture (writes through the EGLImage to the dmabuf) */
+      glBindTexture(GL_TEXTURE_2D, plane->texture);
+      glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0,
+                          res->base.width0, res->base.height0);
+
+   }
+
+   glBindTexture(GL_TEXTURE_2D, 0);
+   glBindFramebuffer(GL_FRAMEBUFFER, 0);
+
+   return 0;
+}
+
+
+/* Decode-path callback from the virgl video submodule: propagate the
+ * decoded picture from the driver dmabuf to the guest video buffer. */
+static void vrend_video_decode_completed(
+        struct virgl_video_codec *codec,
+        const struct virgl_video_dma_buf *dmabuf)
+{
+   struct vrend_video_buffer *buf = vrend_video_buffer(dmabuf->buf);
+
+   (void)codec;
+
+   sync_dmabuf_to_video_buffer(buf, dmabuf);
+}
+
+
+/* Encode-path callback: upload the guest source picture into the driver
+ * dmabuf before encoding.  (The "enocde" spelling is a typo, but the name
+ * is referenced by the video_callbacks table, so it is kept as-is here.) */
+static void vrend_video_enocde_upload_picture(
+        struct virgl_video_codec *codec,
+        const struct virgl_video_dma_buf *dmabuf)
+{
+   struct vrend_video_buffer *buf = vrend_video_buffer(dmabuf->buf);
+
+   (void)codec;
+
+   sync_video_buffer_to_dmabuf(buf, dmabuf);
+}
+
+/**
+ * Encode-path callback invoked when the driver finishes encoding.
+ *
+ * Copies the coded bitstream into the guest-visible destination resource,
+ * writes an encode-status feedback record into the feedback resource, and
+ * clears the per-job resource pointers stashed by
+ * vrend_video_encode_bitstream().
+ */
+static void vrend_video_encode_completed(
+        struct virgl_video_codec *codec,
+        const struct virgl_video_dma_buf *src_buf,
+        const struct virgl_video_dma_buf *ref_buf,
+        unsigned num_coded_bufs,
+        const void * const *coded_bufs,
+        const unsigned *coded_sizes)
+{
+   void *buf;
+   unsigned i, size, data_size;
+   struct virgl_video_encode_feedback feedback;
+   struct vrend_video_codec *cdc = vrend_video_codec(codec);
+
+   (void)src_buf;
+   (void)ref_buf;
+
+   if (!cdc->dest_res || !cdc->feed_res)
+      return;
+
+   /* Default to failure; overwritten on the successful copy path. */
+   memset(&feedback, 0, sizeof(feedback));
+   feedback.stat = VIRGL_VIDEO_ENCODE_STAT_FAILURE;
+   feedback.coded_size = 0;
+
+   /* sync coded data to guest */
+   if (has_bit(cdc->dest_res->storage_bits, VREND_STORAGE_GL_BUFFER)) {
+      glBindBufferARB(cdc->dest_res->target, cdc->dest_res->id);
+      buf = glMapBufferRange(cdc->dest_res->target, 0,
+                             cdc->dest_res->base.width0, GL_MAP_WRITE_BIT);
+      if (buf) {
+         for (i = 0, data_size = 0; i < num_coded_bufs &&
+              data_size < cdc->dest_res->base.width0; i++) {
+            size = MIN(cdc->dest_res->base.width0 - data_size, coded_sizes[i]);
+            memcpy((uint8_t *)buf + data_size, coded_bufs[i], size);
+            vrend_write_to_iovec(cdc->dest_res->iov, cdc->dest_res->num_iovs,
+                                 data_size, coded_bufs[i], size);
+            data_size += size;
+         }
+         glUnmapBuffer(cdc->dest_res->target);
+         feedback.stat = VIRGL_VIDEO_ENCODE_STAT_SUCCESS;
+         feedback.coded_size = data_size;
+      } else {
+         /* glMapBufferRange can return NULL (e.g. GL error/context loss);
+          * the original code dereferenced it unconditionally. */
+         vrend_printf("%s: map coded res failed\n", __func__);
+      }
+      glBindBufferARB(cdc->dest_res->target, 0);
+   } else {
+      vrend_printf("unexpected coded res type\n");
+   }
+
+   /* send feedback */
+   vrend_write_to_iovec(cdc->feed_res->iov, cdc->feed_res->num_iovs,
+                        0, (char *)(&feedback),
+                        MIN(cdc->feed_res->base.width0, sizeof(feedback)));
+
+   cdc->dest_res = NULL;
+   cdc->feed_res = NULL;
+}
+
+/* Callback table handed to virgl_video_init(); the submodule invokes these
+ * on decode completion, encode source upload and encode completion. */
+static struct virgl_video_callbacks video_callbacks = {
+   .decode_completed = vrend_video_decode_completed,
+   .encode_upload_picture = vrend_video_enocde_upload_picture,
+   .encode_completed = vrend_video_encode_completed,
+};
+
+/* Initialize the host video submodule on the given DRM fd.
+ * Returns 0 on success, -1 on an invalid fd or submodule failure. */
+int vrend_video_init(int drm_fd)
+{
+   return (drm_fd < 0) ? -1
+                       : virgl_video_init(drm_fd, &video_callbacks, 0);
+}
+
+/* Tear down the host video submodule. */
+void vrend_video_fini(void)
+{
+   virgl_video_destroy();
+}
+
+/* Fill the video-related capability bits; forwards the submodule result. */
+int vrend_video_fill_caps(union virgl_caps *caps)
+{
+   return virgl_video_fill_caps(caps);
+}
+
+/**
+ * Create a video codec object for the guest handle.
+ *
+ * Validates profile/entrypoint/chroma_format/dimensions before creating
+ * the underlying virgl codec.  Returns 0 on success or if the handle
+ * already exists, -1 on invalid arguments or creation failure.
+ */
+int vrend_video_create_codec(struct vrend_video_context *ctx,
+                             uint32_t handle,
+                             uint32_t profile,
+                             uint32_t entrypoint,
+                             uint32_t chroma_format,
+                             uint32_t level,
+                             uint32_t width,
+                             uint32_t height,
+                             uint32_t max_ref,
+                             uint32_t flags)
+{
+   struct vrend_video_codec *cdc = get_video_codec(ctx, handle);
+   struct virgl_video_create_codec_args args;
+
+   if (cdc)
+      return 0;
+
+   if (profile <= PIPE_VIDEO_PROFILE_UNKNOWN ||
+       profile >= PIPE_VIDEO_PROFILE_MAX)
+      return -1;
+
+   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_UNKNOWN ||
+       entrypoint > PIPE_VIDEO_ENTRYPOINT_ENCODE)
+      return -1;
+
+   if (chroma_format >= PIPE_VIDEO_CHROMA_FORMAT_NONE)
+      return -1;
+
+   if (!width || !height)
+      return -1;
+
+   cdc = (struct vrend_video_codec *)calloc(1, sizeof(*cdc));
+   if (!cdc)
+      return -1;
+
+   /* Zero-init so any args fields not set explicitly below are in a
+    * defined state rather than stack garbage. */
+   memset(&args, 0, sizeof(args));
+   args.profile = profile;
+   args.entrypoint = entrypoint;
+   args.chroma_format = chroma_format;
+   args.level = level;
+   args.width = width;
+   args.height = height;
+   args.max_references = max_ref;
+   args.flags = flags;
+   args.opaque = cdc;
+   cdc->codec = virgl_video_create_codec(&args);
+   if (!cdc->codec) {
+      free(cdc);
+      return -1;
+   }
+
+   cdc->handle = handle;
+   cdc->ctx = ctx;
+   list_add(&cdc->head, &ctx->codecs);
+
+   return 0;
+}
+
+/* Unlink and release a codec object; tolerates NULL. */
+static void destroy_video_codec(struct vrend_video_codec *cdc)
+{
+   if (!cdc)
+      return;
+
+   list_del(&cdc->head);
+   virgl_video_destroy_codec(cdc->codec);
+   free(cdc);
+}
+
+/* Destroy the codec identified by the guest handle; no-op if absent. */
+void vrend_video_destroy_codec(struct vrend_video_context *ctx,
+                               uint32_t handle)
+{
+   struct vrend_video_codec *cdc = get_video_codec(ctx, handle);
+
+   destroy_video_codec(cdc);
+}
+
+/**
+ * Create a video buffer object for the guest handle.
+ *
+ * res_handles[] names the guest resources backing each plane; zero entries
+ * are skipped and at most 3 planes are used.  Returns 0 on success or if
+ * the handle already exists, -1 on invalid arguments or allocation failure.
+ */
+int vrend_video_create_buffer(struct vrend_video_context *ctx,
+                              uint32_t handle,
+                              uint32_t format,
+                              uint32_t width,
+                              uint32_t height,
+                              uint32_t *res_handles,
+                              unsigned int num_res)
+{
+   unsigned i;
+   struct vrend_video_plane *plane;
+   struct vrend_video_buffer *buf = get_video_buffer(ctx, handle);
+   struct virgl_video_create_buffer_args args;
+
+   if (buf)
+      return 0;
+
+   if (format <= PIPE_FORMAT_NONE || format >= PIPE_FORMAT_COUNT)
+      return -1;
+
+   if (!width || !height || !res_handles || !num_res)
+      return -1;
+
+   buf = (struct vrend_video_buffer *)calloc(1, sizeof(*buf));
+   if (!buf)
+      return -1;
+
+   /* Zero-init so any args fields not set explicitly below are in a
+    * defined state rather than stack garbage. */
+   memset(&args, 0, sizeof(args));
+   args.format = format;
+   args.width = width;
+   args.height = height;
+   args.interlaced = 0;
+   args.opaque = buf;
+   buf->buffer = virgl_video_create_buffer(&args);
+   if (!buf->buffer) {
+      free(buf);
+      return -1;
+   }
+
+   /* Mark every plane image as "not created yet" before the lazy init in
+    * the sync paths. */
+   for (i = 0; i < ARRAY_SIZE(buf->planes); i++)
+      buf->planes[i].egl_image = EGL_NO_IMAGE_KHR;
+
+   /* Create scratch GL objects for each valid plane resource. */
+   for (i = 0, buf->num_planes = 0;
+        i < num_res && buf->num_planes < ARRAY_SIZE(buf->planes); i++) {
+
+      if (!res_handles[i])
+         continue;
+
+      plane = &buf->planes[buf->num_planes++];
+      plane->res_handle = res_handles[i];
+      glGenFramebuffers(1, &plane->framebuffer);
+      glGenTextures(1, &plane->texture);
+      glBindTexture(GL_TEXTURE_2D, plane->texture);
+      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+      glBindTexture(GL_TEXTURE_2D, 0);
+   }
+
+   buf->handle = handle;
+   buf->ctx = ctx;
+   list_add(&buf->head, &ctx->buffers);
+
+   return 0;
+}
+
+/* Unlink and release a video buffer object, including per-plane GL/EGL
+ * scratch objects; tolerates NULL. */
+static void destroy_video_buffer(struct vrend_video_buffer *buf)
+{
+   unsigned i;
+   struct vrend_video_plane *plane;
+
+   if (!buf)
+      return;
+
+   list_del(&buf->head);
+
+   for (i = 0; i < buf->num_planes; i++) {
+      plane = &buf->planes[i];
+
+      glDeleteTextures(1, &plane->texture);
+      glDeleteFramebuffers(1, &plane->framebuffer);
+      /* Only destroy images that were actually created.  The original
+       * condition was inverted (== EGL_NO_IMAGE_KHR), which destroyed the
+       * no-image sentinel and leaked every real EGLImage. */
+      if (plane->egl_image != EGL_NO_IMAGE_KHR)
+         eglDestroyImageKHR(eglGetCurrentDisplay(), plane->egl_image);
+   }
+
+   virgl_video_destroy_buffer(buf->buffer);
+
+   free(buf);
+}
+
+/* Destroy the video buffer identified by the guest handle; no-op if absent. */
+void vrend_video_destroy_buffer(struct vrend_video_context *ctx,
+                                uint32_t handle)
+{
+   struct vrend_video_buffer *buf = get_video_buffer(ctx, handle);
+
+   destroy_video_buffer(buf);
+}
+
+/* Allocate the per-context video state; returns NULL on OOM. */
+struct vrend_video_context *vrend_video_create_context(struct vrend_context *ctx)
+{
+   struct vrend_video_context *vctx = calloc(1, sizeof(*vctx));
+
+   if (!vctx)
+      return NULL;
+
+   vctx->ctx = ctx;
+   list_inithead(&vctx->codecs);
+   list_inithead(&vctx->buffers);
+
+   return vctx;
+}
+
+/* Destroy a video context together with every codec and buffer it owns. */
+void vrend_video_destroy_context(struct vrend_video_context *ctx)
+{
+   struct vrend_video_codec *vcdc, *vcdc_tmp;
+   struct vrend_video_buffer *vbuf, *vbuf_tmp;
+
+   /* _SAFE iteration: destroy_video_* unlink entries while we walk. */
+   LIST_FOR_EACH_ENTRY_SAFE(vcdc, vcdc_tmp, &ctx->codecs, head)
+      destroy_video_codec(vcdc);
+
+   LIST_FOR_EACH_ENTRY_SAFE(vbuf, vbuf_tmp, &ctx->buffers, head)
+      destroy_video_buffer(vbuf);
+
+   free(ctx);
+}
+
+/* Forward begin-frame to the virgl video submodule after resolving both
+ * guest handles; returns -1 when either handle is unknown. */
+int vrend_video_begin_frame(struct vrend_video_context *ctx,
+                            uint32_t cdc_handle,
+                            uint32_t tgt_handle)
+{
+   struct vrend_video_codec *codec = get_video_codec(ctx, cdc_handle);
+   struct vrend_video_buffer *target = get_video_buffer(ctx, tgt_handle);
+
+   return (codec && target)
+          ? virgl_video_begin_frame(codec->codec, target->buffer)
+          : -1;
+}
+
+/* Translate the guest buffer handles in an H.264 picture descriptor into
+ * host virgl video-buffer ids (unknown handles map to the NULL-buffer id). */
+static void modify_h264_picture_desc(struct vrend_video_codec *cdc,
+                                     struct vrend_video_buffer *tgt,
+                                     struct virgl_h264_picture_desc *desc)
+{
+   unsigned i;
+   struct vrend_video_buffer *vbuf;
+
+   (void)tgt;
+
+   for (i = 0; i < ARRAY_SIZE(desc->buffer_id); i++) {
+      vbuf = get_video_buffer(cdc->ctx, desc->buffer_id[i]);
+      desc->buffer_id[i] = virgl_video_buffer_id(vbuf ? vbuf->buffer : NULL);
+   }
+}
+
+/* Translate the guest reference handles in an H.265 picture descriptor into
+ * host virgl video-buffer ids (unknown handles map to the NULL-buffer id). */
+static void modify_h265_picture_desc(struct vrend_video_codec *cdc,
+                                     struct vrend_video_buffer *tgt,
+                                     struct virgl_h265_picture_desc *desc)
+{
+   unsigned i;
+   struct vrend_video_buffer *vbuf;
+
+   (void)tgt;
+
+   for (i = 0; i < ARRAY_SIZE(desc->ref); i++) {
+      vbuf = get_video_buffer(cdc->ctx, desc->ref[i]);
+      desc->ref[i] = virgl_video_buffer_id(vbuf ? vbuf->buffer : NULL);
+   }
+}
+
+/* Dispatch guest-to-host handle translation on the descriptor according to
+ * the codec's profile family; unsupported profiles are left untouched. */
+static void modify_picture_desc(struct vrend_video_codec *cdc,
+                                struct vrend_video_buffer *tgt,
+                                union virgl_picture_desc *desc)
+{
+   switch(virgl_video_codec_profile(cdc->codec)) {
+   /* All H.264/AVC profiles share the h264 descriptor layout. */
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_CONSTRAINED_BASELINE:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_EXTENDED:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH10:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH422:
+   case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH444:
+      modify_h264_picture_desc(cdc, tgt, &desc->h264);
+      break;
+   /* All H.265/HEVC profiles share the h265 descriptor layout. */
+   case PIPE_VIDEO_PROFILE_HEVC_MAIN:
+   case PIPE_VIDEO_PROFILE_HEVC_MAIN_10:
+   case PIPE_VIDEO_PROFILE_HEVC_MAIN_STILL:
+   case PIPE_VIDEO_PROFILE_HEVC_MAIN_12:
+   case PIPE_VIDEO_PROFILE_HEVC_MAIN_444:
+      modify_h265_picture_desc(cdc, tgt, &desc->h265);
+      break;
+   default:
+      break;
+   }
+}
+
+/**
+ * Decode guest bitstream buffers into the target video buffer.
+ *
+ * Each buffer_handles[i]/buffer_sizes[i] pair names a guest resource
+ * holding part of the bitstream; unresolvable buffers are skipped (best
+ * effort).  The picture descriptor is read from desc_handle and its guest
+ * buffer handles are translated to host ids before decoding.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int vrend_video_decode_bitstream(struct vrend_video_context *ctx,
+                                 uint32_t cdc_handle,
+                                 uint32_t tgt_handle,
+                                 uint32_t desc_handle,
+                                 unsigned num_buffers,
+                                 const uint32_t *buffer_handles,
+                                 const uint32_t *buffer_sizes)
+{
+   int err = -1;
+   unsigned i, num_bs, *bs_sizes = NULL;
+   void **bs_buffers = NULL;
+   struct vrend_resource *res;
+   struct vrend_video_codec *cdc = get_video_codec(ctx, cdc_handle);
+   struct vrend_video_buffer *tgt = get_video_buffer(ctx, tgt_handle);
+   union virgl_picture_desc desc;
+
+   if (!cdc || !tgt)
+      return -1;
+
+   bs_buffers = calloc(num_buffers, sizeof(void *));
+   if (!bs_buffers) {
+      vrend_printf("%s: alloc bs_buffers failed\n", __func__);
+      return -1;
+   }
+
+   bs_sizes = calloc(num_buffers, sizeof(unsigned));
+   if (!bs_sizes) {
+      vrend_printf("%s: alloc bs_sizes failed\n", __func__);
+      goto err;
+   }
+
+   /* Stage each readable bitstream buffer out of its guest iovecs. */
+   for (i = 0, num_bs = 0; i < num_buffers; i++) {
+      res = vrend_renderer_ctx_res_lookup(ctx->ctx, buffer_handles[i]);
+      if (!res || !res->ptr) {
+         /* original message lacked the trailing newline */
+         vrend_printf("%s: bs res %d invalid or not found\n",
+                      __func__, buffer_handles[i]);
+         continue;
+      }
+
+      vrend_read_from_iovec(res->iov, res->num_iovs, 0,
+                            res->ptr, buffer_sizes[i]);
+      bs_buffers[num_bs] = res->ptr;
+      bs_sizes[num_bs] = buffer_sizes[i];
+      num_bs++;
+   }
+
+   res = vrend_renderer_ctx_res_lookup(ctx->ctx, desc_handle);
+   if (!res) {
+      vrend_printf("%s: desc res %d not found\n", __func__, desc_handle);
+      goto err;
+   }
+   memset(&desc, 0, sizeof(desc));
+   vrend_read_from_iovec(res->iov, res->num_iovs, 0, (char *)(&desc),
+                         MIN(res->base.width0, sizeof(desc)));
+   modify_picture_desc(cdc, tgt, &desc);
+
+   err = virgl_video_decode_bitstream(cdc->codec, tgt->buffer, &desc,
+                                      num_bs, (const void * const *)bs_buffers,
+                                      bs_sizes);
+
+err:
+   free(bs_buffers);
+   free(bs_sizes);
+
+   return err;
+}
+
+/**
+ * Encode one source picture.
+ *
+ * Reads the picture descriptor from desc_handle, then stashes the feedback
+ * and destination resources on the codec; they are consumed later by the
+ * vrend_video_encode_completed() callback.  Returns 0 on success, -1 on
+ * failure.
+ */
+int vrend_video_encode_bitstream(struct vrend_video_context *ctx,
+                                 uint32_t cdc_handle,
+                                 uint32_t src_handle,
+                                 uint32_t dest_handle,
+                                 uint32_t desc_handle,
+                                 uint32_t feed_handle)
+{
+   union virgl_picture_desc desc;
+   struct vrend_resource *dest_res, *desc_res, *feed_res;
+   struct vrend_video_codec *cdc = get_video_codec(ctx, cdc_handle);
+   struct vrend_video_buffer *src = get_video_buffer(ctx, src_handle);
+
+   if (!cdc || !src)
+      return -1;
+
+   /* Feedback resource */
+   feed_res = vrend_renderer_ctx_res_lookup(ctx->ctx, feed_handle);
+   if (!feed_res) {
+      vrend_printf("%s: feedback res %d not found\n", __func__, feed_handle);
+      return -1;
+   }
+
+   /* Picture descriptor resource */
+   desc_res = vrend_renderer_ctx_res_lookup(ctx->ctx, desc_handle);
+   if (!desc_res) {
+      vrend_printf("%s: desc res %d not found\n", __func__, desc_handle);
+      return -1;
+   }
+   memset(&desc, 0, sizeof(desc));
+   vrend_read_from_iovec(desc_res->iov, desc_res->num_iovs, 0, (char *)(&desc),
+                         MIN(desc_res->base.width0, sizeof(desc)));
+
+   /* Destination buffer resource. */
+   dest_res = vrend_renderer_ctx_res_lookup(ctx->ctx, dest_handle);
+   if (!dest_res) {
+      vrend_printf("%s: dest res %d not found\n", __func__, dest_handle);
+      return -1;
+   }
+
+   /* Consumed (and cleared) by vrend_video_encode_completed(). */
+   cdc->feed_res = feed_res;
+   cdc->dest_res = dest_res;
+
+   return virgl_video_encode_bitstream(cdc->codec, src->buffer, &desc);
+}
+
+/* Forward end-frame to the virgl video submodule after resolving both
+ * guest handles; returns -1 when either handle is unknown. */
+int vrend_video_end_frame(struct vrend_video_context *ctx,
+                          uint32_t cdc_handle,
+                          uint32_t tgt_handle)
+{
+   struct vrend_video_codec *codec = get_video_codec(ctx, cdc_handle);
+   struct vrend_video_buffer *target = get_video_buffer(ctx, tgt_handle);
+
+   return (codec && target)
+          ? virgl_video_end_frame(codec->codec, target->buffer)
+          : -1;
+}
+
diff --git a/src/vrend_video.h b/src/vrend_video.h
new file mode 100644
index 00000000..7b61db12
--- /dev/null
+++ b/src/vrend_video.h
@@ -0,0 +1,95 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2022 Kylin Software Co., Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * The video interface of the vrend renderer.
+ *
+ * These interfaces are mainly called by the vrend decode submodule
+ * to process the corresponding virgl context video command.
+ *
+ * @author Feng Jiang <jiangfeng@kylinos.cn>
+ */
+
+#ifndef VREND_VIDEO_H
+#define VREND_VIDEO_H
+
+#include <virgl_hw.h>
+
+#define VREND_VIDEO_BUFFER_PLANE_NUM 3
+
+struct vrend_video_context;
+
+int vrend_video_init(int drm_fd);
+void vrend_video_fini(void);
+
+int vrend_video_fill_caps(union virgl_caps *caps);
+
+struct vrend_video_context *vrend_video_create_context(struct vrend_context *ctx);
+void vrend_video_destroy_context(struct vrend_video_context *ctx);
+
+int vrend_video_create_codec(struct vrend_video_context *ctx,
+ uint32_t handle,
+ uint32_t profile,
+ uint32_t entrypoint,
+ uint32_t chroma_format,
+ uint32_t level,
+ uint32_t width,
+ uint32_t height,
+ uint32_t max_ref,
+ uint32_t flags);
+void vrend_video_destroy_codec(struct vrend_video_context *ctx,
+ uint32_t handle);
+
+int vrend_video_create_buffer(struct vrend_video_context *ctx,
+ uint32_t handle,
+ uint32_t format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t *res_handles,
+ unsigned int num_res);
+void vrend_video_destroy_buffer(struct vrend_video_context *ctx,
+ uint32_t handle);
+
+int vrend_video_begin_frame(struct vrend_video_context *ctx,
+ uint32_t cdc_handle,
+ uint32_t tgt_handle);
+int vrend_video_decode_bitstream(struct vrend_video_context *ctx,
+ uint32_t cdc_handle,
+ uint32_t tgt_handle,
+ uint32_t desc_handle,
+ unsigned num_buffers,
+ const uint32_t *buffer_handles,
+ const uint32_t *buffer_sizes);
+int vrend_video_encode_bitstream(struct vrend_video_context *ctx,
+ uint32_t cdc_handle,
+ uint32_t src_handle,
+ uint32_t dest_handle,
+ uint32_t desc_handle,
+ uint32_t feed_handle);
+int vrend_video_end_frame(struct vrend_video_context *ctx,
+ uint32_t cdc_handle,
+ uint32_t tgt_handle);
+
+#endif /* VREND_VIDEO_H */
diff --git a/src/vrend_winsys.c b/src/vrend_winsys.c
index 669af818..6a73b7fd 100644
--- a/src/vrend_winsys.c
+++ b/src/vrend_winsys.c
@@ -33,7 +33,8 @@
enum {
CONTEXT_NONE,
CONTEXT_EGL,
- CONTEXT_GLX
+ CONTEXT_GLX,
+ CONTEXT_EGL_EXTERNAL
};
static int use_context = CONTEXT_NONE;
@@ -102,6 +103,10 @@ void vrend_winsys_cleanup(void)
virgl_gbm_fini(gbm);
gbm = NULL;
}
+ } else if (use_context == CONTEXT_EGL_EXTERNAL) {
+ free(egl);
+ egl = NULL;
+ use_context = CONTEXT_NONE;
}
#endif
#ifdef HAVE_EPOXY_GLX_H
@@ -113,6 +118,23 @@ void vrend_winsys_cleanup(void)
#endif
}
+int vrend_winsys_init_external(void *egl_display)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ egl = virgl_egl_init_external(egl_display);
+ if (!egl)
+ return -1;
+
+ use_context = CONTEXT_EGL_EXTERNAL;
+#else
+ (void)egl_display;
+ vrend_printf( "EGL is not supported on this platform\n");
+ return -1;
+#endif
+
+ return 0;
+}
+
virgl_renderer_gl_context vrend_winsys_create_context(struct virgl_gl_ctx_param *param)
{
#ifdef HAVE_EPOXY_EGL_H
@@ -144,15 +166,24 @@ void vrend_winsys_destroy_context(virgl_renderer_gl_context ctx)
int vrend_winsys_make_context_current(virgl_renderer_gl_context ctx)
{
+ int ret = -1;
#ifdef HAVE_EPOXY_EGL_H
- if (use_context == CONTEXT_EGL)
- return virgl_egl_make_context_current(egl, ctx);
+ if (use_context == CONTEXT_EGL) {
+ ret = virgl_egl_make_context_current(egl, ctx);
+ if (ret)
+ vrend_printf("%s: Error switching context: %s\n",
+ __func__, virgl_egl_error_string(eglGetError()));
+ }
#endif
#ifdef HAVE_EPOXY_GLX_H
- if (use_context == CONTEXT_GLX)
- return virgl_glx_make_context_current(glx_info, ctx);
+ if (use_context == CONTEXT_GLX) {
+ ret = virgl_glx_make_context_current(glx_info, ctx);
+ if (ret)
+ vrend_printf("%s: Error switching context\n", __func__);
+ }
#endif
- return -1;
+ assert(!ret && "Failed to switch GL context");
+ return ret;
}
int vrend_winsys_has_gl_colorspace(void)
@@ -164,7 +195,8 @@ int vrend_winsys_has_gl_colorspace(void)
#endif
return use_context == CONTEXT_NONE ||
use_context == CONTEXT_GLX ||
- (use_context == CONTEXT_EGL && egl_colorspace);
+ (use_context == CONTEXT_EGL && egl_colorspace) ||
+ (use_context == CONTEXT_EGL_EXTERNAL && egl_colorspace);
}
int vrend_winsys_get_fourcc_for_texture(uint32_t tex_id, uint32_t format, int *fourcc)
diff --git a/src/vrend_winsys.h b/src/vrend_winsys.h
index 17507ffb..5e605403 100644
--- a/src/vrend_winsys.h
+++ b/src/vrend_winsys.h
@@ -48,6 +48,8 @@ extern struct virgl_gbm *gbm;
int vrend_winsys_init(uint32_t flags, int preferred_fd);
void vrend_winsys_cleanup(void);
+int vrend_winsys_init_external(void *egl_display);
+
virgl_renderer_gl_context vrend_winsys_create_context(struct virgl_gl_ctx_param *param);
void vrend_winsys_destroy_context(virgl_renderer_gl_context ctx);
int vrend_winsys_make_context_current(virgl_renderer_gl_context ctx);
diff --git a/src/vrend_winsys_egl.c b/src/vrend_winsys_egl.c
index 4dcc668f..4b38d5ea 100644
--- a/src/vrend_winsys_egl.c
+++ b/src/vrend_winsys_egl.c
@@ -33,7 +33,9 @@
#define EGL_EGLEXT_PROTOTYPES
#include <errno.h>
#include <fcntl.h>
+#include <poll.h>
#include <stdbool.h>
+#include <unistd.h>
#include <xf86drm.h>
#include "util/u_memory.h"
@@ -140,10 +142,7 @@ static bool virgl_egl_get_interface(struct egl_funcs *funcs)
assert(funcs);
- if (virgl_egl_has_extension_in_string(client_extensions, "EGL_KHR_platform_base")) {
- funcs->eglGetPlatformDisplay =
- (PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplay");
- } else if (virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_base")) {
+ if (virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_base")) {
funcs->eglGetPlatformDisplay =
(PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplayEXT");
}
@@ -310,20 +309,7 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
/* Make -Wdangling-else happy. */
} else /* Fallback to surfaceless. */
#endif
- if (virgl_egl_has_extension_in_string(client_extensions, "EGL_KHR_platform_base")) {
- PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display =
- (PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplay");
-
- if (!get_platform_display)
- goto fail;
-
- if (surfaceless) {
- egl->egl_display = get_platform_display (EGL_PLATFORM_SURFACELESS_MESA,
- EGL_DEFAULT_DISPLAY, NULL);
- } else
- egl->egl_display = get_platform_display (EGL_PLATFORM_GBM_KHR,
- (EGLNativeDisplayType)egl->gbm->device, NULL);
- } else if (virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_base")) {
+ if (virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_base")) {
PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display =
(PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplayEXT");
@@ -419,6 +405,37 @@ void virgl_egl_destroy(struct virgl_egl *egl)
free(egl);
}
+/* Wrap an externally-created EGLDisplay in a virgl_egl structure.  The
+ * caller retains ownership of the display; cleanup frees only the wrapper
+ * (see the CONTEXT_EGL_EXTERNAL branch in vrend_winsys_cleanup). */
+struct virgl_egl *virgl_egl_init_external(EGLDisplay egl_display)
+{
+   const char *extensions;
+   struct virgl_egl *egl;
+
+   egl = calloc(1, sizeof(struct virgl_egl));
+   if (!egl)
+      return NULL;
+
+   egl->egl_display = egl_display;
+
+   /* NOTE(review): eglQueryString() can return NULL for a bad display --
+    * confirm virgl_egl_init_extensions() tolerates a NULL string. */
+   extensions = eglQueryString(egl->egl_display, EGL_EXTENSIONS);
+#ifdef VIRGL_EGL_DEBUG
+   vrend_printf( "EGL version: %s\n",
+                 eglQueryString(egl->egl_display, EGL_VERSION));
+   vrend_printf( "EGL vendor: %s\n",
+                 eglQueryString(egl->egl_display, EGL_VENDOR));
+   vrend_printf( "EGL extensions: %s\n", extensions);
+#endif
+
+   if (virgl_egl_init_extensions(egl, extensions)) {
+      free(egl);
+      return NULL;
+   }
+
+   /* NOTE(review): virgl_gbm_init(-1) failure is not checked here, so
+    * egl->gbm may end up NULL -- verify downstream users handle that. */
+   gbm = virgl_gbm_init(-1);
+   egl->gbm = gbm;
+
+   return egl;
+}
+
virgl_renderer_gl_context virgl_egl_create_context(struct virgl_egl *egl, struct virgl_gl_ctx_param *vparams)
{
EGLContext egl_ctx;
@@ -445,7 +462,7 @@ int virgl_egl_make_context_current(struct virgl_egl *egl, virgl_renderer_gl_cont
EGLContext egl_ctx = (EGLContext)virglctx;
return eglMakeCurrent(egl->egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
- egl_ctx);
+ egl_ctx) ? 0 : -1;
}
virgl_renderer_gl_context virgl_egl_get_current_context(UNUSED struct virgl_egl *egl)
@@ -468,7 +485,7 @@ int virgl_egl_get_fourcc_for_texture(struct virgl_egl *egl, uint32_t tex_id, uin
}
image = eglCreateImageKHR(egl->egl_display, eglGetCurrentContext(), EGL_GL_TEXTURE_2D_KHR,
- (EGLClientBuffer)(unsigned long)tex_id, NULL);
+ (EGLClientBuffer)(uintptr_t)tex_id, NULL);
if (!image)
return EINVAL;
@@ -493,7 +510,7 @@ int virgl_egl_get_fd_for_texture2(struct virgl_egl *egl, uint32_t tex_id, int *f
int ret = EINVAL;
EGLImageKHR image = eglCreateImageKHR(egl->egl_display, eglGetCurrentContext(),
EGL_GL_TEXTURE_2D_KHR,
- (EGLClientBuffer)(unsigned long)tex_id, NULL);
+ (EGLClientBuffer)(uintptr_t)tex_id, NULL);
if (!image)
return EINVAL;
if (!has_bit(egl->extension_bits, EGL_MESA_IMAGE_DMA_BUF_EXPORT))
@@ -518,7 +535,7 @@ int virgl_egl_get_fd_for_texture(struct virgl_egl *egl, uint32_t tex_id, int *fd
EGLBoolean success;
int ret;
image = eglCreateImageKHR(egl->egl_display, eglGetCurrentContext(), EGL_GL_TEXTURE_2D_KHR,
- (EGLClientBuffer)(unsigned long)tex_id, NULL);
+ (EGLClientBuffer)(uintptr_t)tex_id, NULL);
if (!image)
return EINVAL;
@@ -719,13 +736,38 @@ void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence) {
eglDestroySyncKHR(egl->egl_display, fence);
}
-bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, uint64_t timeout)
+bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, bool blocking)
{
- EGLint ret = eglClientWaitSyncKHR(egl->egl_display, fence, 0, timeout);
- if (ret == EGL_FALSE) {
- vrend_printf("wait sync failed\n");
+ /* attempt to poll the native fence fd instead of eglClientWaitSyncKHR() to
+ * avoid Mesa's eglapi global-display-lock synchronizing vrend's sync_thread.
+ */
+ int fd = -1;
+ if (!virgl_egl_export_fence(egl, fence, &fd)) {
+ EGLint egl_result = eglClientWaitSyncKHR(egl->egl_display, fence, 0,
+ blocking ? EGL_FOREVER_KHR : 0);
+ if (egl_result == EGL_FALSE)
+ vrend_printf("wait sync failed\n");
+ return egl_result != EGL_TIMEOUT_EXPIRED_KHR;
}
- return ret != EGL_TIMEOUT_EXPIRED_KHR;
+ assert(fd >= 0);
+
+ int ret;
+ struct pollfd pfd = {
+ .fd = fd,
+ .events = POLLIN,
+ };
+ do {
+ ret = poll(&pfd, 1, blocking ? -1 : 0);
+ if (ret > 0 && (pfd.revents & (POLLERR | POLLNVAL))) {
+ ret = -1;
+ break;
+ }
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+ close(fd);
+
+ if (ret < 0)
+ vrend_printf("wait sync failed\n");
+ return ret != 0;
}
bool virgl_egl_export_signaled_fence(struct virgl_egl *egl, int *out_fd) {
@@ -741,3 +783,27 @@ bool virgl_egl_different_gpu(struct virgl_egl *egl)
{
return egl->different_gpu;
}
+
+const char *virgl_egl_error_string(EGLint error)
+{
+ switch (error) {
+#define CASE_STR( value ) case value: return #value;
+ CASE_STR( EGL_SUCCESS )
+ CASE_STR( EGL_NOT_INITIALIZED )
+ CASE_STR( EGL_BAD_ACCESS )
+ CASE_STR( EGL_BAD_ALLOC )
+ CASE_STR( EGL_BAD_ATTRIBUTE )
+ CASE_STR( EGL_BAD_CONTEXT )
+ CASE_STR( EGL_BAD_CONFIG )
+ CASE_STR( EGL_BAD_CURRENT_SURFACE )
+ CASE_STR( EGL_BAD_DISPLAY )
+ CASE_STR( EGL_BAD_SURFACE )
+ CASE_STR( EGL_BAD_MATCH )
+ CASE_STR( EGL_BAD_PARAMETER )
+ CASE_STR( EGL_BAD_NATIVE_PIXMAP )
+ CASE_STR( EGL_BAD_NATIVE_WINDOW )
+ CASE_STR( EGL_CONTEXT_LOST )
+#undef CASE_STR
+ default: return "Unknown error";
+ }
+}
diff --git a/src/vrend_winsys_egl.h b/src/vrend_winsys_egl.h
index e8dcf854..132e2154 100644
--- a/src/vrend_winsys_egl.h
+++ b/src/vrend_winsys_egl.h
@@ -37,6 +37,8 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
void virgl_egl_destroy(struct virgl_egl *egl);
+struct virgl_egl *virgl_egl_init_external(EGLDisplay egl_display);
+
virgl_renderer_gl_context virgl_egl_create_context(struct virgl_egl *egl,
struct virgl_gl_ctx_param *vparams);
@@ -73,8 +75,9 @@ void *virgl_egl_aux_plane_image_from_gbm_bo(struct virgl_egl *egl, struct gbm_bo
bool virgl_egl_supports_fences(struct virgl_egl *egl);
EGLSyncKHR virgl_egl_fence_create(struct virgl_egl *egl);
void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence);
-bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, uint64_t timeout);
+bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, bool blocking);
bool virgl_egl_export_signaled_fence(struct virgl_egl *egl, int *out_fd);
bool virgl_egl_export_fence(struct virgl_egl *egl, EGLSyncKHR fence, int *out_fd);
bool virgl_egl_different_gpu(struct virgl_egl *egl);
+const char *virgl_egl_error_string(EGLint error);
#endif
diff --git a/src/vrend_winsys_gbm.c b/src/vrend_winsys_gbm.c
index ead3c268..a92371b4 100644
--- a/src/vrend_winsys_gbm.c
+++ b/src/vrend_winsys_gbm.c
@@ -99,8 +99,8 @@ static const struct planar_layout triplanar_yuv_420_layout = {
static const struct format_conversion conversions[] = {
{ GBM_FORMAT_RGB565, VIRGL_FORMAT_B5G6R5_UNORM },
- { GBM_FORMAT_ABGR8888, VIRGL_FORMAT_B8G8R8A8_UNORM },
- { GBM_FORMAT_XBGR8888, VIRGL_FORMAT_B8G8R8X8_UNORM },
+ { GBM_FORMAT_ARGB8888, VIRGL_FORMAT_B8G8R8A8_UNORM },
+ { GBM_FORMAT_XRGB8888, VIRGL_FORMAT_B8G8R8X8_UNORM },
{ GBM_FORMAT_ABGR2101010, VIRGL_FORMAT_R10G10B10A2_UNORM },
{ GBM_FORMAT_ABGR16161616F, VIRGL_FORMAT_R16G16B16A16_FLOAT },
{ GBM_FORMAT_NV12, VIRGL_FORMAT_NV12 },
diff --git a/src/vrend_winsys_glx.c b/src/vrend_winsys_glx.c
index 5b907ad6..66703749 100644
--- a/src/vrend_winsys_glx.c
+++ b/src/vrend_winsys_glx.c
@@ -100,7 +100,7 @@ void virgl_glx_destroy_context(struct virgl_glx *d, virgl_renderer_gl_context vi
int virgl_glx_make_context_current(struct virgl_glx *d, virgl_renderer_gl_context virglctx)
{
- return glXMakeContextCurrent(d->display, d->pbuffer, d->pbuffer, virglctx);
+ return glXMakeContextCurrent(d->display, d->pbuffer, d->pbuffer, virglctx) ? 0 : -1;
}
uint32_t virgl_glx_query_video_memory(struct virgl_glx *d)
@@ -113,4 +113,4 @@ uint32_t virgl_glx_query_video_memory(struct virgl_glx *d)
}
return video_memory;
-} \ No newline at end of file
+}
diff --git a/tests/fuzzer/meson.build b/tests/fuzzer/meson.build
index 7c9a10da..596f89b5 100644
--- a/tests/fuzzer/meson.build
+++ b/tests/fuzzer/meson.build
@@ -32,10 +32,10 @@ virgl_fuzzer = executable(
dependencies : [libvirglrenderer_dep, gallium_dep, epoxy_dep]
)
-if with_venus
- virgl_venus_fuzzer = executable(
- 'virgl_venus_fuzzer',
- 'virgl_venus_fuzzer.c',
+if with_drm
+ virgl_drm_fuzzer = executable(
+ 'virgl_drm_fuzzer',
+ 'virgl_drm_fuzzer.c',
c_args : [ '-fsanitize=fuzzer' ],
link_args : [ '-fsanitize=fuzzer' ],
dependencies : [libvirglrenderer_dep]
diff --git a/tests/fuzzer/virgl_venus_fuzzer.c b/tests/fuzzer/virgl_drm_fuzzer.c
index eff1ba4c..2a38a4af 100644
--- a/tests/fuzzer/virgl_venus_fuzzer.c
+++ b/tests/fuzzer/virgl_drm_fuzzer.c
@@ -8,7 +8,7 @@
#include <stdint.h>
#include <stdlib.h>
-#include "os/os_misc.h"
+#include "util/macros.h"
#include "virglrenderer.h"
#include "virglrenderer_hw.h"
@@ -31,6 +31,21 @@ fuzz_debug_callback(UNUSED const char *fmt, UNUSED va_list ap)
/* no logging */
}
+static void
+fuzz_write_context_fence(UNUSED void *cookie,
+ UNUSED uint32_t ctx_id,
+ UNUSED uint32_t ring_idx,
+ UNUSED uint64_t fence_id)
+{
+
+}
+
+
+static struct virgl_renderer_callbacks callbacks = {
+ .version = 3,
+ .write_context_fence = fuzz_write_context_fence,
+};
+
static struct fuzz_renderer *
fuzz_renderer_get(void)
{
@@ -38,8 +53,10 @@ fuzz_renderer_get(void)
if (renderer.initialized)
return &renderer;
+ int flags = VIRGL_RENDERER_NO_VIRGL | VIRGL_RENDERER_DRM |
+ VIRGL_RENDERER_ASYNC_FENCE_CB;
int ret =
- virgl_renderer_init(NULL, VIRGL_RENDERER_VENUS | VIRGL_RENDERER_NO_VIRGL, NULL);
+ virgl_renderer_init(NULL, flags, &callbacks);
if (ret)
abort();
@@ -55,8 +72,8 @@ static uint32_t
fuzz_context_create(UNUSED struct fuzz_renderer *renderer)
{
const uint32_t ctx_id = 1;
- const char name[] = "virgl_venus_fuzzer";
- int ret = virgl_renderer_context_create_with_flags(ctx_id, VIRGL_RENDERER_CAPSET_VENUS,
+ const char name[] = "virgl_drm_fuzzer";
+ int ret = virgl_renderer_context_create_with_flags(ctx_id, VIRGL_RENDERER_CAPSET_DRM,
sizeof(name), name);
if (ret)
abort();
@@ -76,6 +93,17 @@ fuzz_context_submit(UNUSED struct fuzz_renderer *renderer,
const uint8_t *data,
size_t size)
{
+ /* We'll not be able to hit some codepaths without shmem buffer setup,
+ * but we'd also like to hit any potential errors that could come from
+ * malicious input before shmem is set up. So run the same input twice,
+ * once before and once after shmem setup.
+ */
+ virgl_renderer_submit_cmd((void *)data, ctx_id, size / 4);
+ virgl_renderer_resource_create_blob(&(struct virgl_renderer_resource_create_blob_args){
+ .res_handle = 1,
+ .ctx_id = ctx_id,
+ .size = 0x1000,
+ });
virgl_renderer_submit_cmd((void *)data, ctx_id, size / 4);
}
diff --git a/tests/fuzzer/virgl_fuzzer.c b/tests/fuzzer/virgl_fuzzer.c
index a368f1e0..af966bbf 100644
--- a/tests/fuzzer/virgl_fuzzer.c
+++ b/tests/fuzzer/virgl_fuzzer.c
@@ -36,6 +36,7 @@
#include <epoxy/egl.h>
+#include "util/macros.h"
#include "virglrenderer.h"
int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
@@ -58,12 +59,13 @@ struct fuzzer_cookie
EGLContext ctx;
};
-static void fuzzer_write_fence(void *opaque, uint32_t fence)
+static void fuzzer_write_fence(UNUSED void *opaque, UNUSED uint32_t fence)
{
}
static virgl_renderer_gl_context fuzzer_create_gl_context(
- void *cookie, int scanout_idx, struct virgl_renderer_gl_ctx_param *param)
+ void *cookie, UNUSED int scanout_idx,
+ struct virgl_renderer_gl_ctx_param *param)
{
struct fuzzer_cookie *cookie_data = cookie;
EGLContext shared = param->shared ? eglGetCurrentContext() : NULL;
@@ -85,7 +87,8 @@ static void fuzzer_destroy_gl_context(void *cookie,
eglDestroyContext(cookie_data->display, ctx);
}
-static int fuzzer_make_current(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx)
+static int fuzzer_make_current(UNUSED void *cookie, UNUSED int scanout_idx,
+ UNUSED virgl_renderer_gl_context ctx)
{
return 0;
}
diff --git a/tests/meson.build b/tests/meson.build
index 01649ebf..96385bc9 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -30,14 +30,20 @@ libvrtest_sources = [
'testvirgl_encode.h',
]
+libvrtest_depends = [
+ libvirgl_dep,
+ gallium_dep,
+ check_dep,
+]
+
+if with_tracing == 'percetto'
+ libvrtest_depends += [percetto_dep]
+endif
+
libvrtest = static_library(
'vrtest',
libvrtest_sources,
- dependencies : [
- libvirgl_dep,
- gallium_dep,
- check_dep
- ]
+ dependencies : libvrtest_depends
)
tests = [
@@ -53,15 +59,34 @@ fuzzy_tests = [
['test_fuzzer_formats', 'test_fuzzer_formats.c'],
]
+test_depends = [
+ libvirglrenderer_dep,
+ check_dep,
+]
+
+if with_tracing == 'percetto'
+ test_depends += [percetto_dep]
+endif
+
foreach t : tests
test_virgl = executable(t[0], t[1], link_with: libvrtest,
- dependencies : [libvirglrenderer_dep, check_dep])
+ dependencies : test_depends)
test(t[0], test_virgl)
endforeach
+
+fuzzytest_depends = [
+ libvirglrenderer_dep,
+ epoxy_dep,
+]
+
+if with_tracing == 'percetto'
+ fuzzytest_depends += [percetto_dep]
+endif
+
foreach t : fuzzy_tests
test_virgl_fuzzy = executable(t[0], t[1], link_with: libvrtest,
- dependencies : [libvirglrenderer_dep, epoxy_dep])
+ dependencies : fuzzytest_depends)
test(t[0], test_virgl)
endforeach
diff --git a/tests/test_fuzzer_formats.c b/tests/test_fuzzer_formats.c
index 154a2e58..7e418342 100644
--- a/tests/test_fuzzer_formats.c
+++ b/tests/test_fuzzer_formats.c
@@ -39,7 +39,6 @@
#include "vrend_winsys_egl.h"
#include "virglrenderer.h"
#include "virgl_protocol.h"
-#include "os/os_misc.h"
#include <epoxy/egl.h>
@@ -958,6 +957,61 @@ static void test_vrend_set_signle_abo_heap_overflow() {
virgl_renderer_submit_cmd((void *) cmd, ctx_id, 0xde);
}
+static void test_vrend_set_shader_images_overflow()
+{
+ uint32_t num_shaders = PIPE_MAX_SHADER_IMAGES + 1;
+ uint32_t size = num_shaders * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 3;
+ uint32_t cmd[size];
+ int i = 0;
+ cmd[i++] = ((size - 1)<< 16) | 0 << 8 | VIRGL_CCMD_SET_SHADER_IMAGES;
+ cmd[i++] = PIPE_SHADER_FRAGMENT;
+ memset(&cmd[i], 0, size - i);
+
+ virgl_renderer_submit_cmd((void *) cmd, ctx_id, size);
+}
+
+/* Test adapted from yaojun8558363@gmail.com:
+ * https://gitlab.freedesktop.org/virgl/virglrenderer/-/issues/250
+*/
+static void test_vrend_3d_resource_overflow() {
+
+ struct virgl_renderer_resource_create_args resource;
+ resource.handle = 0x4c474572;
+ resource.target = PIPE_TEXTURE_2D_ARRAY;
+ resource.format = VIRGL_FORMAT_Z24X8_UNORM;
+ resource.nr_samples = 2;
+ resource.last_level = 0;
+ resource.array_size = 3;
+ resource.bind = VIRGL_BIND_SAMPLER_VIEW;
+ resource.depth = 1;
+ resource.width = 8;
+ resource.height = 4;
+ resource.flags = 0;
+
+ virgl_renderer_resource_create(&resource, NULL, 0);
+ virgl_renderer_ctx_attach_resource(ctx_id, resource.handle);
+
+ uint32_t size = 0x400;
+ uint32_t cmd[size];
+ int i = 0;
+ cmd[i++] = (size - 1) << 16 | 0 << 8 | VIRGL_CCMD_RESOURCE_INLINE_WRITE;
+ cmd[i++] = resource.handle;
+ cmd[i++] = 0; // level
+ cmd[i++] = 0; // usage
+ cmd[i++] = 0; // stride
+ cmd[i++] = 0; // layer_stride
+ cmd[i++] = 0; // x
+ cmd[i++] = 0; // y
+ cmd[i++] = 0; // z
+ cmd[i++] = 8; // w
+ cmd[i++] = 4; // h
+ cmd[i++] = 3; // d
+ memset(&cmd[i], 0, size - i);
+
+ virgl_renderer_submit_cmd((void *) cmd, ctx_id, size);
+}
+
+
int main()
{
initialize_environment();
@@ -980,6 +1034,8 @@ int main()
test_cs_nullpointer_deference();
test_vrend_set_signle_abo_heap_overflow();
+ test_vrend_set_shader_images_overflow();
+ test_vrend_3d_resource_overflow();
virgl_renderer_context_destroy(ctx_id);
virgl_renderer_cleanup(&cookie);
diff --git a/tests/test_virgl_cmd.c b/tests/test_virgl_cmd.c
index d845ef50..4c8d6895 100644
--- a/tests/test_virgl_cmd.c
+++ b/tests/test_virgl_cmd.c
@@ -67,7 +67,7 @@ START_TEST(virgl_test_overlap_obj_id)
}
END_TEST
-#ifdef PIPE_ARCH_LITTLE_ENDIAN
+#if UTIL_ARCH_LITTLE_ENDIAN
static const uint32_t test_green = 0xff00ff00;
#else
static const uint32_t test_green = 0x00ff00ff;
@@ -372,6 +372,15 @@ START_TEST(virgl_test_render_simple)
virgl_encode_bind_shader(&ctx, fs_handle, PIPE_SHADER_FRAGMENT);
}
+ /* link shader */
+ {
+ uint32_t handles[PIPE_SHADER_TYPES];
+ memset(handles, 0, sizeof(handles));
+ handles[PIPE_SHADER_VERTEX] = vs_handle;
+ handles[PIPE_SHADER_FRAGMENT] = fs_handle;
+ virgl_encode_link_shader(&ctx, handles);
+ }
+
/* set blend state */
{
struct pipe_blend_state blend;
diff --git a/tests/test_virgl_strbuf.c b/tests/test_virgl_strbuf.c
index 17b596d9..ed7e1ab8 100644
--- a/tests/test_virgl_strbuf.c
+++ b/tests/test_virgl_strbuf.c
@@ -177,12 +177,29 @@ START_TEST(strbuf_test_appendf_str)
bool ret;
ret = strbuf_alloc(&sb, 1024);
ck_assert_int_eq(ret, true);
+ ck_assert_int_eq(sb.external_buffer, false);
strbuf_appendf(&sb, "%s5", "hello");
ck_assert_str_eq(sb.buf, "hello5");
strbuf_free(&sb);
}
END_TEST
+
+START_TEST(strbuf_test_fixed_string)
+{
+ struct vrend_strbuf sb;
+ bool ret;
+ char buf[1024];
+ ret = strbuf_alloc_fixed(&sb, buf, 1024);
+ ck_assert_int_eq(ret, true);
+ ck_assert_int_eq(sb.external_buffer, true);
+ strbuf_appendf(&sb, "%s5", "hello");
+ ck_assert_str_eq(sb.buf, "hello5");
+ strbuf_free(&sb);
+}
+END_TEST
+
+
static Suite *init_suite(void)
{
Suite *s;
@@ -202,6 +219,7 @@ static Suite *init_suite(void)
tcase_add_test(tc_core, strbuf_test_boundary2);
tcase_add_test(tc_core, strbuf_test_appendf);
tcase_add_test(tc_core, strbuf_test_appendf_str);
+ tcase_add_test(tc_core, strbuf_test_fixed_string);
return s;
}
diff --git a/tests/test_virgl_transfer.c b/tests/test_virgl_transfer.c
index bf7f4381..ad15f755 100644
--- a/tests/test_virgl_transfer.c
+++ b/tests/test_virgl_transfer.c
@@ -879,7 +879,7 @@ START_TEST(virgl_test_copy_transfer_to_staging_without_iov_fails)
virgl_encoder_copy_transfer(&ctx, &dst_res, 0, 0, &box, &src_res, 0, synchronized);
ret = virgl_renderer_submit_cmd(ctx.cbuf->buf, ctx.ctx_id, ctx.cbuf->cdw);
- ck_assert_int_eq(ret, 0);
+ ck_assert_int_eq(ret, EINVAL);
virgl_renderer_ctx_detach_resource(ctx.ctx_id, src_res.handle);
virgl_renderer_ctx_detach_resource(ctx.ctx_id, dst_res.handle);
@@ -952,6 +952,56 @@ START_TEST(virgl_test_transfer_near_res_bounds_with_stride_succeeds)
}
END_TEST
+START_TEST(test_vrend_host_backed_memory_no_data_leak)
+{
+ struct iovec iovs[1];
+ int niovs = 1;
+
+ struct virgl_context ctx = {0};
+
+ int ret = testvirgl_init_ctx_cmdbuf(&ctx);
+
+ struct virgl_renderer_resource_create_args res;
+ res.handle = 0x400;
+ res.target = PIPE_BUFFER;
+ res.format = VIRGL_FORMAT_R8_UNORM;
+ res.nr_samples = 0;
+ res.last_level = 0;
+ res.array_size = 1;
+ res.bind = VIRGL_BIND_CUSTOM;
+ res.depth = 1;
+ res.width = 32;
+ res.height = 1;
+ res.flags = 0;
+
+ uint32_t size = 32;
+ uint8_t* data = calloc(1, size);
+ memset(data, 1, 32);
+ iovs[0].iov_base = data;
+ iovs[0].iov_len = size;
+
+ struct pipe_box box = {0,0,0, size, 1,1};
+
+ virgl_renderer_resource_create(&res, NULL, 0);
+ virgl_renderer_ctx_attach_resource(ctx.ctx_id, res.handle);
+
+ ret = virgl_renderer_transfer_read_iov(res.handle, ctx.ctx_id, 0, 0, 0,
+ (struct virgl_box *)&box, 0, iovs, niovs);
+
+ ck_assert_int_eq(ret, 0);
+
+ for (int i = 0; i < 32; ++i)
+ ck_assert_int_eq(data[i], 0);
+
+ virgl_renderer_ctx_detach_resource(1, res.handle);
+
+ virgl_renderer_resource_unref(res.handle);
+ free(data);
+
+}
+END_TEST
+
+
static Suite *virgl_init_suite(void)
{
Suite *s;
@@ -981,6 +1031,7 @@ static Suite *virgl_init_suite(void)
tcase_add_test(tc_core, virgl_test_transfer_buffer_bad_strides);
tcase_add_test(tc_core, virgl_test_transfer_2d_array_bad_layer_stride);
tcase_add_test(tc_core, virgl_test_transfer_2d_bad_level);
+ tcase_add_test(tc_core, test_vrend_host_backed_memory_no_data_leak);
tcase_add_loop_test(tc_core, virgl_test_transfer_res_read_valid, 0, PIPE_MAX_TEXTURE_TYPES);
tcase_add_loop_test(tc_core, virgl_test_transfer_res_write_valid, 0, PIPE_MAX_TEXTURE_TYPES);
diff --git a/tests/testvirgl_encode.c b/tests/testvirgl_encode.c
index f44a6402..38ad5e15 100644
--- a/tests/testvirgl_encode.c
+++ b/tests/testvirgl_encode.c
@@ -942,6 +942,18 @@ int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id
return 0;
}
+int virgl_encode_link_shader(struct virgl_context *ctx, uint32_t *handles)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LINK_SHADER, 0, VIRGL_LINK_SHADER_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_VERTEX]);
+ virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_FRAGMENT]);
+ virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_GEOMETRY]);
+ virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_TESS_CTRL]);
+ virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_TESS_EVAL]);
+ virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_COMPUTE]);
+ return 0;
+}
+
int virgl_encode_bind_shader(struct virgl_context *ctx,
uint32_t handle, uint32_t type)
{
diff --git a/tests/testvirgl_encode.h b/tests/testvirgl_encode.h
index c5bd9c10..cd1ab2bf 100644
--- a/tests/testvirgl_encode.h
+++ b/tests/testvirgl_encode.h
@@ -258,6 +258,7 @@ int virgl_encoder_render_condition(struct virgl_context *ctx,
int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id);
int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id);
int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id);
+int virgl_encode_link_shader(struct virgl_context *ctx, uint32_t *handles);
int virgl_encode_bind_shader(struct virgl_context *ctx,
uint32_t handle, uint32_t type);
#endif
diff --git a/virglrenderer.pc.in b/virglrenderer.pc.in
deleted file mode 100644
index a83ee2a5..00000000
--- a/virglrenderer.pc.in
+++ /dev/null
@@ -1,10 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: virglrenderer
-Description: virgl GL renderer
-Version: @PACKAGE_VERSION@
-Cflags: -I${includedir} -I${includedir}/virgl
-Libs: -L${libdir} -lvirglrenderer
diff --git a/vtest/vtest_fuzzer.c b/vtest/vtest_fuzzer.c
index 89b12b29..7505f149 100644
--- a/vtest/vtest_fuzzer.c
+++ b/vtest/vtest_fuzzer.c
@@ -152,7 +152,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
int out_fd = open("/dev/null", O_WRONLY);
struct vtest_buffer buffer;
- buffer.buffer = data;
+ buffer.buffer = (char *)data;
buffer.size = size;
struct vtest_input input;
input.data.buffer = &buffer;
diff --git a/vtest/vtest_protocol.h b/vtest/vtest_protocol.h
index f31b8e47..c48be146 100644
--- a/vtest/vtest_protocol.h
+++ b/vtest/vtest_protocol.h
@@ -153,7 +153,7 @@
#ifdef VIRGL_RENDERER_UNSTABLE_APIS
enum vcmd_param {
- VCMD_PARAM_MAX_SYNC_QUEUE_COUNT = 1,
+ VCMD_PARAM_MAX_TIMELINE_COUNT = 1,
};
#define VCMD_GET_PARAM_SIZE 1
#define VCMD_GET_PARAM_PARAM 0
@@ -217,7 +217,7 @@ enum vcmd_sync_wait_flag {
/* resp poll'able fd */
enum vcmd_submit_cmd2_flag {
- VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE = 1 << 0,
+ VCMD_SUBMIT_CMD2_FLAG_RING_IDX = 1 << 0,
};
struct vcmd_submit_cmd2_batch {
@@ -230,9 +230,8 @@ struct vcmd_submit_cmd2_batch {
uint32_t sync_offset;
uint32_t sync_count;
- /* ignored unless VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE is set */
- uint32_t sync_queue_index;
- uint64_t sync_queue_id;
+ /* ignored unless VCMD_SUBMIT_CMD2_FLAG_RING_IDX is set */
+ uint32_t ring_idx;
};
#define VCMD_SUBMIT_CMD2_BATCH_COUNT 0
#define VCMD_SUBMIT_CMD2_BATCH_FLAGS(n) (1 + 8 * (n) + 0)
@@ -240,9 +239,7 @@ struct vcmd_submit_cmd2_batch {
#define VCMD_SUBMIT_CMD2_BATCH_CMD_SIZE(n) (1 + 8 * (n) + 2)
#define VCMD_SUBMIT_CMD2_BATCH_SYNC_OFFSET(n) (1 + 8 * (n) + 3)
#define VCMD_SUBMIT_CMD2_BATCH_SYNC_COUNT(n) (1 + 8 * (n) + 4)
-#define VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_INDEX(n) (1 + 8 * (n) + 5)
-#define VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_ID_LO(n) (1 + 8 * (n) + 6)
-#define VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_ID_HI(n) (1 + 8 * (n) + 7)
+#define VCMD_SUBMIT_CMD2_BATCH_RING_IDX(n) (1 + 8 * (n) + 5)
#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
diff --git a/vtest/vtest_renderer.c b/vtest/vtest_renderer.c
index da314c6f..dc9ba19c 100644
--- a/vtest/vtest_renderer.c
+++ b/vtest/vtest_renderer.c
@@ -53,9 +53,10 @@
#include "util/u_double_list.h"
#include "util/u_math.h"
#include "util/u_memory.h"
+#include "util/u_pointer.h"
#include "util/u_hash_table.h"
-#define VTEST_MAX_SYNC_QUEUE_COUNT 64
+#define VTEST_MAX_TIMELINE_COUNT 64
struct vtest_resource {
struct list_head head;
@@ -75,14 +76,14 @@ struct vtest_sync {
uint64_t value;
};
-struct vtest_sync_queue {
+struct vtest_timeline {
struct list_head submits;
};
-struct vtest_sync_queue_submit {
+struct vtest_timeline_submit {
struct list_head head;
- struct vtest_sync_queue *sync_queue;
+ struct vtest_timeline *timeline;
uint32_t count;
struct vtest_sync **syncs;
@@ -121,7 +122,7 @@ struct vtest_context {
struct util_hash_table *resource_table;
struct util_hash_table *sync_table;
- struct vtest_sync_queue sync_queues[VTEST_MAX_SYNC_QUEUE_COUNT];
+ struct vtest_timeline timelines[VTEST_MAX_TIMELINE_COUNT];
struct list_head sync_waits;
};
@@ -164,16 +165,16 @@ static void vtest_write_implicit_fence(UNUSED void *cookie, uint32_t fence_id_in
renderer->implicit_fence_completed = fence_id_in;
}
-static void vtest_signal_sync_queue(struct vtest_sync_queue *queue,
- struct vtest_sync_queue_submit *to_submit);
+static void vtest_signal_timeline(struct vtest_timeline *timeline,
+ struct vtest_timeline_submit *to_submit);
static void vtest_write_context_fence(UNUSED void *cookie,
UNUSED uint32_t ctx_id,
- UNUSED uint64_t queue_id,
- void *fence_cookie)
+ UNUSED uint32_t ring_idx,
+ uint64_t fence_id)
{
- struct vtest_sync_queue_submit *submit = fence_cookie;
- vtest_signal_sync_queue(submit->sync_queue, submit);
+ struct vtest_timeline_submit *submit = (void*)(uintptr_t)fence_id;
+ vtest_signal_timeline(submit->timeline, submit);
}
static int vtest_get_drm_fd(void *cookie)
@@ -278,7 +279,7 @@ static void vtest_unref_sync(struct vtest_sync *sync)
list_add(&sync->head, &renderer.free_syncs);
}
-static void vtest_free_sync_queue_submit(struct vtest_sync_queue_submit *submit)
+static void vtest_free_timeline_submit(struct vtest_timeline_submit *submit)
{
uint32_t i;
for (i = 0; i < submit->count; i++)
@@ -298,23 +299,17 @@ static void vtest_free_sync_wait(struct vtest_sync_wait *wait)
free(wait);
}
-static unsigned
-u32_hash_func(void *key)
+static uint32_t
+u32_hash_func(const void *key)
{
intptr_t ip = pointer_to_intptr(key);
- return (unsigned)(ip & 0xffffffff);
+ return (uint32_t)(ip & 0xffffffff);
}
-static int
-u32_compare_func(void *key1, void *key2)
+static bool
+u32_equal_func(const void *key1, const void *key2)
{
- if (key1 < key2) {
- return -1;
- } else if (key1 > key2) {
- return 1;
- } else {
- return 0;
- }
+ return key1 == key2;
}
static void
@@ -522,7 +517,7 @@ static struct vtest_context *vtest_new_context(struct vtest_input *input,
}
ctx->resource_table = util_hash_table_create(u32_hash_func,
- u32_compare_func,
+ u32_equal_func,
resource_destroy_func);
if (!ctx->resource_table) {
free(ctx);
@@ -530,7 +525,7 @@ static struct vtest_context *vtest_new_context(struct vtest_input *input,
}
ctx->sync_table = util_hash_table_create(u32_hash_func,
- u32_compare_func,
+ u32_equal_func,
sync_destroy_func);
if (!ctx->sync_table) {
util_hash_table_destroy(ctx->resource_table);
@@ -538,9 +533,9 @@ static struct vtest_context *vtest_new_context(struct vtest_input *input,
return NULL;
}
- for (i = 0; i < VTEST_MAX_SYNC_QUEUE_COUNT; i++) {
- struct vtest_sync_queue *queue = &ctx->sync_queues[i];
- list_inithead(&queue->submits);
+ for (i = 0; i < VTEST_MAX_TIMELINE_COUNT; i++) {
+ struct vtest_timeline *timeline = &ctx->timelines[i];
+ list_inithead(&timeline->submits);
}
list_inithead(&ctx->sync_waits);
@@ -650,13 +645,13 @@ void vtest_destroy_context(struct vtest_context *ctx)
}
list_del(&ctx->head);
- for (i = 0; i < VTEST_MAX_SYNC_QUEUE_COUNT; i++) {
- struct vtest_sync_queue *queue = &ctx->sync_queues[i];
- struct vtest_sync_queue_submit *submit, *submit_tmp;
+ for (i = 0; i < VTEST_MAX_TIMELINE_COUNT; i++) {
+ struct vtest_timeline *timeline = &ctx->timelines[i];
+ struct vtest_timeline_submit *submit, *submit_tmp;
- LIST_FOR_EACH_ENTRY_SAFE(submit, submit_tmp, &queue->submits, head)
- vtest_free_sync_queue_submit(submit);
- list_inithead(&queue->submits);
+ LIST_FOR_EACH_ENTRY_SAFE(submit, submit_tmp, &timeline->submits, head)
+ vtest_free_timeline_submit(submit);
+ list_inithead(&timeline->submits);
}
LIST_FOR_EACH_ENTRY_SAFE(wait, wait_tmp, &ctx->sync_waits, head) {
@@ -782,12 +777,12 @@ int vtest_get_param(UNUSED uint32_t length_dw)
resp_buf[VTEST_CMD_ID] = VCMD_GET_PARAM;
resp = &resp_buf[VTEST_CMD_DATA_START];
switch (param) {
- case VCMD_PARAM_MAX_SYNC_QUEUE_COUNT:
+ case VCMD_PARAM_MAX_TIMELINE_COUNT:
resp[0] = true;
/* TODO until we have a timerfd */
#ifdef HAVE_EVENTFD_H
if (!getenv("VIRGL_DISABLE_MT"))
- resp[1] = VTEST_MAX_SYNC_QUEUE_COUNT;
+ resp[1] = VTEST_MAX_TIMELINE_COUNT;
else
resp[1] = 0;
#else
@@ -908,7 +903,7 @@ int vtest_send_caps2(UNUSED uint32_t length_dw)
goto end;
}
- vtest_block_write(ctx->out_fd, caps_buf, max_size);
+ ret = vtest_block_write(ctx->out_fd, caps_buf, max_size);
if (ret < 0) {
goto end;
}
@@ -942,7 +937,7 @@ int vtest_send_caps(UNUSED uint32_t length_dw)
goto end;
}
- vtest_block_write(ctx->out_fd, caps_buf, max_size);
+ ret = vtest_block_write(ctx->out_fd, caps_buf, max_size);
if (ret < 0) {
goto end;
}
@@ -1180,6 +1175,7 @@ int vtest_resource_create_blob(UNUSED uint32_t length_dw)
fd = -1;
break;
default:
+ vtest_unref_resource(res);
return -EINVAL;
}
@@ -1191,7 +1187,7 @@ int vtest_resource_create_blob(UNUSED uint32_t length_dw)
return report_failed_call("virgl_renderer_resource_create_blob", ret);
}
- /* need dmabuf */
+ /* export blob */
if (args.blob_mem == VIRGL_RENDERER_BLOB_MEM_HOST3D) {
uint32_t fd_type;
ret = virgl_renderer_resource_export_blob(res->res_id, &fd_type, &fd);
@@ -1199,7 +1195,8 @@ int vtest_resource_create_blob(UNUSED uint32_t length_dw)
vtest_unref_resource(res);
return report_failed_call("virgl_renderer_resource_export_blob", ret);
}
- if (fd_type != VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF) {
+ if (fd_type != VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF &&
+ fd_type != VIRGL_RENDERER_BLOB_FD_TYPE_SHM) {
close(fd);
vtest_unref_resource(res);
return report_failed_call("virgl_renderer_resource_export_blob", -EINVAL);
@@ -1675,6 +1672,20 @@ static uint64_t vtest_gettime(uint32_t offset_ms)
return ns + ns_per_ms * offset_ms;
}
+static inline void write_ready(int fd)
+{
+#ifdef __GNUC__
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wunused-result"
+#endif
+ static const uint64_t val = 1;
+ write(fd, &val, sizeof(val));
+#ifdef __GNUC__
+# pragma GCC diagnostic pop
+#endif
+}
+
+
/* TODO this is slow */
static void vtest_signal_sync(struct vtest_sync *sync, uint64_t value)
{
@@ -1718,22 +1729,20 @@ static void vtest_signal_sync(struct vtest_sync *sync, uint64_t value)
}
if (is_ready) {
- const uint64_t val = 1;
-
list_del(&wait->head);
- write(wait->fd, &val, sizeof(val));
+ write_ready(wait->fd);
vtest_free_sync_wait(wait);
}
}
}
}
-static void vtest_signal_sync_queue(struct vtest_sync_queue *queue,
- struct vtest_sync_queue_submit *to_submit)
+static void vtest_signal_timeline(struct vtest_timeline *timeline,
+ struct vtest_timeline_submit *to_submit)
{
- struct vtest_sync_queue_submit *submit, *tmp;
+ struct vtest_timeline_submit *submit, *tmp;
- LIST_FOR_EACH_ENTRY_SAFE(submit, tmp, &queue->submits, head) {
+ LIST_FOR_EACH_ENTRY_SAFE(submit, tmp, &timeline->submits, head) {
uint32_t i;
list_del(&submit->head);
@@ -1969,18 +1978,15 @@ int vtest_sync_wait(uint32_t length_dw)
sync_wait_buf + 2, sync_count);
free(sync_wait_buf);
- if (ret) {
- free(wait);
+ if (ret)
return ret;
- }
is_ready = !wait->count;
if ((wait->flags & VCMD_SYNC_WAIT_FLAG_ANY) && wait->count < sync_count)
is_ready = true;
if (is_ready) {
- const uint64_t val = 1;
- write(wait->fd, &val, sizeof(val));
+ write_ready(wait->fd);
}
resp_buf[VTEST_CMD_LEN] = 0;
@@ -2002,7 +2008,7 @@ static int vtest_submit_cmd2_batch(struct vtest_context *ctx,
const uint32_t *cmds,
const uint32_t *syncs)
{
- struct vtest_sync_queue_submit *submit = NULL;
+ struct vtest_timeline_submit *submit = NULL;
uint32_t i;
int ret;
@@ -2013,7 +2019,7 @@ static int vtest_submit_cmd2_batch(struct vtest_context *ctx,
if (!batch->sync_count)
return 0;
- if (batch->flags & VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE) {
+ if (batch->flags & VCMD_SUBMIT_CMD2_FLAG_RING_IDX) {
submit = malloc(sizeof(*submit) +
sizeof(*submit->syncs) * batch->sync_count +
sizeof(*submit->values) * batch->sync_count);
@@ -2047,25 +2053,25 @@ static int vtest_submit_cmd2_batch(struct vtest_context *ctx,
if (i < batch->sync_count) {
if (submit) {
submit->count = i;
- vtest_free_sync_queue_submit(submit);
+ vtest_free_timeline_submit(submit);
}
return -EEXIST;
}
if (submit) {
- struct vtest_sync_queue *queue = &ctx->sync_queues[batch->sync_queue_index];
+ struct vtest_timeline *timeline = &ctx->timelines[batch->ring_idx];
- submit->sync_queue = queue;
+ submit->timeline = timeline;
ret = virgl_renderer_context_create_fence(ctx->ctx_id,
VIRGL_RENDERER_FENCE_FLAG_MERGEABLE,
- batch->sync_queue_id,
- submit);
+ batch->ring_idx,
+ (uintptr_t)submit);
if (ret) {
- vtest_free_sync_queue_submit(submit);
+ vtest_free_timeline_submit(submit);
return ret;
}
- list_addtail(&submit->head, &queue->submits);
+ list_addtail(&submit->head, &timeline->submits);
}
return 0;
@@ -2093,7 +2099,7 @@ int vtest_submit_cmd2(uint32_t length_dw)
}
batch_count = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_COUNT];
- if (VCMD_SUBMIT_CMD2_BATCH_COUNT + 8 * batch_count > length_dw) {
+ if (VCMD_SUBMIT_CMD2_BATCH_COUNT + 6 * batch_count > length_dw) {
free(submit_cmd2_buf);
return -EINVAL;
}
@@ -2105,16 +2111,14 @@ int vtest_submit_cmd2(uint32_t length_dw)
.cmd_size = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_CMD_SIZE(i)],
.sync_offset = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_OFFSET(i)],
.sync_count = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_COUNT(i)],
- .sync_queue_index = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_INDEX(i)],
- .sync_queue_id = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_ID_LO(i)] |
- (uint64_t)submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_ID_HI(i)] << 32,
+ .ring_idx = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_RING_IDX(i)],
};
const uint32_t *cmds = &submit_cmd2_buf[batch.cmd_offset];
const uint32_t *syncs = &submit_cmd2_buf[batch.sync_offset];
if (batch.cmd_offset + batch.cmd_size > length_dw ||
batch.sync_offset + batch.sync_count * 3 > length_dw ||
- batch.sync_queue_index >= VTEST_MAX_SYNC_QUEUE_COUNT) {
+ batch.ring_idx >= VTEST_MAX_TIMELINE_COUNT) {
free(submit_cmd2_buf);
return -EINVAL;
}
diff --git a/vtest/vtest_server.c b/vtest/vtest_server.c
index 45132f22..a6339aad 100644
--- a/vtest/vtest_server.c
+++ b/vtest/vtest_server.c
@@ -91,6 +91,7 @@ struct vtest_server
bool use_gles;
bool venus;
+ bool render_server;
int ctx_flags;
@@ -163,6 +164,8 @@ while (__AFL_LOOP(1000)) {
#define OPT_USE_GLES 'e'
#define OPT_RENDERNODE 'r'
#define OPT_VENUS 'v'
+#define OPT_RENDER_SERVER 'n'
+#define OPT_SOCKET_PATH 'p'
static void vtest_server_parse_args(int argc, char **argv)
{
@@ -177,6 +180,8 @@ static void vtest_server_parse_args(int argc, char **argv)
{"use-gles", no_argument, NULL, OPT_USE_GLES},
{"rendernode", required_argument, NULL, OPT_RENDERNODE},
{"venus", no_argument, NULL, OPT_VENUS},
+ {"render-server", no_argument, NULL, OPT_RENDER_SERVER},
+ {"socket-path", optional_argument, NULL, OPT_SOCKET_PATH},
{0, 0, 0, 0}
};
@@ -217,13 +222,24 @@ static void vtest_server_parse_args(int argc, char **argv)
server.venus = true;
break;
#endif
+#ifdef ENABLE_RENDER_SERVER
+ case OPT_RENDER_SERVER:
+ server.render_server = true;
+ break;
+#endif
+ case OPT_SOCKET_PATH:
+ server.socket_name = optarg;
+ break;
default:
printf("Usage: %s [--no-fork] [--no-loop-or-fork] [--multi-clients] "
"[--use-glx] [--use-egl-surfaceless] [--use-gles] "
- "[--rendernode <dev>]"
+ "[--rendernode <dev>] [--socket-path <path>] "
#ifdef ENABLE_VENUS
" [--venus]"
#endif
+#ifdef ENABLE_RENDER_SERVER
+ " [--render-server]"
+#endif
" [file]\n", argv[0]);
exit(EXIT_FAILURE);
break;
@@ -255,6 +271,9 @@ static void vtest_server_parse_args(int argc, char **argv)
if (server.venus) {
server.ctx_flags |= VIRGL_RENDERER_VENUS;
}
+ if (server.render_server) {
+ server.ctx_flags |= VIRGL_RENDERER_RENDER_SERVER;
+ }
}
static void vtest_server_getenv(void)