aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorandroid-build-team Robot <android-build-team-robot@google.com>2021-02-24 02:06:54 +0000
committerandroid-build-team Robot <android-build-team-robot@google.com>2021-02-24 02:06:54 +0000
commit559d925e3b704738704cd5db328be29bf6543e77 (patch)
tree7fb815712954c8e207872f9942fa42d18c441b5b
parent93dd4f28a3d254cf21479fad075235d023f98b7f (diff)
parenta0d14ecfcac1e000b43ef57c9cb137a7bc9d56f2 (diff)
downloadvirglrenderer-559d925e3b704738704cd5db328be29bf6543e77.tar.gz
Snap for 7166403 from a0d14ecfcac1e000b43ef57c9cb137a7bc9d56f2 to sc-release
Change-Id: Id8d696c00a55d7fd30fe7ff636202ef5e1b69366
-rw-r--r--Android.bp5
-rw-r--r--ci/.gitlab-ci.yml5
-rw-r--r--ci/build-container.sh24
-rwxr-xr-xci/run_tests.sh29
-rw-r--r--config.h.meson1
-rw-r--r--meson.build19
-rw-r--r--meson_options.txt8
-rw-r--r--src/meson.build34
-rw-r--r--src/virgl_context.c5
-rw-r--r--src/virgl_util.c139
-rw-r--r--src/virgl_util.h32
-rw-r--r--src/virglrenderer.c329
-rw-r--r--src/virglrenderer.h3
-rw-r--r--src/vrend_decode.c1060
-rw-r--r--src/vrend_renderer.c457
-rw-r--r--src/vrend_renderer.h35
-rw-r--r--src/vrend_shader.c1451
-rw-r--r--src/vrend_shader.h1
-rw-r--r--src/vrend_strbuf.h2
-rw-r--r--src/vrend_winsys.c200
-rw-r--r--src/vrend_winsys.h57
-rw-r--r--src/vrend_winsys_egl.c (renamed from src/virgl_egl_context.c)70
-rw-r--r--src/vrend_winsys_egl.h (renamed from src/virgl_egl.h)12
-rw-r--r--src/vrend_winsys_gbm.c (renamed from src/virgl_gbm.c)20
-rw-r--r--src/vrend_winsys_gbm.h (renamed from src/virgl_gbm.h)21
-rw-r--r--src/vrend_winsys_glx.c (renamed from src/virgl_glx_context.c)2
-rw-r--r--src/vrend_winsys_glx.h (renamed from src/virgl_glx.h)0
-rw-r--r--tests/test_fuzzer_formats.c2
-rw-r--r--tests/test_virgl_init.c53
29 files changed, 2406 insertions, 1670 deletions
diff --git a/Android.bp b/Android.bp
index 2130e801..9916f04c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -105,8 +105,6 @@ cc_library {
"src/gallium/auxiliary/util/u_surface.c",
"src/iov.c",
"src/virgl_context.c",
- "src/virgl_egl_context.c",
- "src/virgl_gbm.c",
"src/virglrenderer.c",
"src/virgl_resource.c",
"src/virgl_util.c",
@@ -118,6 +116,9 @@ cc_library {
"src/vrend_renderer.c",
"src/vrend_shader.c",
"src/vrend_tweaks.c",
+ "src/vrend_winsys.c",
+ "src/vrend_winsys_egl.c",
+ "src/vrend_winsys_gbm.c",
],
target: {
linux_glibc: {
diff --git a/ci/.gitlab-ci.yml b/ci/.gitlab-ci.yml
index 854359f4..401cc3a3 100644
--- a/ci/.gitlab-ci.yml
+++ b/ci/.gitlab-ci.yml
@@ -59,6 +59,11 @@ make check clang-fuzzer:
TEST_SUITE: '--make-check-clang-fuzzer'
extends: .make_check_base
+make check trace-stderr:
+ variables:
+ TEST_SUITE: '--make-check-trace-stderr'
+ extends: .make_check_base
+
.tests:
stage: test
extends: .tests_base
diff --git a/ci/build-container.sh b/ci/build-container.sh
index 3416781f..489b0c2f 100644
--- a/ci/build-container.sh
+++ b/ci/build-container.sh
@@ -13,7 +13,7 @@ export CC="gcc-8"
export CXX="g++-8"
export CFLAGS="-g3"
export CXXFLAGS="-g3"
-export GIT_DATE="`date +%Y-%m-%d -d \"15 months ago\"`"
+export GIT_DATE="2020-11-11"
export MESA_DEBUG=1
echo 'path-exclude=/usr/share/doc/*' > /etc/dpkg/dpkg.cfg.d/99-exclude-cruft
@@ -42,6 +42,7 @@ apt-get -y install --no-install-recommends \
golang-go \
kbd \
libcurl4-openssl-dev \
+ libepoxy-dev \
libgbm-dev \
libnss-systemd \
libpng-dev \
@@ -114,7 +115,7 @@ popd
# error: RPC failed; curl 56 GnuTLS recv error (-54): Error in the pull function.
git config --global http.postBuffer 1048576000
-export KNOWN_GOOD_CTS=${KNOWN_GOOD_CTS:-6c709dc9a99b70572aceb0f7698ab044383ff948}
+export KNOWN_GOOD_CTS=${KNOWN_GOOD_CTS:-524e5bcfba33d1b8dede4b4ec1ec33d24ccf8d2c}
mkdir /VK-GL-CTS
pushd /VK-GL-CTS
git clone --shallow-since="$GIT_DATE" https://github.com/KhronosGroup/VK-GL-CTS.git . && \
@@ -129,7 +130,7 @@ git clone --shallow-since="$GIT_DATE" https://github.com/KhronosGroup/VK-GL-CTS.
find . -name lib\*.a | xargs rm -rf
popd
-export KNOWN_GOOD_PIGLIT=${KNOWN_GOOD_PIGLIT:-645e15dc84fb48c1f270e322af61d7c716f1c45c}
+export KNOWN_GOOD_PIGLIT=${KNOWN_GOOD_PIGLIT:-08a92f4094c927276a20f608d7b3c5de2a72e9e7}
mkdir /piglit
pushd /piglit
git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/piglit.git . && \
@@ -142,20 +143,7 @@ git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/piglit
rm -rf /piglit
popd
-export KNOWN_GOOD_EPOXY=${KNOWN_GOOD_EPOXY:-5d818164dd2ab87b0054641f1446bc552a602320}
-mkdir /epoxy
-pushd /epoxy
-git clone --shallow-since="$GIT_DATE" https://github.com/anholt/libepoxy.git . && \
- git checkout ${KNOWN_GOOD_EPOXY} && \
- git log --oneline -n 1 && \
- mkdir -p build && \
- meson build/ && \
- meson configure build/ -Dprefix=/usr/local -Dlibdir=lib && \
- ninja -C build/ install >/dev/null && \
- rm -rf /epoxy
-popd
-
-export KNOWN_GOOD_DRM=libdrm-2.4.100
+export KNOWN_GOOD_DRM=libdrm-2.4.103
mkdir /drm
pushd /drm
git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/drm.git . && \
@@ -168,7 +156,7 @@ git clone --shallow-since="$GIT_DATE" https://gitlab.freedesktop.org/mesa/drm.gi
rm -rf /drm
popd
-export KNOWN_GOOD_MESA=${KNOWN_GOOD_MESA:-e924181ea89e5e261f8aa24564c32ed22941e752}
+export KNOWN_GOOD_MESA=${KNOWN_GOOD_MESA:-1c17223c02b68679d67a4e4a6be8b9b7a80fa2e9}
echo $KNOWN_GOOD_MESA
export MESA_REPO=https://gitlab.freedesktop.org/mesa/mesa.git
echo $MESA_REPO
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 47890f6e..f5b81c1f 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -10,6 +10,11 @@ run_setup()
use_clang_fuzzer=1
fi
+ if [ "x$2" = "xtrace_stderr" ]; then
+ use_trace_stderr=1
+ fi
+
+
# Let .gitlab-ci or local ci runner set
# desired thread count
NUM_THREADS=${NUM_THREADS:-$(expr $(expr $(nproc) / 8) + 1)}
@@ -56,7 +61,11 @@ run_setup()
export FUZZER=-Dfuzzer=true
fi
- meson build/ -Dprefix=/usr/local -Ddebug=true -Dtests=true --fatal-meson-warnings $FUZZER
+ if [ "x$use_trace_stderr" = "x1" ]; then
+ export TRACING=-Dtracing=stderr
+ fi
+
+ meson build/ -Dprefix=/usr/local -Ddebug=true -Dtests=true --fatal-meson-warnings $FUZZER $TRACING
ninja -C build -j$NUM_THREADS install
}
@@ -86,6 +95,20 @@ run_make_check_clang_fuzzer()
)
}
+run_make_check_trace_stderr()
+{
+ run_setup meson trace_stderr
+ (
+ mkdir -p ./results/make_check_trace_stderr
+ pushd ./build
+ VRENDTEST_USE_EGL_SURFACELESS=1 ninja -j$NUM_THREADS test
+ RET=$?
+ cp ./meson-logs/testlog.txt ../results/make_check_trace_stderr/
+ popd
+ return $RET
+ )
+}
+
run_deqp()
{
run_setup meson
@@ -169,6 +192,10 @@ parse_input()
run_make_check_clang_fuzzer
;;
+ --make-check-trace-stderr)
+ run_make_check_trace_stderr
+ ;;
+
--deqp-gl-gl-tests)
run_deqp gl gl
;;
diff --git a/config.h.meson b/config.h.meson
index ce1c9a68..198c84cc 100644
--- a/config.h.meson
+++ b/config.h.meson
@@ -7,3 +7,4 @@
#mesondefine HAVE_FUNC_ATTRIBUTE_VISIBILITY
#mesondefine HAVE_EVENTFD_H
#mesondefine HAVE_DLFCN_H
+#mesondefine ENABLE_TRACING
diff --git a/meson.build b/meson.build
index 440ea248..fc9dea74 100644
--- a/meson.build
+++ b/meson.build
@@ -82,6 +82,24 @@ m_dep = cc.find_library('m')
conf_data = configuration_data()
conf_data.set('VERSION', '0.8.1')
+with_tracing = get_option('tracing')
+
+if with_tracing != 'none'
+ if not cc.compiles('void f(void* v){}; int main () { void *dummy __attribute__((cleanup (f))) = 0;}')
+ error('Tracing requires compiler support for __attribute__((cleanup))')
+endif
+
+endif
+
+if with_tracing == 'perfetto'
+ vperfetto_min_dep = dependency('vperfetto_min')
+ conf_data.set('ENABLE_TRACING', 'TRACE_WITH_PERFETTO')
+endif
+
+if with_tracing == 'stderr'
+ conf_data.set('ENABLE_TRACING', 'TRACE_WITH_STDERR')
+endif
+
if cc.has_header('sys/uio.h')
conf_data.set('HAVE_SYS_UIO_H', 1)
endif
@@ -199,6 +217,7 @@ lines += 'minigbm_alloc: ' + (with_minigbm_allocation ? 'yes' : 'no' )
lines += ''
lines += 'tests: ' + (with_tests ? 'yes' : 'no' )
lines += 'fuzzer: ' + (with_fuzzer ? 'yes' : 'no' )
+lines += 'tracing: ' + with_tracing
indent = ' '
summary = indent + ('\n' + indent).join(lines)
diff --git a/meson_options.txt b/meson_options.txt
index 9d7e5b5b..ce988a63 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -58,3 +58,11 @@ option(
value : 'false',
description : 'enable running unit tests with valgrind'
)
+
+option(
+ 'tracing',
+ type : 'combo',
+ value : 'none',
+ choices : [ 'perfetto', 'stderr', 'none' ],
+ description : 'enable emitting traces using the selected backend'
+)
diff --git a/src/meson.build b/src/meson.build
index 4e1d790a..d854027f 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -33,6 +33,9 @@ virgl_sources = [
'virgl_resource.h',
'virgl_util.c',
'virgl_util.h',
+]
+
+vrend_sources = [
'vrend_blitter.c',
'vrend_blitter.h',
'vrend_debug.c',
@@ -49,6 +52,8 @@ virgl_sources = [
'vrend_strbuf.h',
'vrend_tweaks.c',
'vrend_tweaks.h',
+ 'vrend_winsys.c',
+ 'vrend_winsys.h',
]
virglrenderer_sources = [
@@ -57,17 +62,16 @@ virglrenderer_sources = [
'virglrenderer_hw.h',
]
-
-virgl_egl_sources = [
- 'virgl_gbm.c',
- 'virgl_gbm.h',
- 'virgl_egl_context.c',
- 'virgl_egl.h',
+vrend_winsys_egl_sources = [
+ 'vrend_winsys_gbm.c',
+ 'vrend_winsys_gbm.h',
+ 'vrend_winsys_egl.c',
+ 'vrend_winsys_egl.h',
]
-virgl_glx_sources = [
- 'virgl_glx_context.c',
- 'virgl_glx.h',
+vrend_winsys_glx_sources = [
+ 'vrend_winsys_glx.c',
+ 'vrend_winsys_glx.h',
]
virgl_depends = [
@@ -75,16 +79,22 @@ virgl_depends = [
epoxy_dep,
libdrm_dep,
thread_dep,
- m_dep
+ m_dep,
]
+if with_tracing == 'perfetto'
+ virgl_depends += [vperfetto_min_dep]
+endif
+
+virgl_sources += vrend_sources
+
if have_egl
- virgl_sources += virgl_egl_sources
+ virgl_sources += vrend_winsys_egl_sources
virgl_depends += [gbm_dep]
endif
if have_glx
- virgl_sources += virgl_glx_sources
+ virgl_sources += vrend_winsys_glx_sources
virgl_depends += [glx_dep]
endif
diff --git a/src/virgl_context.c b/src/virgl_context.c
index 51991c09..6df23091 100644
--- a/src/virgl_context.c
+++ b/src/virgl_context.c
@@ -52,10 +52,7 @@ virgl_context_table_init(void)
void
virgl_context_table_cleanup(void)
{
- if (virgl_context_table) {
- util_hash_table_destroy(virgl_context_table);
- virgl_context_table = NULL;
- }
+ util_hash_table_destroy(virgl_context_table);
}
void
diff --git a/src/virgl_util.c b/src/virgl_util.c
index bf8ce884..2f673ddc 100644
--- a/src/virgl_util.c
+++ b/src/virgl_util.c
@@ -22,10 +22,35 @@
*
**************************************************************************/
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include "virgl_util.h"
+#include <errno.h>
+#ifdef HAVE_EVENTFD_H
+#include <sys/eventfd.h>
+#endif
+#include <unistd.h>
+
#include "util/u_pointer.h"
+#include <stdarg.h>
+#include <stdio.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#if ENABLE_TRACING == TRACE_WITH_PERFETTO
+#include <vperfetto-min.h>
+#endif
+
+#if ENABLE_TRACING == TRACE_WITH_STDERR
+#include <stdio.h>
+#endif
+
unsigned hash_func_u32(void *key)
{
intptr_t ip = pointer_to_intptr(key);
@@ -41,3 +66,117 @@ int compare_func(void *key1, void *key2)
else
return 0;
}
+
+bool has_eventfd(void)
+{
+#ifdef HAVE_EVENTFD_H
+ return true;
+#else
+ return false;
+#endif
+}
+
+int create_eventfd(unsigned int initval)
+{
+#ifdef HAVE_EVENTFD_H
+ return eventfd(initval, EFD_CLOEXEC | EFD_NONBLOCK);
+#else
+ return -1;
+#endif
+}
+
+int write_eventfd(int fd, uint64_t val)
+{
+ const char *buf = (const char *)&val;
+ size_t count = sizeof(val);
+ ssize_t ret = 0;
+
+ while (count) {
+ ret = write(fd, buf, count);
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ break;
+ }
+ count -= ret;
+ buf += ret;
+ }
+
+ return count ? -1 : 0;
+}
+
+void flush_eventfd(int fd)
+{
+ ssize_t len;
+ uint64_t value;
+ do {
+ len = read(fd, &value, sizeof(value));
+ } while ((len == -1 && errno == EINTR) || len == sizeof(value));
+}
+
+#if ENABLE_TRACING == TRACE_WITH_PERFETTO
+void trace_init(void)
+{
+ struct vperfetto_min_config config = {
+ .init_flags = VPERFETTO_INIT_FLAG_USE_SYSTEM_BACKEND,
+ .filename = NULL,
+ .shmem_size_hint_kb = 32 * 1024,
+ };
+
+ vperfetto_min_startTracing(&config);
+}
+
+char *trace_begin(const char* format, ...)
+{
+ char buffer[1024];
+ va_list args;
+ va_start (args, format);
+ vsnprintf (buffer, sizeof(buffer), format, args);
+ va_end (args);
+ vperfetto_min_beginTrackEvent_VMM(buffer);
+ return (void *)1;
+}
+
+void trace_end(char **dummy)
+{
+ (void)dummy;
+ vperfetto_min_endTrackEvent_VMM();
+}
+#endif
+
+#if ENABLE_TRACING == TRACE_WITH_STDERR
+static int nesting_depth = 0;
+void trace_init(void)
+{
+}
+
+char *trace_begin(const char* format, ...)
+{
+ for (int i = 0; i < nesting_depth; ++i)
+ fprintf(stderr, " ");
+
+ fprintf(stderr, "ENTER:");
+ char *buffer;
+ va_list args;
+ va_start (args, format);
+ int size = vasprintf(&buffer, format, args);
+
+ if (size < 0)
+ buffer=strdup("error");
+
+ va_end (args);
+ fprintf(stderr, "%s\n", buffer);
+ nesting_depth++;
+
+ return buffer;
+}
+
+void trace_end(char **func_name)
+{
+ --nesting_depth;
+ for (int i = 0; i < nesting_depth; ++i)
+ fprintf(stderr, " ");
+ fprintf(stderr, "LEAVE %s\n", *func_name);
+ free(*func_name);
+}
+#endif
diff --git a/src/virgl_util.h b/src/virgl_util.h
index 84d6ef8e..861ecd72 100644
--- a/src/virgl_util.h
+++ b/src/virgl_util.h
@@ -28,6 +28,13 @@
#include <stdint.h>
#include <stdbool.h>
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define TRACE_WITH_PERFETTO 1
+#define TRACE_WITH_STDERR 2
+
#define BIT(n) (UINT32_C(1) << (n))
static inline bool has_bit(uint32_t mask, uint32_t bit)
@@ -49,4 +56,29 @@ unsigned hash_func_u32(void *key);
int compare_func(void *key1, void *key2);
+bool has_eventfd(void);
+int create_eventfd(unsigned int initval);
+int write_eventfd(int fd, uint64_t val);
+void flush_eventfd(int fd);
+
+#ifdef ENABLE_TRACING
+void trace_init(void);
+char *trace_begin(const char* format, ...);
+void trace_end(char **dummy);
+
+#define TRACE_INIT() trace_init()
+#define TRACE_FUNC() \
+ char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
+ trace_begin("%s", __func__)
+
+#define TRACE_SCOPE(FORMAT, ...) \
+ char *trace_dummy __attribute__((cleanup (trace_end), unused)) = \
+ trace_begin(FORMAT, __VA_ARGS__)
+
+#else
+#define TRACE_INIT()
+#define TRACE_FUNC()
+#define TRACE_SCOPE(FORMAT, ...)
+#endif
+
#endif /* VIRGL_UTIL_H */
diff --git a/src/virglrenderer.c b/src/virglrenderer.c
index ce43c897..8458b215 100644
--- a/src/virglrenderer.c
+++ b/src/virglrenderer.c
@@ -31,26 +31,33 @@
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
+
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_math.h"
#include "vrend_renderer.h"
+#include "vrend_winsys.h"
#include "virglrenderer.h"
#include "virglrenderer_hw.h"
#include "virgl_context.h"
#include "virgl_resource.h"
+#include "virgl_util.h"
+
+struct global_state {
+ bool client_initialized;
+ void *cookie;
+ int flags;
+ const struct virgl_renderer_callbacks *cbs;
+
+ bool resource_initialized;
+ bool context_initialized;
+ bool winsys_initialized;
+ bool vrend_initialized;
+};
-#ifdef HAVE_EPOXY_EGL_H
-#include "virgl_gbm.h"
-#include "virgl_egl.h"
-#endif
-
-#ifdef HAVE_EPOXY_GLX_H
-#include "virgl_glx.h"
-static struct virgl_glx *glx_info;
-#endif
+static struct global_state state;
/* new API - just wrap internal API for now */
@@ -93,16 +100,14 @@ static int virgl_renderer_resource_create_internal(struct virgl_renderer_resourc
int virgl_renderer_resource_create(struct virgl_renderer_resource_create_args *args,
struct iovec *iov, uint32_t num_iovs)
{
+ TRACE_FUNC();
return virgl_renderer_resource_create_internal(args, iov, num_iovs, NULL);
}
int virgl_renderer_resource_import_eglimage(struct virgl_renderer_resource_create_args *args, void *image)
{
-#ifdef HAVE_EPOXY_EGL_H
+ TRACE_FUNC();
return virgl_renderer_resource_create_internal(args, NULL, 0, image);
-#else
- return EINVAL;
-#endif
}
void virgl_renderer_resource_set_priv(uint32_t res_handle, void *priv)
@@ -150,10 +155,8 @@ void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
{
switch (set) {
case VIRGL_RENDERER_CAPSET_VIRGL:
- vrend_renderer_fill_caps(VREND_CAP_SET, version, (union virgl_caps *)caps);
- break;
case VIRGL_RENDERER_CAPSET_VIRGL2:
- vrend_renderer_fill_caps(VREND_CAP_SET2, version, (union virgl_caps *)caps);
+ vrend_renderer_fill_caps(set, version, (union virgl_caps *)caps);
break;
default:
break;
@@ -165,6 +168,8 @@ int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *na
struct virgl_context *ctx;
int ret;
+ TRACE_FUNC();
+
/* user context id must be greater than 0 */
if (handle == 0)
return EINVAL;
@@ -187,6 +192,7 @@ int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *na
void virgl_renderer_context_destroy(uint32_t handle)
{
+ TRACE_FUNC();
virgl_context_remove(handle);
}
@@ -194,6 +200,7 @@ int virgl_renderer_submit_cmd(void *buffer,
int ctx_id,
int ndw)
{
+ TRACE_FUNC();
struct virgl_context *ctx = virgl_context_lookup(ctx_id);
if (!ctx)
return EINVAL;
@@ -210,6 +217,8 @@ int virgl_renderer_transfer_write_iov(uint32_t handle,
struct iovec *iovec,
unsigned int iovec_cnt)
{
+ TRACE_FUNC();
+
struct virgl_resource *res = virgl_resource_lookup(handle);
struct vrend_transfer_info transfer_info;
@@ -248,6 +257,7 @@ int virgl_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id,
uint64_t offset, struct iovec *iovec,
int iovec_cnt)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(handle);
struct vrend_transfer_info transfer_info;
@@ -282,6 +292,7 @@ int virgl_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id,
int virgl_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
int num_iovs)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!res)
return EINVAL;
@@ -291,6 +302,7 @@ int virgl_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
void virgl_renderer_resource_detach_iov(int res_handle, struct iovec **iov_p, int *num_iovs_p)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!res)
return;
@@ -305,16 +317,19 @@ void virgl_renderer_resource_detach_iov(int res_handle, struct iovec **iov_p, in
int virgl_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
{
+ TRACE_FUNC();
return vrend_renderer_create_fence(client_fence_id, ctx_id);
}
void virgl_renderer_force_ctx_0(void)
{
+ TRACE_FUNC();
vrend_renderer_force_ctx_0();
}
void virgl_renderer_ctx_attach_resource(int ctx_id, int res_handle)
{
+ TRACE_FUNC();
struct virgl_context *ctx = virgl_context_lookup(ctx_id);
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!ctx || !res)
@@ -324,6 +339,7 @@ void virgl_renderer_ctx_attach_resource(int ctx_id, int res_handle)
void virgl_renderer_ctx_detach_resource(int ctx_id, int res_handle)
{
+ TRACE_FUNC();
struct virgl_context *ctx = virgl_context_lookup(ctx_id);
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!ctx || !res)
@@ -334,34 +350,35 @@ void virgl_renderer_ctx_detach_resource(int ctx_id, int res_handle)
int virgl_renderer_resource_get_info(int res_handle,
struct virgl_renderer_resource_info *info)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
- int ret;
if (!res || !res->pipe_resource)
return EINVAL;
if (!info)
return EINVAL;
- ret = vrend_renderer_resource_get_info(res->pipe_resource,
- (struct vrend_renderer_resource_info *)info);
+ vrend_renderer_resource_get_info(res->pipe_resource,
+ (struct vrend_renderer_resource_info *)info);
info->handle = res_handle;
-#ifdef HAVE_EPOXY_EGL_H
- if (ret == 0 && use_context == CONTEXT_EGL)
- return virgl_egl_get_fourcc_for_texture(egl, info->tex_id, info->virgl_format, &info->drm_fourcc);
-#endif
- return ret;
+ if (state.winsys_initialized) {
+ return vrend_winsys_get_fourcc_for_texture(info->tex_id,
+ info->virgl_format,
+ &info->drm_fourcc);
+ }
+
+ return 0;
}
void virgl_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
uint32_t *max_size)
{
+ TRACE_FUNC();
switch (cap_set) {
case VIRGL_RENDERER_CAPSET_VIRGL:
- vrend_renderer_get_cap_set(VREND_CAP_SET, max_ver, max_size);
- break;
case VIRGL_RENDERER_CAPSET_VIRGL2:
- vrend_renderer_get_cap_set(VREND_CAP_SET2, max_ver, max_size);
+ vrend_renderer_get_cap_set(cap_set, max_ver, max_size);
break;
default:
*max_ver = 0;
@@ -373,6 +390,7 @@ void virgl_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int num_iovs,
uint32_t offset, int x, int y, int width, int height)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(resource_id);
if (!res || !res->pipe_resource)
return;
@@ -382,67 +400,44 @@ void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int nu
}
-static struct virgl_renderer_callbacks *rcbs;
-
-static void *dev_cookie;
-
-static struct vrend_if_cbs virgl_cbs;
-
static void virgl_write_fence(uint32_t fence_id)
{
- rcbs->write_fence(dev_cookie, fence_id);
+ state.cbs->write_fence(state.cookie, fence_id);
}
static virgl_renderer_gl_context create_gl_context(int scanout_idx, struct virgl_gl_ctx_param *param)
{
struct virgl_renderer_gl_ctx_param vparam;
-#ifdef HAVE_EPOXY_EGL_H
- if (use_context == CONTEXT_EGL)
- return virgl_egl_create_context(egl, param);
-#endif
-#ifdef HAVE_EPOXY_GLX_H
- if (use_context == CONTEXT_GLX)
- return virgl_glx_create_context(glx_info, param);
-#endif
+ if (state.winsys_initialized)
+ return vrend_winsys_create_context(param);
+
vparam.version = 1;
vparam.shared = param->shared;
vparam.major_ver = param->major_ver;
vparam.minor_ver = param->minor_ver;
- return rcbs->create_gl_context(dev_cookie, scanout_idx, &vparam);
+ return state.cbs->create_gl_context(state.cookie, scanout_idx, &vparam);
}
static void destroy_gl_context(virgl_renderer_gl_context ctx)
{
-#ifdef HAVE_EPOXY_EGL_H
- if (use_context == CONTEXT_EGL) {
- virgl_egl_destroy_context(egl, ctx);
- return;
- }
-#endif
-#ifdef HAVE_EPOXY_GLX_H
- if (use_context == CONTEXT_GLX) {
- virgl_glx_destroy_context(glx_info, ctx);
+ if (state.winsys_initialized) {
+ vrend_winsys_destroy_context(ctx);
return;
}
-#endif
- rcbs->destroy_gl_context(dev_cookie, ctx);
+
+ state.cbs->destroy_gl_context(state.cookie, ctx);
}
static int make_current(virgl_renderer_gl_context ctx)
{
-#ifdef HAVE_EPOXY_EGL_H
- if (use_context == CONTEXT_EGL)
- return virgl_egl_make_context_current(egl, ctx);
-#endif
-#ifdef HAVE_EPOXY_GLX_H
- if (use_context == CONTEXT_GLX)
- return virgl_glx_make_context_current(glx_info, ctx);
-#endif
- return rcbs->make_current(dev_cookie, 0, ctx);
+ if (state.winsys_initialized)
+ return vrend_winsys_make_context_current(ctx);
+
+ return state.cbs->make_current(state.cookie, 0, ctx);
}
-static struct vrend_if_cbs virgl_cbs = {
+static const struct vrend_if_cbs vrend_cbs = {
virgl_write_fence,
create_gl_context,
destroy_gl_context,
@@ -463,132 +458,152 @@ void *virgl_renderer_get_cursor_data(uint32_t resource_id, uint32_t *width, uint
void virgl_renderer_poll(void)
{
- vrend_renderer_check_fences();
+ TRACE_FUNC();
+ if (state.vrend_initialized)
+ vrend_renderer_check_fences();
}
void virgl_renderer_cleanup(UNUSED void *cookie)
{
- vrend_renderer_fini();
- virgl_context_table_cleanup();
-
-#ifdef HAVE_EPOXY_EGL_H
- if (use_context == CONTEXT_EGL) {
- virgl_egl_destroy(egl);
- egl = NULL;
- use_context = CONTEXT_NONE;
- if (gbm) {
- virgl_gbm_fini(gbm);
- gbm = NULL;
- }
- }
-#endif
-#ifdef HAVE_EPOXY_GLX_H
- if (use_context == CONTEXT_GLX) {
- virgl_glx_destroy(glx_info);
- glx_info = NULL;
- use_context = CONTEXT_NONE;
- }
-#endif
+ TRACE_FUNC();
+ if (state.vrend_initialized)
+ vrend_renderer_prepare_reset();
+
+ if (state.context_initialized)
+ virgl_context_table_cleanup();
+
+ if (state.resource_initialized)
+ virgl_resource_table_cleanup();
+
+ if (state.vrend_initialized)
+ vrend_renderer_fini();
+
+ if (state.winsys_initialized)
+ vrend_winsys_cleanup();
+
+ memset(&state, 0, sizeof(state));
}
int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks *cbs)
{
- uint32_t renderer_flags = 0;
- if (!cookie || !cbs)
- return -1;
+ TRACE_INIT();
+ TRACE_FUNC();
- if (cbs->version < 1 || cbs->version > VIRGL_RENDERER_CALLBACKS_VERSION)
- return -1;
+ int ret;
- dev_cookie = cookie;
- rcbs = cbs;
+ /* VIRGL_RENDERER_THREAD_SYNC is a hint and can be silently ignored */
+ if (!has_eventfd() || getenv("VIRGL_DISABLE_MT"))
+ flags &= ~VIRGL_RENDERER_THREAD_SYNC;
- if (flags & VIRGL_RENDERER_USE_EGL) {
-#ifdef HAVE_EPOXY_EGL_H
- int fd = -1;
- if (cbs->version >= 2 && cbs->get_drm_fd) {
- fd = cbs->get_drm_fd(cookie);
- }
+ if (state.client_initialized && (state.cookie != cookie ||
+ state.flags != flags ||
+ state.cbs != cbs))
+ return -EBUSY;
- /*
- * If the user specifies a preferred DRM fd and we can't use it, fail. If the user doesn't
- * specify an fd, it's possible to initialize EGL without one.
- */
- gbm = virgl_gbm_init(fd);
- if (fd > 0 && !gbm)
+ if (!state.client_initialized) {
+ if (!cookie || !cbs)
+ return -1;
+ if (cbs->version < 1 || cbs->version > VIRGL_RENDERER_CALLBACKS_VERSION)
return -1;
- egl = virgl_egl_init(gbm, flags & VIRGL_RENDERER_USE_SURFACELESS,
- flags & VIRGL_RENDERER_USE_GLES);
- if (!egl) {
- if (gbm) {
- virgl_gbm_fini(gbm);
- gbm = NULL;
- }
+ state.cookie = cookie;
+ state.flags = flags;
+ state.cbs = cbs;
+ state.client_initialized = true;
+ }
- return -1;
+ if (!state.resource_initialized) {
+ ret = virgl_resource_table_init(vrend_renderer_get_pipe_callbacks());
+ if (ret)
+ goto fail;
+ state.resource_initialized = true;
+ }
+
+ if (!state.context_initialized) {
+ ret = virgl_context_table_init();
+ if (ret)
+ goto fail;
+ state.context_initialized = true;
+ }
+
+ if (!state.winsys_initialized && (flags & (VIRGL_RENDERER_USE_EGL |
+ VIRGL_RENDERER_USE_GLX))) {
+ int drm_fd = -1;
+
+ if (flags & VIRGL_RENDERER_USE_EGL) {
+ if (cbs->version >= 2 && cbs->get_drm_fd)
+ drm_fd = cbs->get_drm_fd(cookie);
}
- use_context = CONTEXT_EGL;
-#else
- vrend_printf( "EGL is not supported on this platform\n");
- return -1;
-#endif
- } else if (flags & VIRGL_RENDERER_USE_GLX) {
-#ifdef HAVE_EPOXY_GLX_H
- glx_info = virgl_glx_init();
- if (!glx_info)
- return -1;
- use_context = CONTEXT_GLX;
-#else
- vrend_printf( "GLX is not supported on this platform\n");
- return -1;
-#endif
+ ret = vrend_winsys_init(flags, drm_fd);
+ if (ret) {
+ if (drm_fd >= 0)
+ close(drm_fd);
+ goto fail;
+ }
+ state.winsys_initialized = true;
}
- if (virgl_context_table_init())
- return -1;
+ if (!state.vrend_initialized) {
+ uint32_t renderer_flags = 0;
+
+ if (flags & VIRGL_RENDERER_THREAD_SYNC)
+ renderer_flags |= VREND_USE_THREAD_SYNC;
+ if (flags & VIRGL_RENDERER_USE_EXTERNAL_BLOB)
+ renderer_flags |= VREND_USE_EXTERNAL_BLOB;
+
+ ret = vrend_renderer_init(&vrend_cbs, renderer_flags);
+ if (ret)
+ goto fail;
+ state.vrend_initialized = true;
+ }
- if (flags & VIRGL_RENDERER_THREAD_SYNC)
- renderer_flags |= VREND_USE_THREAD_SYNC;
- if (flags & VIRGL_RENDERER_USE_EXTERNAL_BLOB)
- renderer_flags |= VREND_USE_EXTERNAL_BLOB;
+ return 0;
- return vrend_renderer_init(&virgl_cbs, renderer_flags);
+fail:
+ virgl_renderer_cleanup(NULL);
+ return ret;
}
int virgl_renderer_get_fd_for_texture(uint32_t tex_id, int *fd)
{
-#ifdef HAVE_EPOXY_EGL_H
- if (!egl)
- return -1;
-
- return virgl_egl_get_fd_for_texture(egl, tex_id, fd);
-#else
+ TRACE_FUNC();
+ if (state.winsys_initialized)
+ return vrend_winsys_get_fd_for_texture(tex_id, fd);
return -1;
-#endif
}
int virgl_renderer_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset)
{
-#ifdef HAVE_EPOXY_EGL_H
- if (!egl)
- return -1;
-
- return virgl_egl_get_fd_for_texture2(egl, tex_id, fd, stride, offset);
-#else
+ TRACE_FUNC();
+ if (state.winsys_initialized)
+ return vrend_winsys_get_fd_for_texture2(tex_id, fd, stride, offset);
return -1;
-#endif
}
void virgl_renderer_reset(void)
{
- vrend_renderer_reset();
+ TRACE_FUNC();
+ if (state.vrend_initialized)
+ vrend_renderer_prepare_reset();
+
+ if (state.context_initialized)
+ virgl_context_table_reset();
+
+ if (state.resource_initialized)
+ virgl_resource_table_reset();
+
+ if (state.vrend_initialized)
+ vrend_renderer_reset();
}
int virgl_renderer_get_poll_fd(void)
{
- return vrend_renderer_get_poll_fd();
+ TRACE_FUNC();
+ if (state.vrend_initialized)
+ return vrend_renderer_get_poll_fd();
+
+ return -1;
}
virgl_debug_callback_type virgl_set_debug_callback(virgl_debug_callback_type cb)
@@ -635,6 +650,7 @@ static int virgl_renderer_supported_structures(void *execute_args, uint32_t exec
int virgl_renderer_execute(void *execute_args, uint32_t execute_size)
{
+ TRACE_FUNC();
struct virgl_renderer_hdr *hdr = execute_args;
if (hdr->stype_version != 0)
return -EINVAL;
@@ -651,6 +667,7 @@ int virgl_renderer_execute(void *execute_args, uint32_t execute_size)
int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_create_blob_args *args)
{
+ TRACE_FUNC();
struct virgl_context *ctx;
struct virgl_context_blob blob;
bool has_host_storage;
@@ -732,6 +749,7 @@ int virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_cre
int virgl_renderer_resource_map(uint32_t res_handle, void **map, uint64_t *out_size)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!res || !res->pipe_resource)
return -EINVAL;
@@ -741,6 +759,7 @@ int virgl_renderer_resource_map(uint32_t res_handle, void **map, uint64_t *out_s
int virgl_renderer_resource_unmap(uint32_t res_handle)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!res || !res->pipe_resource)
return -EINVAL;
@@ -750,6 +769,7 @@ int virgl_renderer_resource_unmap(uint32_t res_handle)
int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint32_t *map_info)
{
+ TRACE_FUNC();
struct virgl_resource *res = virgl_resource_lookup(res_handle);
if (!res || !res->pipe_resource)
return -EINVAL;
@@ -777,3 +797,10 @@ virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
return 0;
}
+
+int
+virgl_renderer_export_fence(uint32_t client_fence_id, int *fd)
+{
+ TRACE_FUNC();
+ return vrend_renderer_export_fence(client_fence_id, fd);
+}
diff --git a/src/virglrenderer.h b/src/virglrenderer.h
index db4bb5c4..e7592a80 100644
--- a/src/virglrenderer.h
+++ b/src/virglrenderer.h
@@ -297,6 +297,9 @@ VIRGL_EXPORT int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint3
VIRGL_EXPORT int
virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd);
+VIRGL_EXPORT int
+virgl_renderer_export_fence(uint32_t client_fence_id, int *fd);
+
#endif /* VIRGL_RENDERER_UNSTABLE_APIS */
#endif
diff --git a/src/vrend_decode.c b/src/vrend_decode.c
index 52e90953..f60e89ba 100644
--- a/src/vrend_decode.c
+++ b/src/vrend_decode.c
@@ -26,6 +26,7 @@
#include <stdio.h>
#include <errno.h>
#include <epoxy/gl.h>
+#include <fcntl.h>
#include "util/u_memory.h"
#include "pipe/p_defines.h"
@@ -38,35 +39,28 @@
#include "tgsi/tgsi_text.h"
#include "vrend_debug.h"
#include "vrend_tweaks.h"
+#include "virgl_util.h"
/* decode side */
#define DECODE_MAX_TOKENS 8000
-struct vrend_decoder_state {
- const uint32_t *buf;
- uint32_t buf_total;
- uint32_t buf_offset;
-};
-
struct vrend_decode_ctx {
struct virgl_context base;
-
- struct vrend_decoder_state ids, *ds;
struct vrend_context *grctx;
};
-static inline uint32_t get_buf_entry(struct vrend_decode_ctx *ctx, uint32_t offset)
+static inline uint32_t get_buf_entry(const uint32_t *buf, uint32_t offset)
{
- return ctx->ds->buf[ctx->ds->buf_offset + offset];
+ return buf[offset];
}
-static inline const void *get_buf_ptr(struct vrend_decode_ctx *ctx,
- uint32_t offset)
+static inline const void *get_buf_ptr(const uint32_t *buf, uint32_t offset)
{
- return &ctx->ds->buf[ctx->ds->buf_offset + offset];
+ return &buf[offset];
}
-static int vrend_decode_create_shader(struct vrend_decode_ctx *ctx,
+static int vrend_decode_create_shader(struct vrend_context *ctx,
+ const uint32_t *buf,
uint32_t handle,
uint16_t length)
{
@@ -81,15 +75,15 @@ static int vrend_decode_create_shader(struct vrend_decode_ctx *ctx,
if (length < VIRGL_OBJ_SHADER_HDR_SIZE(0))
return EINVAL;
- type = get_buf_entry(ctx, VIRGL_OBJ_SHADER_TYPE);
- num_tokens = get_buf_entry(ctx, VIRGL_OBJ_SHADER_NUM_TOKENS);
- offlen = get_buf_entry(ctx, VIRGL_OBJ_SHADER_OFFSET);
+ type = get_buf_entry(buf, VIRGL_OBJ_SHADER_TYPE);
+ num_tokens = get_buf_entry(buf, VIRGL_OBJ_SHADER_NUM_TOKENS);
+ offlen = get_buf_entry(buf, VIRGL_OBJ_SHADER_OFFSET);
if (type == PIPE_SHADER_COMPUTE) {
- req_local_mem = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
+ req_local_mem = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
num_so_outputs = 0;
} else {
- num_so_outputs = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
+ num_so_outputs = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
if (length < VIRGL_OBJ_SHADER_HDR_SIZE(num_so_outputs))
return EINVAL;
@@ -102,16 +96,16 @@ static int vrend_decode_create_shader(struct vrend_decode_ctx *ctx,
so_info.num_outputs = num_so_outputs;
if (so_info.num_outputs) {
for (i = 0; i < 4; i++)
- so_info.stride[i] = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_STRIDE(i));
+ so_info.stride[i] = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_STRIDE(i));
for (i = 0; i < so_info.num_outputs; i++) {
- uint32_t tmp = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_OUTPUT0(i));
+ uint32_t tmp = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_OUTPUT0(i));
so_info.output[i].register_index = tmp & 0xff;
so_info.output[i].start_component = (tmp >> 8) & 0x3;
so_info.output[i].num_components = (tmp >> 10) & 0x7;
so_info.output[i].output_buffer = (tmp >> 13) & 0x7;
so_info.output[i].dst_offset = (tmp >> 16) & 0xffff;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(i));
+ tmp = get_buf_entry(buf, VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(i));
so_info.output[i].stream = (tmp & 0x3);
so_info.output[i].need_temp = so_info.output[i].num_components < 4;
}
@@ -127,50 +121,51 @@ static int vrend_decode_create_shader(struct vrend_decode_ctx *ctx,
} else
memset(&so_info, 0, sizeof(so_info));
- shd_text = get_buf_ptr(ctx, shader_offset);
- ret = vrend_create_shader(ctx->grctx, handle, &so_info, req_local_mem, (const char *)shd_text, offlen, num_tokens, type, length - shader_offset + 1);
+ shd_text = get_buf_ptr(buf, shader_offset);
+ ret = vrend_create_shader(ctx, handle, &so_info, req_local_mem, (const char *)shd_text, offlen, num_tokens, type, length - shader_offset + 1);
return ret;
}
-static int vrend_decode_create_stream_output_target(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_stream_output_target(struct vrend_context *ctx, const uint32_t *buf,
+ uint32_t handle, uint16_t length)
{
uint32_t res_handle, buffer_size, buffer_offset;
if (length != VIRGL_OBJ_STREAMOUT_SIZE)
return EINVAL;
- res_handle = get_buf_entry(ctx, VIRGL_OBJ_STREAMOUT_RES_HANDLE);
- buffer_offset = get_buf_entry(ctx, VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET);
- buffer_size = get_buf_entry(ctx, VIRGL_OBJ_STREAMOUT_BUFFER_SIZE);
+ res_handle = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_RES_HANDLE);
+ buffer_offset = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET);
+ buffer_size = get_buf_entry(buf, VIRGL_OBJ_STREAMOUT_BUFFER_SIZE);
- return vrend_create_so_target(ctx->grctx, handle, res_handle, buffer_offset,
+ return vrend_create_so_target(ctx, handle, res_handle, buffer_offset,
buffer_size);
}
-static int vrend_decode_set_framebuffer_state(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_framebuffer_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length < 2)
return EINVAL;
- int32_t nr_cbufs = get_buf_entry(ctx, VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS);
- uint32_t zsurf_handle = get_buf_entry(ctx, VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE);
+ uint32_t nr_cbufs = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS);
+ uint32_t zsurf_handle = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE);
uint32_t surf_handle[8];
- int i;
+ uint32_t i;
- if (length != (2 + nr_cbufs))
+ if (length != (2u + nr_cbufs))
return EINVAL;
if (nr_cbufs > 8)
return EINVAL;
for (i = 0; i < nr_cbufs; i++)
- surf_handle[i] = get_buf_entry(ctx, VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(i));
- vrend_set_framebuffer_state(ctx->grctx, nr_cbufs, surf_handle, zsurf_handle);
+ surf_handle[i] = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(i));
+ vrend_set_framebuffer_state(ctx, nr_cbufs, surf_handle, zsurf_handle);
return 0;
}
-static int vrend_decode_set_framebuffer_state_no_attach(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_framebuffer_state_no_attach(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t width, height;
uint32_t layers, samples;
@@ -179,19 +174,19 @@ static int vrend_decode_set_framebuffer_state_no_attach(struct vrend_decode_ctx
if (length != VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE)
return EINVAL;
- tmp = get_buf_entry(ctx, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH_HEIGHT);
+ tmp = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH_HEIGHT);
width = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH(tmp);
height = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_HEIGHT(tmp);
- tmp = get_buf_entry(ctx, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS_SAMPLES);
+ tmp = get_buf_entry(buf, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS_SAMPLES);
layers = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS(tmp);
samples = VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SAMPLES(tmp);
- vrend_set_framebuffer_state_no_attach(ctx->grctx, width, height, layers, samples);
+ vrend_set_framebuffer_state_no_attach(ctx, width, height, layers, samples);
return 0;
}
-static int vrend_decode_clear(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_clear(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
union pipe_color_union color;
double depth;
@@ -200,18 +195,18 @@ static int vrend_decode_clear(struct vrend_decode_ctx *ctx, int length)
if (length != VIRGL_OBJ_CLEAR_SIZE)
return EINVAL;
- buffers = get_buf_entry(ctx, VIRGL_OBJ_CLEAR_BUFFERS);
+ buffers = get_buf_entry(buf, VIRGL_OBJ_CLEAR_BUFFERS);
for (i = 0; i < 4; i++)
- color.ui[i] = get_buf_entry(ctx, VIRGL_OBJ_CLEAR_COLOR_0 + i);
- const void *depth_ptr = get_buf_ptr(ctx, VIRGL_OBJ_CLEAR_DEPTH_0);
+ color.ui[i] = get_buf_entry(buf, VIRGL_OBJ_CLEAR_COLOR_0 + i);
+ const void *depth_ptr = get_buf_ptr(buf, VIRGL_OBJ_CLEAR_DEPTH_0);
memcpy(&depth, depth_ptr, sizeof(double));
- stencil = get_buf_entry(ctx, VIRGL_OBJ_CLEAR_STENCIL);
+ stencil = get_buf_entry(buf, VIRGL_OBJ_CLEAR_STENCIL);
- vrend_clear(ctx->grctx, buffers, &color, depth, stencil);
+ vrend_clear(ctx, buffers, &color, depth, stencil);
return 0;
}
-static int vrend_decode_clear_texture(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_clear_texture(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_box box;
uint32_t handle;
@@ -221,20 +216,20 @@ static int vrend_decode_clear_texture(struct vrend_decode_ctx *ctx, int length)
if (length != VIRGL_CLEAR_TEXTURE_SIZE)
return EINVAL;
- handle = get_buf_entry(ctx, VIRGL_TEXTURE_HANDLE);
- level = get_buf_entry(ctx, VIRGL_TEXTURE_LEVEL);
- box.x = get_buf_entry(ctx, VIRGL_TEXTURE_SRC_X);
- box.y = get_buf_entry(ctx, VIRGL_TEXTURE_SRC_Y);
- box.z = get_buf_entry(ctx, VIRGL_TEXTURE_SRC_Z);
- box.width = get_buf_entry(ctx, VIRGL_TEXTURE_SRC_W);
- box.height = get_buf_entry(ctx, VIRGL_TEXTURE_SRC_H);
- box.depth = get_buf_entry(ctx, VIRGL_TEXTURE_SRC_D);
- arr[0] = get_buf_entry(ctx, VIRGL_TEXTURE_ARRAY_A);
- arr[1] = get_buf_entry(ctx, VIRGL_TEXTURE_ARRAY_B);
- arr[2] = get_buf_entry(ctx, VIRGL_TEXTURE_ARRAY_C);
- arr[3] = get_buf_entry(ctx, VIRGL_TEXTURE_ARRAY_D);
-
- vrend_clear_texture(ctx->grctx, handle, level, &box, (void *) &arr);
+ handle = get_buf_entry(buf, VIRGL_TEXTURE_HANDLE);
+ level = get_buf_entry(buf, VIRGL_TEXTURE_LEVEL);
+ box.x = get_buf_entry(buf, VIRGL_TEXTURE_SRC_X);
+ box.y = get_buf_entry(buf, VIRGL_TEXTURE_SRC_Y);
+ box.z = get_buf_entry(buf, VIRGL_TEXTURE_SRC_Z);
+ box.width = get_buf_entry(buf, VIRGL_TEXTURE_SRC_W);
+ box.height = get_buf_entry(buf, VIRGL_TEXTURE_SRC_H);
+ box.depth = get_buf_entry(buf, VIRGL_TEXTURE_SRC_D);
+ arr[0] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_A);
+ arr[1] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_B);
+ arr[2] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_C);
+ arr[3] = get_buf_entry(buf, VIRGL_TEXTURE_ARRAY_D);
+
+ vrend_clear_texture(ctx, handle, level, &box, (void *) &arr);
return 0;
}
@@ -245,7 +240,7 @@ static float uif(unsigned int ui)
return myuif.f;
}
-static int vrend_decode_set_viewport_state(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_viewport_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_viewport_state vps[PIPE_MAX_VIEWPORTS];
uint i, v;
@@ -257,7 +252,7 @@ static int vrend_decode_set_viewport_state(struct vrend_decode_ctx *ctx, int len
return EINVAL;
num_viewports = (length - 1) / 6;
- start_slot = get_buf_entry(ctx, VIRGL_SET_VIEWPORT_START_SLOT);
+ start_slot = get_buf_entry(buf, VIRGL_SET_VIEWPORT_START_SLOT);
if (num_viewports > PIPE_MAX_VIEWPORTS ||
start_slot > (PIPE_MAX_VIEWPORTS - num_viewports))
@@ -265,27 +260,27 @@ static int vrend_decode_set_viewport_state(struct vrend_decode_ctx *ctx, int len
for (v = 0; v < num_viewports; v++) {
for (i = 0; i < 3; i++)
- vps[v].scale[i] = uif(get_buf_entry(ctx, VIRGL_SET_VIEWPORT_STATE_SCALE_0(v) + i));
+ vps[v].scale[i] = uif(get_buf_entry(buf, VIRGL_SET_VIEWPORT_STATE_SCALE_0(v) + i));
for (i = 0; i < 3; i++)
- vps[v].translate[i] = uif(get_buf_entry(ctx, VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(v) + i));
+ vps[v].translate[i] = uif(get_buf_entry(buf, VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(v) + i));
}
- vrend_set_viewport_states(ctx->grctx, start_slot, num_viewports, vps);
+ vrend_set_viewport_states(ctx, start_slot, num_viewports, vps);
return 0;
}
-static int vrend_decode_set_index_buffer(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_index_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1 && length != 3)
return EINVAL;
- vrend_set_index_buffer(ctx->grctx,
- get_buf_entry(ctx, VIRGL_SET_INDEX_BUFFER_HANDLE),
- (length == 3) ? get_buf_entry(ctx, VIRGL_SET_INDEX_BUFFER_INDEX_SIZE) : 0,
- (length == 3) ? get_buf_entry(ctx, VIRGL_SET_INDEX_BUFFER_OFFSET) : 0);
+ vrend_set_index_buffer(ctx,
+ get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_HANDLE),
+ (length == 3) ? get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_INDEX_SIZE) : 0,
+ (length == 3) ? get_buf_entry(buf, VIRGL_SET_INDEX_BUFFER_OFFSET) : 0);
return 0;
}
-static int vrend_decode_set_constant_buffer(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_set_constant_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t shader;
uint32_t index;
@@ -294,26 +289,26 @@ static int vrend_decode_set_constant_buffer(struct vrend_decode_ctx *ctx, uint16
if (length < 2)
return EINVAL;
- shader = get_buf_entry(ctx, VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE);
- index = get_buf_entry(ctx, VIRGL_SET_CONSTANT_BUFFER_INDEX);
+ shader = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE);
+ index = get_buf_entry(buf, VIRGL_SET_CONSTANT_BUFFER_INDEX);
if (shader >= PIPE_SHADER_TYPES)
return EINVAL;
- vrend_set_constants(ctx->grctx, shader, index, nc, get_buf_ptr(ctx, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
+ vrend_set_constants(ctx, shader, index, nc, get_buf_ptr(buf, VIRGL_SET_CONSTANT_BUFFER_DATA_START));
return 0;
}
-static int vrend_decode_set_uniform_buffer(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_uniform_buffer(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != VIRGL_SET_UNIFORM_BUFFER_SIZE)
return EINVAL;
- uint32_t shader = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE);
- uint32_t index = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_INDEX);
- uint32_t offset = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_OFFSET);
- uint32_t blength = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_LENGTH);
- uint32_t handle = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE);
+ uint32_t shader = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE);
+ uint32_t index = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_INDEX);
+ uint32_t offset = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_OFFSET);
+ uint32_t blength = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_LENGTH);
+ uint32_t handle = get_buf_entry(buf, VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE);
if (shader >= PIPE_SHADER_TYPES)
return EINVAL;
@@ -321,11 +316,11 @@ static int vrend_decode_set_uniform_buffer(struct vrend_decode_ctx *ctx, int len
if (index >= PIPE_MAX_CONSTANT_BUFFERS)
return EINVAL;
- vrend_set_uniform_buffer(ctx->grctx, shader, index, offset, blength, handle);
+ vrend_set_uniform_buffer(ctx, shader, index, offset, blength, handle);
return 0;
}
-static int vrend_decode_set_vertex_buffers(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_set_vertex_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
int num_vbo;
int i;
@@ -339,16 +334,16 @@ static int vrend_decode_set_vertex_buffers(struct vrend_decode_ctx *ctx, uint16_
return EINVAL;
for (i = 0; i < num_vbo; i++) {
- vrend_set_single_vbo(ctx->grctx, i,
- get_buf_entry(ctx, VIRGL_SET_VERTEX_BUFFER_STRIDE(i)),
- get_buf_entry(ctx, VIRGL_SET_VERTEX_BUFFER_OFFSET(i)),
- get_buf_entry(ctx, VIRGL_SET_VERTEX_BUFFER_HANDLE(i)));
+ vrend_set_single_vbo(ctx, i,
+ get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_STRIDE(i)),
+ get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_OFFSET(i)),
+ get_buf_entry(buf, VIRGL_SET_VERTEX_BUFFER_HANDLE(i)));
}
- vrend_set_num_vbo(ctx->grctx, num_vbo);
+ vrend_set_num_vbo(ctx, num_vbo);
return 0;
}
-static int vrend_decode_set_sampler_views(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_set_sampler_views(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t num_samps;
uint32_t i;
@@ -358,8 +353,8 @@ static int vrend_decode_set_sampler_views(struct vrend_decode_ctx *ctx, uint16_t
if (length < 2)
return EINVAL;
num_samps = length - 2;
- shader_type = get_buf_entry(ctx, VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE);
- start_slot = get_buf_entry(ctx, VIRGL_SET_SAMPLER_VIEWS_START_SLOT);
+ shader_type = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE);
+ start_slot = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_START_SLOT);
if (shader_type >= PIPE_SHADER_TYPES)
return EINVAL;
@@ -369,31 +364,32 @@ static int vrend_decode_set_sampler_views(struct vrend_decode_ctx *ctx, uint16_t
return EINVAL;
for (i = 0; i < num_samps; i++) {
- uint32_t handle = get_buf_entry(ctx, VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE + i);
- vrend_set_single_sampler_view(ctx->grctx, shader_type, i + start_slot, handle);
+ uint32_t handle = get_buf_entry(buf, VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE + i);
+ vrend_set_single_sampler_view(ctx, shader_type, i + start_slot, handle);
}
- vrend_set_num_sampler_views(ctx->grctx, shader_type, start_slot, num_samps);
+ vrend_set_num_sampler_views(ctx, shader_type, start_slot, num_samps);
return 0;
}
-static void vrend_decode_transfer_common(struct vrend_decode_ctx *ctx,
+static void vrend_decode_transfer_common(const uint32_t *buf,
uint32_t *dst_handle,
struct vrend_transfer_info *info)
{
- *dst_handle = get_buf_entry(ctx, VIRGL_RESOURCE_IW_RES_HANDLE);
+ *dst_handle = get_buf_entry(buf, VIRGL_RESOURCE_IW_RES_HANDLE);
- info->level = get_buf_entry(ctx, VIRGL_RESOURCE_IW_LEVEL);
- info->stride = get_buf_entry(ctx, VIRGL_RESOURCE_IW_STRIDE);
- info->layer_stride = get_buf_entry(ctx, VIRGL_RESOURCE_IW_LAYER_STRIDE);
- info->box->x = get_buf_entry(ctx, VIRGL_RESOURCE_IW_X);
- info->box->y = get_buf_entry(ctx, VIRGL_RESOURCE_IW_Y);
- info->box->z = get_buf_entry(ctx, VIRGL_RESOURCE_IW_Z);
- info->box->width = get_buf_entry(ctx, VIRGL_RESOURCE_IW_W);
- info->box->height = get_buf_entry(ctx, VIRGL_RESOURCE_IW_H);
- info->box->depth = get_buf_entry(ctx, VIRGL_RESOURCE_IW_D);
+ info->level = get_buf_entry(buf, VIRGL_RESOURCE_IW_LEVEL);
+ info->stride = get_buf_entry(buf, VIRGL_RESOURCE_IW_STRIDE);
+ info->layer_stride = get_buf_entry(buf, VIRGL_RESOURCE_IW_LAYER_STRIDE);
+ info->box->x = get_buf_entry(buf, VIRGL_RESOURCE_IW_X);
+ info->box->y = get_buf_entry(buf, VIRGL_RESOURCE_IW_Y);
+ info->box->z = get_buf_entry(buf, VIRGL_RESOURCE_IW_Z);
+ info->box->width = get_buf_entry(buf, VIRGL_RESOURCE_IW_W);
+ info->box->height = get_buf_entry(buf, VIRGL_RESOURCE_IW_H);
+ info->box->depth = get_buf_entry(buf, VIRGL_RESOURCE_IW_D);
}
-static int vrend_decode_resource_inline_write(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_resource_inline_write(struct vrend_context *ctx, const uint32_t *buf,
+ uint32_t length)
{
struct pipe_box box;
uint32_t dst_handle;
@@ -405,14 +401,11 @@ static int vrend_decode_resource_inline_write(struct vrend_decode_ctx *ctx, uint
if (length < 12)
return EINVAL;
- if (length + ctx->ds->buf_offset > ctx->ds->buf_total)
- return EINVAL;
-
memset(&info, 0, sizeof(info));
info.box = &box;
- vrend_decode_transfer_common(ctx, &dst_handle, &info);
+ vrend_decode_transfer_common(buf, &dst_handle, &info);
data_len = (length - 11) * 4;
- data = get_buf_ptr(ctx, VIRGL_RESOURCE_IW_DATA_START);
+ data = get_buf_ptr(buf, VIRGL_RESOURCE_IW_DATA_START);
info.offset = 0;
@@ -421,10 +414,10 @@ static int vrend_decode_resource_inline_write(struct vrend_decode_ctx *ctx, uint
info.iovec = &dataiovec;
info.iovec_cnt = 1;
- return vrend_transfer_inline_write(ctx->grctx, dst_handle, &info);
+ return vrend_transfer_inline_write(ctx, dst_handle, &info);
}
-static int vrend_decode_draw_vbo(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_draw_vbo(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_draw_info info;
uint32_t cso;
@@ -434,38 +427,38 @@ static int vrend_decode_draw_vbo(struct vrend_decode_ctx *ctx, int length)
return EINVAL;
memset(&info, 0, sizeof(struct pipe_draw_info));
- info.start = get_buf_entry(ctx, VIRGL_DRAW_VBO_START);
- info.count = get_buf_entry(ctx, VIRGL_DRAW_VBO_COUNT);
- info.mode = get_buf_entry(ctx, VIRGL_DRAW_VBO_MODE);
- info.indexed = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDEXED);
- info.instance_count = get_buf_entry(ctx, VIRGL_DRAW_VBO_INSTANCE_COUNT);
- info.index_bias = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDEX_BIAS);
- info.start_instance = get_buf_entry(ctx, VIRGL_DRAW_VBO_START_INSTANCE);
- info.primitive_restart = get_buf_entry(ctx, VIRGL_DRAW_VBO_PRIMITIVE_RESTART);
- info.restart_index = get_buf_entry(ctx, VIRGL_DRAW_VBO_RESTART_INDEX);
- info.min_index = get_buf_entry(ctx, VIRGL_DRAW_VBO_MIN_INDEX);
- info.max_index = get_buf_entry(ctx, VIRGL_DRAW_VBO_MAX_INDEX);
+ info.start = get_buf_entry(buf, VIRGL_DRAW_VBO_START);
+ info.count = get_buf_entry(buf, VIRGL_DRAW_VBO_COUNT);
+ info.mode = get_buf_entry(buf, VIRGL_DRAW_VBO_MODE);
+ info.indexed = !!get_buf_entry(buf, VIRGL_DRAW_VBO_INDEXED);
+ info.instance_count = get_buf_entry(buf, VIRGL_DRAW_VBO_INSTANCE_COUNT);
+ info.index_bias = get_buf_entry(buf, VIRGL_DRAW_VBO_INDEX_BIAS);
+ info.start_instance = get_buf_entry(buf, VIRGL_DRAW_VBO_START_INSTANCE);
+ info.primitive_restart = !!get_buf_entry(buf, VIRGL_DRAW_VBO_PRIMITIVE_RESTART);
+ info.restart_index = get_buf_entry(buf, VIRGL_DRAW_VBO_RESTART_INDEX);
+ info.min_index = get_buf_entry(buf, VIRGL_DRAW_VBO_MIN_INDEX);
+ info.max_index = get_buf_entry(buf, VIRGL_DRAW_VBO_MAX_INDEX);
if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
- info.vertices_per_patch = get_buf_entry(ctx, VIRGL_DRAW_VBO_VERTICES_PER_PATCH);
- info.drawid = get_buf_entry(ctx, VIRGL_DRAW_VBO_DRAWID);
+ info.vertices_per_patch = get_buf_entry(buf, VIRGL_DRAW_VBO_VERTICES_PER_PATCH);
+ info.drawid = get_buf_entry(buf, VIRGL_DRAW_VBO_DRAWID);
}
if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
- handle = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDIRECT_HANDLE);
- info.indirect.offset = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDIRECT_OFFSET);
- info.indirect.stride = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDIRECT_STRIDE);
- info.indirect.draw_count = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT);
- info.indirect.indirect_draw_count_offset = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_OFFSET);
- indirect_draw_count_handle = get_buf_entry(ctx, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_HANDLE);
+ handle = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_HANDLE);
+ info.indirect.offset = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_OFFSET);
+ info.indirect.stride = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_STRIDE);
+ info.indirect.draw_count = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT);
+ info.indirect.indirect_draw_count_offset = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_OFFSET);
+ indirect_draw_count_handle = get_buf_entry(buf, VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_HANDLE);
}
- cso = get_buf_entry(ctx, VIRGL_DRAW_VBO_COUNT_FROM_SO);
+ cso = get_buf_entry(buf, VIRGL_DRAW_VBO_COUNT_FROM_SO);
- return vrend_draw_vbo(ctx->grctx, &info, cso, handle, indirect_draw_count_handle);
+ return vrend_draw_vbo(ctx, &info, cso, handle, indirect_draw_count_handle);
}
-static int vrend_decode_create_blend(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_blend(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
struct pipe_blend_state *blend_state;
uint32_t tmp;
@@ -479,18 +472,18 @@ static int vrend_decode_create_blend(struct vrend_decode_ctx *ctx, uint32_t hand
if (!blend_state)
return ENOMEM;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_BLEND_S0);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S0);
blend_state->independent_blend_enable = (tmp & 1);
blend_state->logicop_enable = (tmp >> 1) & 0x1;
blend_state->dither = (tmp >> 2) & 0x1;
blend_state->alpha_to_coverage = (tmp >> 3) & 0x1;
blend_state->alpha_to_one = (tmp >> 4) & 0x1;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_BLEND_S1);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S1);
blend_state->logicop_func = tmp & 0xf;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
- tmp = get_buf_entry(ctx, VIRGL_OBJ_BLEND_S2(i));
+ tmp = get_buf_entry(buf, VIRGL_OBJ_BLEND_S2(i));
blend_state->rt[i].blend_enable = tmp & 0x1;
blend_state->rt[i].rgb_func = (tmp >> 1) & 0x7;
blend_state->rt[i].rgb_src_factor = (tmp >> 4) & 0x1f;
@@ -501,7 +494,7 @@ static int vrend_decode_create_blend(struct vrend_decode_ctx *ctx, uint32_t hand
blend_state->rt[i].colormask = (tmp >> 27) & 0xf;
}
- tmp = vrend_renderer_object_insert(ctx->grctx, blend_state, handle,
+ tmp = vrend_renderer_object_insert(ctx, blend_state, handle,
VIRGL_OBJECT_BLEND);
if (tmp == 0) {
FREE(blend_state);
@@ -510,7 +503,7 @@ static int vrend_decode_create_blend(struct vrend_decode_ctx *ctx, uint32_t hand
return 0;
}
-static int vrend_decode_create_dsa(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_dsa(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
int i;
struct pipe_depth_stencil_alpha_state *dsa_state;
@@ -523,7 +516,7 @@ static int vrend_decode_create_dsa(struct vrend_decode_ctx *ctx, uint32_t handle
if (!dsa_state)
return ENOMEM;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_DSA_S0);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_S0);
dsa_state->depth.enabled = tmp & 0x1;
dsa_state->depth.writemask = (tmp >> 1) & 0x1;
dsa_state->depth.func = (tmp >> 2) & 0x7;
@@ -532,7 +525,7 @@ static int vrend_decode_create_dsa(struct vrend_decode_ctx *ctx, uint32_t handle
dsa_state->alpha.func = (tmp >> 9) & 0x7;
for (i = 0; i < 2; i++) {
- tmp = get_buf_entry(ctx, VIRGL_OBJ_DSA_S1 + i);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_S1 + i);
dsa_state->stencil[i].enabled = tmp & 0x1;
dsa_state->stencil[i].func = (tmp >> 1) & 0x7;
dsa_state->stencil[i].fail_op = (tmp >> 4) & 0x7;
@@ -542,10 +535,10 @@ static int vrend_decode_create_dsa(struct vrend_decode_ctx *ctx, uint32_t handle
dsa_state->stencil[i].writemask = (tmp >> 21) & 0xff;
}
- tmp = get_buf_entry(ctx, VIRGL_OBJ_DSA_ALPHA_REF);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_DSA_ALPHA_REF);
dsa_state->alpha.ref_value = uif(tmp);
- tmp = vrend_renderer_object_insert(ctx->grctx, dsa_state, handle,
+ tmp = vrend_renderer_object_insert(ctx, dsa_state, handle,
VIRGL_OBJECT_DSA);
if (tmp == 0) {
FREE(dsa_state);
@@ -554,7 +547,7 @@ static int vrend_decode_create_dsa(struct vrend_decode_ctx *ctx, uint32_t handle
return 0;
}
-static int vrend_decode_create_rasterizer(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_rasterizer(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
struct pipe_rasterizer_state *rs_state;
uint32_t tmp;
@@ -566,7 +559,7 @@ static int vrend_decode_create_rasterizer(struct vrend_decode_ctx *ctx, uint32_t
if (!rs_state)
return ENOMEM;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_RS_S0);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_RS_S0);
#define ebit(name, bit) rs_state->name = (tmp >> bit) & 0x1
#define emask(name, bit, mask) rs_state->name = (tmp >> bit) & mask
@@ -599,19 +592,19 @@ static int vrend_decode_create_rasterizer(struct vrend_decode_ctx *ctx, uint32_t
ebit(half_pixel_center, 29);
ebit(bottom_edge_rule, 30);
ebit(force_persample_interp, 31);
- rs_state->point_size = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_POINT_SIZE));
- rs_state->sprite_coord_enable = get_buf_entry(ctx, VIRGL_OBJ_RS_SPRITE_COORD_ENABLE);
- tmp = get_buf_entry(ctx, VIRGL_OBJ_RS_S3);
+ rs_state->point_size = uif(get_buf_entry(buf, VIRGL_OBJ_RS_POINT_SIZE));
+ rs_state->sprite_coord_enable = get_buf_entry(buf, VIRGL_OBJ_RS_SPRITE_COORD_ENABLE);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_RS_S3);
emask(line_stipple_pattern, 0, 0xffff);
emask(line_stipple_factor, 16, 0xff);
emask(clip_plane_enable, 24, 0xff);
- rs_state->line_width = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_LINE_WIDTH));
- rs_state->offset_units = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_OFFSET_UNITS));
- rs_state->offset_scale = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_OFFSET_SCALE));
- rs_state->offset_clamp = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_OFFSET_CLAMP));
+ rs_state->line_width = uif(get_buf_entry(buf, VIRGL_OBJ_RS_LINE_WIDTH));
+ rs_state->offset_units = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_UNITS));
+ rs_state->offset_scale = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_SCALE));
+ rs_state->offset_clamp = uif(get_buf_entry(buf, VIRGL_OBJ_RS_OFFSET_CLAMP));
- tmp = vrend_renderer_object_insert(ctx->grctx, rs_state, handle,
+ tmp = vrend_renderer_object_insert(ctx, rs_state, handle,
VIRGL_OBJECT_RASTERIZER);
if (tmp == 0) {
FREE(rs_state);
@@ -620,7 +613,7 @@ static int vrend_decode_create_rasterizer(struct vrend_decode_ctx *ctx, uint32_t
return 0;
}
-static int vrend_decode_create_surface(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_surface(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
uint32_t res_handle, format, val0, val1;
int ret;
@@ -628,31 +621,31 @@ static int vrend_decode_create_surface(struct vrend_decode_ctx *ctx, uint32_t ha
if (length != VIRGL_OBJ_SURFACE_SIZE)
return EINVAL;
- res_handle = get_buf_entry(ctx, VIRGL_OBJ_SURFACE_RES_HANDLE);
- format = get_buf_entry(ctx, VIRGL_OBJ_SURFACE_FORMAT);
+ res_handle = get_buf_entry(buf, VIRGL_OBJ_SURFACE_RES_HANDLE);
+ format = get_buf_entry(buf, VIRGL_OBJ_SURFACE_FORMAT);
/* decide later if these are texture or buffer */
- val0 = get_buf_entry(ctx, VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT);
- val1 = get_buf_entry(ctx, VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT);
- ret = vrend_create_surface(ctx->grctx, handle, res_handle, format, val0, val1);
+ val0 = get_buf_entry(buf, VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT);
+ val1 = get_buf_entry(buf, VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT);
+ ret = vrend_create_surface(ctx, handle, res_handle, format, val0, val1);
return ret;
}
-static int vrend_decode_create_sampler_view(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_sampler_view(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
uint32_t res_handle, format, val0, val1, swizzle_packed;
if (length != VIRGL_OBJ_SAMPLER_VIEW_SIZE)
return EINVAL;
- res_handle = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE);
- format = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_VIEW_FORMAT);
- val0 = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT);
- val1 = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT);
- swizzle_packed = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE);
- return vrend_create_sampler_view(ctx->grctx, handle, res_handle, format, val0, val1,swizzle_packed);
+ res_handle = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE);
+ format = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_FORMAT);
+ val0 = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT);
+ val1 = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT);
+ swizzle_packed = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE);
+ return vrend_create_sampler_view(ctx, handle, res_handle, format, val0, val1,swizzle_packed);
}
-static int vrend_decode_create_sampler_state(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_sampler_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
struct pipe_sampler_state state;
int i;
@@ -660,7 +653,7 @@ static int vrend_decode_create_sampler_state(struct vrend_decode_ctx *ctx, uint3
if (length != VIRGL_OBJ_SAMPLER_STATE_SIZE)
return EINVAL;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_STATE_S0);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_S0);
state.wrap_s = tmp & 0x7;
state.wrap_t = (tmp >> 3) & 0x7;
state.wrap_r = (tmp >> 6) & 0x7;
@@ -671,22 +664,22 @@ static int vrend_decode_create_sampler_state(struct vrend_decode_ctx *ctx, uint3
state.compare_func = (tmp >> 16) & 0x7;
state.seamless_cube_map = (tmp >> 19) & 0x1;
- state.lod_bias = uif(get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS));
- state.min_lod = uif(get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_STATE_MIN_LOD));
- state.max_lod = uif(get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_STATE_MAX_LOD));
+ state.lod_bias = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS));
+ state.min_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MIN_LOD));
+ state.max_lod = uif(get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_MAX_LOD));
for (i = 0; i < 4; i++)
- state.border_color.ui[i] = get_buf_entry(ctx, VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(i));
+ state.border_color.ui[i] = get_buf_entry(buf, VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(i));
if (state.min_mip_filter != PIPE_TEX_MIPFILTER_NONE &&
state.min_mip_filter != PIPE_TEX_MIPFILTER_LINEAR &&
state.min_mip_filter != PIPE_TEX_MIPFILTER_NEAREST)
return EINVAL;
- return vrend_create_sampler_state(ctx->grctx, handle, &state);
+ return vrend_create_sampler_state(ctx, handle, &state);
}
-static int vrend_decode_create_ve(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_ve(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
struct pipe_vertex_element *ve = NULL;
int num_elements;
@@ -708,26 +701,26 @@ static int vrend_decode_create_ve(struct vrend_decode_ctx *ctx, uint32_t handle,
return ENOMEM;
for (i = 0; i < num_elements; i++) {
- ve[i].src_offset = get_buf_entry(ctx, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(i));
- ve[i].instance_divisor = get_buf_entry(ctx, VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(i));
- ve[i].vertex_buffer_index = get_buf_entry(ctx, VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(i));
+ ve[i].src_offset = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(i));
+ ve[i].instance_divisor = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(i));
+ ve[i].vertex_buffer_index = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(i));
if (ve[i].vertex_buffer_index >= PIPE_MAX_ATTRIBS) {
FREE(ve);
return EINVAL;
}
- ve[i].src_format = get_buf_entry(ctx, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(i));
+ ve[i].src_format = get_buf_entry(buf, VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(i));
}
}
- ret = vrend_create_vertex_elements_state(ctx->grctx, handle, num_elements, ve);
+ ret = vrend_create_vertex_elements_state(ctx, handle, num_elements, ve);
FREE(ve);
return ret;
}
-static int vrend_decode_create_query(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
+static int vrend_decode_create_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t handle, uint16_t length)
{
uint32_t query_type;
uint32_t query_index;
@@ -738,62 +731,64 @@ static int vrend_decode_create_query(struct vrend_decode_ctx *ctx, uint32_t hand
if (length != VIRGL_OBJ_QUERY_SIZE)
return EINVAL;
- tmp = get_buf_entry(ctx, VIRGL_OBJ_QUERY_TYPE_INDEX);
+ tmp = get_buf_entry(buf, VIRGL_OBJ_QUERY_TYPE_INDEX);
query_type = VIRGL_OBJ_QUERY_TYPE(tmp);
query_index = (tmp >> 16) & 0xffff;
- offset = get_buf_entry(ctx, VIRGL_OBJ_QUERY_OFFSET);
- res_handle = get_buf_entry(ctx, VIRGL_OBJ_QUERY_RES_HANDLE);
+ offset = get_buf_entry(buf, VIRGL_OBJ_QUERY_OFFSET);
+ res_handle = get_buf_entry(buf, VIRGL_OBJ_QUERY_RES_HANDLE);
- return vrend_create_query(ctx->grctx, handle, query_type, query_index, res_handle, offset);
+ return vrend_create_query(ctx, handle, query_type, query_index, res_handle, offset);
}
-static int vrend_decode_create_object(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_create_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length < 1)
return EINVAL;
- uint32_t header = get_buf_entry(ctx, VIRGL_OBJ_CREATE_HEADER);
- uint32_t handle = get_buf_entry(ctx, VIRGL_OBJ_CREATE_HANDLE);
+ uint32_t header = get_buf_entry(buf, VIRGL_OBJ_CREATE_HEADER);
+ uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_CREATE_HANDLE);
uint8_t obj_type = (header >> 8) & 0xff;
int ret = 0;
if (handle == 0)
return EINVAL;
- VREND_DEBUG(dbg_object, ctx->grctx," CREATE %-18s handle:0x%x len:%d\n",
+ VREND_DEBUG(dbg_object, ctx," CREATE %-18s handle:0x%x len:%d\n",
vrend_get_object_type_name(obj_type), handle, length);
+ TRACE_SCOPE("CREATE %-18s", vrend_get_object_type_name(obj_type));
+
switch (obj_type){
case VIRGL_OBJECT_BLEND:
- ret = vrend_decode_create_blend(ctx, handle, length);
+ ret = vrend_decode_create_blend(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_DSA:
- ret = vrend_decode_create_dsa(ctx, handle, length);
+ ret = vrend_decode_create_dsa(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_RASTERIZER:
- ret = vrend_decode_create_rasterizer(ctx, handle, length);
+ ret = vrend_decode_create_rasterizer(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_SHADER:
- ret = vrend_decode_create_shader(ctx, handle, length);
+ ret = vrend_decode_create_shader(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_VERTEX_ELEMENTS:
- ret = vrend_decode_create_ve(ctx, handle, length);
+ ret = vrend_decode_create_ve(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_SURFACE:
- ret = vrend_decode_create_surface(ctx, handle, length);
+ ret = vrend_decode_create_surface(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_SAMPLER_VIEW:
- ret = vrend_decode_create_sampler_view(ctx, handle, length);
+ ret = vrend_decode_create_sampler_view(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_SAMPLER_STATE:
- ret = vrend_decode_create_sampler_state(ctx, handle, length);
+ ret = vrend_decode_create_sampler_state(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_QUERY:
- ret = vrend_decode_create_query(ctx, handle, length);
+ ret = vrend_decode_create_query(ctx, buf, handle, length);
break;
case VIRGL_OBJECT_STREAMOUT_TARGET:
- ret = vrend_decode_create_stream_output_target(ctx, handle, length);
+ ret = vrend_decode_create_stream_output_target(ctx, buf, handle, length);
break;
default:
return EINVAL;
@@ -802,31 +797,31 @@ static int vrend_decode_create_object(struct vrend_decode_ctx *ctx, int length)
return ret;
}
-static int vrend_decode_bind_object(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_bind_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t header = get_buf_entry(ctx, VIRGL_OBJ_BIND_HEADER);
- uint32_t handle = get_buf_entry(ctx, VIRGL_OBJ_BIND_HANDLE);
+ uint32_t header = get_buf_entry(buf, VIRGL_OBJ_BIND_HEADER);
+ uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_BIND_HANDLE);
uint8_t obj_type = (header >> 8) & 0xff;
- VREND_DEBUG(dbg_object, ctx->grctx,
+ VREND_DEBUG(dbg_object, ctx,
" BIND %-20s handle:0x%x len:%d\n",
vrend_get_object_type_name(obj_type), handle, length);
switch (obj_type) {
case VIRGL_OBJECT_BLEND:
- vrend_object_bind_blend(ctx->grctx, handle);
+ vrend_object_bind_blend(ctx, handle);
break;
case VIRGL_OBJECT_DSA:
- vrend_object_bind_dsa(ctx->grctx, handle);
+ vrend_object_bind_dsa(ctx, handle);
break;
case VIRGL_OBJECT_RASTERIZER:
- vrend_object_bind_rasterizer(ctx->grctx, handle);
+ vrend_object_bind_rasterizer(ctx, handle);
break;
case VIRGL_OBJECT_VERTEX_ELEMENTS:
- vrend_bind_vertex_elements_state(ctx->grctx, handle);
+ vrend_bind_vertex_elements_state(ctx, handle);
break;
default:
return EINVAL;
@@ -835,37 +830,37 @@ static int vrend_decode_bind_object(struct vrend_decode_ctx *ctx, uint16_t lengt
return 0;
}
-static int vrend_decode_destroy_object(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_destroy_object(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t handle = get_buf_entry(ctx, VIRGL_OBJ_DESTROY_HANDLE);
+ uint32_t handle = get_buf_entry(buf, VIRGL_OBJ_DESTROY_HANDLE);
- VREND_DEBUG_EXT(dbg_object, ctx->grctx,
- uint32_t obj = (get_buf_entry(ctx, 0) >> 8) & 0xFF;
+ VREND_DEBUG_EXT(dbg_object, ctx,
+ uint32_t obj = (get_buf_entry(buf, 0) >> 8) & 0xFF;
vrend_printf(" DESTROY %-17s handle:0x%x\n",
vrend_get_object_type_name(obj), handle));
- vrend_renderer_object_destroy(ctx->grctx, handle);
+ vrend_renderer_object_destroy(ctx, handle);
return 0;
}
-static int vrend_decode_set_stencil_ref(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_stencil_ref(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != VIRGL_SET_STENCIL_REF_SIZE)
return EINVAL;
struct pipe_stencil_ref ref;
- uint32_t val = get_buf_entry(ctx, VIRGL_SET_STENCIL_REF);
+ uint32_t val = get_buf_entry(buf, VIRGL_SET_STENCIL_REF);
ref.ref_value[0] = val & 0xff;
ref.ref_value[1] = (val >> 8) & 0xff;
- vrend_set_stencil_ref(ctx->grctx, &ref);
+ vrend_set_stencil_ref(ctx, &ref);
return 0;
}
-static int vrend_decode_set_blend_color(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_blend_color(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_blend_color color;
int i;
@@ -874,13 +869,13 @@ static int vrend_decode_set_blend_color(struct vrend_decode_ctx *ctx, int length
return EINVAL;
for (i = 0; i < 4; i++)
- color.color[i] = uif(get_buf_entry(ctx, VIRGL_SET_BLEND_COLOR(i)));
+ color.color[i] = uif(get_buf_entry(buf, VIRGL_SET_BLEND_COLOR(i)));
- vrend_set_blend_color(ctx->grctx, &color);
+ vrend_set_blend_color(ctx, &color);
return 0;
}
-static int vrend_decode_set_scissor_state(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_scissor_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_scissor_state ss[PIPE_MAX_VIEWPORTS];
uint32_t temp;
@@ -897,23 +892,23 @@ static int vrend_decode_set_scissor_state(struct vrend_decode_ctx *ctx, int leng
if (num_scissor > PIPE_MAX_VIEWPORTS)
return EINVAL;
- start_slot = get_buf_entry(ctx, VIRGL_SET_SCISSOR_START_SLOT);
+ start_slot = get_buf_entry(buf, VIRGL_SET_SCISSOR_START_SLOT);
for (s = 0; s < num_scissor; s++) {
- temp = get_buf_entry(ctx, VIRGL_SET_SCISSOR_MINX_MINY(s));
+ temp = get_buf_entry(buf, VIRGL_SET_SCISSOR_MINX_MINY(s));
ss[s].minx = temp & 0xffff;
ss[s].miny = (temp >> 16) & 0xffff;
- temp = get_buf_entry(ctx, VIRGL_SET_SCISSOR_MAXX_MAXY(s));
+ temp = get_buf_entry(buf, VIRGL_SET_SCISSOR_MAXX_MAXY(s));
ss[s].maxx = temp & 0xffff;
ss[s].maxy = (temp >> 16) & 0xffff;
}
- vrend_set_scissor_state(ctx->grctx, start_slot, num_scissor, ss);
+ vrend_set_scissor_state(ctx, start_slot, num_scissor, ss);
return 0;
}
-static int vrend_decode_set_polygon_stipple(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_polygon_stipple(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_poly_stipple ps;
int i;
@@ -922,13 +917,13 @@ static int vrend_decode_set_polygon_stipple(struct vrend_decode_ctx *ctx, int le
return EINVAL;
for (i = 0; i < 32; i++)
- ps.stipple[i] = get_buf_entry(ctx, VIRGL_POLYGON_STIPPLE_P0 + i);
+ ps.stipple[i] = get_buf_entry(buf, VIRGL_POLYGON_STIPPLE_P0 + i);
- vrend_set_polygon_stipple(ctx->grctx, &ps);
+ vrend_set_polygon_stipple(ctx, &ps);
return 0;
}
-static int vrend_decode_set_clip_state(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_clip_state(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_clip_state clip;
int i, j;
@@ -938,34 +933,34 @@ static int vrend_decode_set_clip_state(struct vrend_decode_ctx *ctx, int length)
for (i = 0; i < 8; i++)
for (j = 0; j < 4; j++)
- clip.ucp[i][j] = uif(get_buf_entry(ctx, VIRGL_SET_CLIP_STATE_C0 + (i * 4) + j));
- vrend_set_clip_state(ctx->grctx, &clip);
+ clip.ucp[i][j] = uif(get_buf_entry(buf, VIRGL_SET_CLIP_STATE_C0 + (i * 4) + j));
+ vrend_set_clip_state(ctx, &clip);
return 0;
}
-static int vrend_decode_set_sample_mask(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_sample_mask(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
unsigned mask;
if (length != VIRGL_SET_SAMPLE_MASK_SIZE)
return EINVAL;
- mask = get_buf_entry(ctx, VIRGL_SET_SAMPLE_MASK_MASK);
- vrend_set_sample_mask(ctx->grctx, mask);
+ mask = get_buf_entry(buf, VIRGL_SET_SAMPLE_MASK_MASK);
+ vrend_set_sample_mask(ctx, mask);
return 0;
}
-static int vrend_decode_set_min_samples(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_min_samples(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
unsigned min_samples;
if (length != VIRGL_SET_MIN_SAMPLES_SIZE)
return EINVAL;
- min_samples = get_buf_entry(ctx, VIRGL_SET_MIN_SAMPLES_MASK);
- vrend_set_min_samples(ctx->grctx, min_samples);
+ min_samples = get_buf_entry(buf, VIRGL_SET_MIN_SAMPLES_MASK);
+ vrend_set_min_samples(ctx, min_samples);
return 0;
}
-static int vrend_decode_resource_copy_region(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_resource_copy_region(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_box box;
uint32_t dst_handle, src_handle;
@@ -975,21 +970,21 @@ static int vrend_decode_resource_copy_region(struct vrend_decode_ctx *ctx, int l
if (length != VIRGL_CMD_RESOURCE_COPY_REGION_SIZE)
return EINVAL;
- dst_handle = get_buf_entry(ctx, VIRGL_CMD_RCR_DST_RES_HANDLE);
- dst_level = get_buf_entry(ctx, VIRGL_CMD_RCR_DST_LEVEL);
- dstx = get_buf_entry(ctx, VIRGL_CMD_RCR_DST_X);
- dsty = get_buf_entry(ctx, VIRGL_CMD_RCR_DST_Y);
- dstz = get_buf_entry(ctx, VIRGL_CMD_RCR_DST_Z);
- src_handle = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_RES_HANDLE);
- src_level = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_LEVEL);
- box.x = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_X);
- box.y = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_Y);
- box.z = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_Z);
- box.width = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_W);
- box.height = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_H);
- box.depth = get_buf_entry(ctx, VIRGL_CMD_RCR_SRC_D);
-
- vrend_renderer_resource_copy_region(ctx->grctx, dst_handle,
+ dst_handle = get_buf_entry(buf, VIRGL_CMD_RCR_DST_RES_HANDLE);
+ dst_level = get_buf_entry(buf, VIRGL_CMD_RCR_DST_LEVEL);
+ dstx = get_buf_entry(buf, VIRGL_CMD_RCR_DST_X);
+ dsty = get_buf_entry(buf, VIRGL_CMD_RCR_DST_Y);
+ dstz = get_buf_entry(buf, VIRGL_CMD_RCR_DST_Z);
+ src_handle = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_RES_HANDLE);
+ src_level = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_LEVEL);
+ box.x = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_X);
+ box.y = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_Y);
+ box.z = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_Z);
+ box.width = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_W);
+ box.height = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_H);
+ box.depth = get_buf_entry(buf, VIRGL_CMD_RCR_SRC_D);
+
+ vrend_renderer_resource_copy_region(ctx, dst_handle,
dst_level, dstx, dsty, dstz,
src_handle, src_level,
&box);
@@ -997,175 +992,175 @@ static int vrend_decode_resource_copy_region(struct vrend_decode_ctx *ctx, int l
}
-static int vrend_decode_blit(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_blit(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_blit_info info;
uint32_t dst_handle, src_handle, temp;
if (length != VIRGL_CMD_BLIT_SIZE)
return EINVAL;
- temp = get_buf_entry(ctx, VIRGL_CMD_BLIT_S0);
+ temp = get_buf_entry(buf, VIRGL_CMD_BLIT_S0);
info.mask = temp & 0xff;
info.filter = (temp >> 8) & 0x3;
info.scissor_enable = (temp >> 10) & 0x1;
info.render_condition_enable = (temp >> 11) & 0x1;
info.alpha_blend = (temp >> 12) & 0x1;
- temp = get_buf_entry(ctx, VIRGL_CMD_BLIT_SCISSOR_MINX_MINY);
+ temp = get_buf_entry(buf, VIRGL_CMD_BLIT_SCISSOR_MINX_MINY);
info.scissor.minx = temp & 0xffff;
info.scissor.miny = (temp >> 16) & 0xffff;
- temp = get_buf_entry(ctx, VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY);
+ temp = get_buf_entry(buf, VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY);
info.scissor.maxx = temp & 0xffff;
info.scissor.maxy = (temp >> 16) & 0xffff;
- dst_handle = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_RES_HANDLE);
- info.dst.level = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_LEVEL);
- info.dst.format = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_FORMAT);
- info.dst.box.x = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_X);
- info.dst.box.y = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_Y);
- info.dst.box.z = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_Z);
- info.dst.box.width = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_W);
- info.dst.box.height = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_H);
- info.dst.box.depth = get_buf_entry(ctx, VIRGL_CMD_BLIT_DST_D);
-
- src_handle = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_RES_HANDLE);
- info.src.level = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_LEVEL);
- info.src.format = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_FORMAT);
- info.src.box.x = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_X);
- info.src.box.y = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_Y);
- info.src.box.z = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_Z);
- info.src.box.width = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_W);
- info.src.box.height = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_H);
- info.src.box.depth = get_buf_entry(ctx, VIRGL_CMD_BLIT_SRC_D);
-
- vrend_renderer_blit(ctx->grctx, dst_handle, src_handle, &info);
+ dst_handle = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_RES_HANDLE);
+ info.dst.level = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_LEVEL);
+ info.dst.format = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_FORMAT);
+ info.dst.box.x = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_X);
+ info.dst.box.y = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_Y);
+ info.dst.box.z = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_Z);
+ info.dst.box.width = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_W);
+ info.dst.box.height = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_H);
+ info.dst.box.depth = get_buf_entry(buf, VIRGL_CMD_BLIT_DST_D);
+
+ src_handle = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_RES_HANDLE);
+ info.src.level = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_LEVEL);
+ info.src.format = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_FORMAT);
+ info.src.box.x = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_X);
+ info.src.box.y = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_Y);
+ info.src.box.z = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_Z);
+ info.src.box.width = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_W);
+ info.src.box.height = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_H);
+ info.src.box.depth = get_buf_entry(buf, VIRGL_CMD_BLIT_SRC_D);
+
+ vrend_renderer_blit(ctx, dst_handle, src_handle, &info);
return 0;
}
-static int vrend_decode_bind_sampler_states(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_bind_sampler_states(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length < 2)
return EINVAL;
- uint32_t shader_type = get_buf_entry(ctx, VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE);
- uint32_t start_slot = get_buf_entry(ctx, VIRGL_BIND_SAMPLER_STATES_START_SLOT);
+ uint32_t shader_type = get_buf_entry(buf, VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE);
+ uint32_t start_slot = get_buf_entry(buf, VIRGL_BIND_SAMPLER_STATES_START_SLOT);
uint32_t num_states = length - 2;
if (shader_type >= PIPE_SHADER_TYPES)
return EINVAL;
- vrend_bind_sampler_states(ctx->grctx, shader_type, start_slot, num_states,
- get_buf_ptr(ctx, VIRGL_BIND_SAMPLER_STATES_S0_HANDLE));
+ vrend_bind_sampler_states(ctx, shader_type, start_slot, num_states,
+ get_buf_ptr(buf, VIRGL_BIND_SAMPLER_STATES_S0_HANDLE));
return 0;
}
-static int vrend_decode_begin_query(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_begin_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t handle = get_buf_entry(ctx, VIRGL_QUERY_BEGIN_HANDLE);
+ uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_BEGIN_HANDLE);
- return vrend_begin_query(ctx->grctx, handle);
+ return vrend_begin_query(ctx, handle);
}
-static int vrend_decode_end_query(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_end_query(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t handle = get_buf_entry(ctx, VIRGL_QUERY_END_HANDLE);
+ uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_END_HANDLE);
- return vrend_end_query(ctx->grctx, handle);
+ return vrend_end_query(ctx, handle);
}
-static int vrend_decode_get_query_result(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_get_query_result(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 2)
return EINVAL;
- uint32_t handle = get_buf_entry(ctx, VIRGL_QUERY_RESULT_HANDLE);
- uint32_t wait = get_buf_entry(ctx, VIRGL_QUERY_RESULT_WAIT);
+ uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_HANDLE);
+ uint32_t wait = get_buf_entry(buf, VIRGL_QUERY_RESULT_WAIT);
- vrend_get_query_result(ctx->grctx, handle, wait);
+ vrend_get_query_result(ctx, handle, wait);
return 0;
}
-static int vrend_decode_get_query_result_qbo(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_get_query_result_qbo(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != VIRGL_QUERY_RESULT_QBO_SIZE)
return EINVAL;
- uint32_t handle = get_buf_entry(ctx, VIRGL_QUERY_RESULT_QBO_HANDLE);
- uint32_t qbo_handle = get_buf_entry(ctx, VIRGL_QUERY_RESULT_QBO_QBO_HANDLE);
- uint32_t wait = get_buf_entry(ctx, VIRGL_QUERY_RESULT_QBO_WAIT);
- uint32_t result_type = get_buf_entry(ctx, VIRGL_QUERY_RESULT_QBO_RESULT_TYPE);
- uint32_t offset = get_buf_entry(ctx, VIRGL_QUERY_RESULT_QBO_OFFSET);
- int32_t index = get_buf_entry(ctx, VIRGL_QUERY_RESULT_QBO_INDEX);
+ uint32_t handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_HANDLE);
+ uint32_t qbo_handle = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_QBO_HANDLE);
+ uint32_t wait = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_WAIT);
+ uint32_t result_type = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_RESULT_TYPE);
+ uint32_t offset = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_OFFSET);
+ int32_t index = get_buf_entry(buf, VIRGL_QUERY_RESULT_QBO_INDEX);
- vrend_get_query_result_qbo(ctx->grctx, handle, qbo_handle, wait, result_type, offset, index);
+ vrend_get_query_result_qbo(ctx, handle, qbo_handle, wait, result_type, offset, index);
return 0;
}
-static int vrend_decode_set_render_condition(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_render_condition(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != VIRGL_RENDER_CONDITION_SIZE)
return EINVAL;
- uint32_t handle = get_buf_entry(ctx, VIRGL_RENDER_CONDITION_HANDLE);
- bool condition = get_buf_entry(ctx, VIRGL_RENDER_CONDITION_CONDITION) & 1;
- uint mode = get_buf_entry(ctx, VIRGL_RENDER_CONDITION_MODE);
+ uint32_t handle = get_buf_entry(buf, VIRGL_RENDER_CONDITION_HANDLE);
+ bool condition = get_buf_entry(buf, VIRGL_RENDER_CONDITION_CONDITION) & 1;
+ uint mode = get_buf_entry(buf, VIRGL_RENDER_CONDITION_MODE);
- vrend_render_condition(ctx->grctx, handle, condition, mode);
+ vrend_render_condition(ctx, handle, condition, mode);
return 0;
}
-static int vrend_decode_set_sub_ctx(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t ctx_sub_id = get_buf_entry(ctx, 1);
+ uint32_t ctx_sub_id = get_buf_entry(buf, 1);
- vrend_renderer_set_sub_ctx(ctx->grctx, ctx_sub_id);
+ vrend_renderer_set_sub_ctx(ctx, ctx_sub_id);
return 0;
}
-static int vrend_decode_create_sub_ctx(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_create_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t ctx_sub_id = get_buf_entry(ctx, 1);
+ uint32_t ctx_sub_id = get_buf_entry(buf, 1);
- vrend_renderer_create_sub_ctx(ctx->grctx, ctx_sub_id);
+ vrend_renderer_create_sub_ctx(ctx, ctx_sub_id);
return 0;
}
-static int vrend_decode_destroy_sub_ctx(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_destroy_sub_ctx(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != 1)
return EINVAL;
- uint32_t ctx_sub_id = get_buf_entry(ctx, 1);
+ uint32_t ctx_sub_id = get_buf_entry(buf, 1);
- vrend_renderer_destroy_sub_ctx(ctx->grctx, ctx_sub_id);
+ vrend_renderer_destroy_sub_ctx(ctx, ctx_sub_id);
return 0;
}
-static int vrend_decode_bind_shader(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_bind_shader(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t handle, type;
if (length != VIRGL_BIND_SHADER_SIZE)
return EINVAL;
- handle = get_buf_entry(ctx, VIRGL_BIND_SHADER_HANDLE);
- type = get_buf_entry(ctx, VIRGL_BIND_SHADER_TYPE);
+ handle = get_buf_entry(buf, VIRGL_BIND_SHADER_HANDLE);
+ type = get_buf_entry(buf, VIRGL_BIND_SHADER_TYPE);
- vrend_bind_shader(ctx->grctx, handle, type);
+ vrend_bind_shader(ctx, handle, type);
return 0;
}
-static int vrend_decode_set_tess_state(struct vrend_decode_ctx *ctx,
- int length)
+static int vrend_decode_set_tess_state(struct vrend_context *ctx,
+ const uint32_t *buf, uint32_t length)
{
float tess_factors[6];
int i;
@@ -1174,13 +1169,13 @@ static int vrend_decode_set_tess_state(struct vrend_decode_ctx *ctx,
return EINVAL;
for (i = 0; i < 6; i++) {
- tess_factors[i] = uif(get_buf_entry(ctx, i + 1));
+ tess_factors[i] = uif(get_buf_entry(buf, i + 1));
}
- vrend_set_tess_state(ctx->grctx, tess_factors);
+ vrend_set_tess_state(ctx, tess_factors);
return 0;
}
-static int vrend_decode_set_shader_buffers(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_set_shader_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t num_ssbo;
uint32_t shader_type, start_slot;
@@ -1189,8 +1184,8 @@ static int vrend_decode_set_shader_buffers(struct vrend_decode_ctx *ctx, uint16_
return EINVAL;
num_ssbo = (length - 2) / VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE;
- shader_type = get_buf_entry(ctx, VIRGL_SET_SHADER_BUFFER_SHADER_TYPE);
- start_slot = get_buf_entry(ctx, VIRGL_SET_SHADER_BUFFER_START_SLOT);
+ shader_type = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_SHADER_TYPE);
+ start_slot = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_START_SLOT);
if (shader_type >= PIPE_SHADER_TYPES)
return EINVAL;
@@ -1202,16 +1197,16 @@ static int vrend_decode_set_shader_buffers(struct vrend_decode_ctx *ctx, uint16_
return EINVAL;
for (uint32_t i = 0; i < num_ssbo; i++) {
- uint32_t offset = get_buf_entry(ctx, VIRGL_SET_SHADER_BUFFER_OFFSET(i));
- uint32_t buf_len = get_buf_entry(ctx, VIRGL_SET_SHADER_BUFFER_LENGTH(i));
- uint32_t handle = get_buf_entry(ctx, VIRGL_SET_SHADER_BUFFER_RES_HANDLE(i));
- vrend_set_single_ssbo(ctx->grctx, shader_type, start_slot + i, offset, buf_len,
+ uint32_t offset = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_OFFSET(i));
+ uint32_t buf_len = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_LENGTH(i));
+ uint32_t handle = get_buf_entry(buf, VIRGL_SET_SHADER_BUFFER_RES_HANDLE(i));
+ vrend_set_single_ssbo(ctx, shader_type, start_slot + i, offset, buf_len,
handle);
}
return 0;
}
-static int vrend_decode_set_atomic_buffers(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_set_atomic_buffers(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t num_abo;
uint32_t start_slot;
@@ -1220,7 +1215,7 @@ static int vrend_decode_set_atomic_buffers(struct vrend_decode_ctx *ctx, uint16_
return EINVAL;
num_abo = (length - 1) / VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE;
- start_slot = get_buf_entry(ctx, VIRGL_SET_ATOMIC_BUFFER_START_SLOT);
+ start_slot = get_buf_entry(buf, VIRGL_SET_ATOMIC_BUFFER_START_SLOT);
if (num_abo < 1)
return 0;
@@ -1230,16 +1225,16 @@ static int vrend_decode_set_atomic_buffers(struct vrend_decode_ctx *ctx, uint16_
return EINVAL;
for (uint32_t i = 0; i < num_abo; i++) {
- uint32_t offset = get_buf_entry(ctx, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2);
- uint32_t buf_len = get_buf_entry(ctx, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3);
- uint32_t handle = get_buf_entry(ctx, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4);
- vrend_set_single_abo(ctx->grctx, start_slot + i, offset, buf_len, handle);
+ uint32_t offset = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2);
+ uint32_t buf_len = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3);
+ uint32_t handle = get_buf_entry(buf, i * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4);
+ vrend_set_single_abo(ctx, start_slot + i, offset, buf_len, handle);
}
return 0;
}
-static int vrend_decode_set_shader_images(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_set_shader_images(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t num_images;
uint32_t shader_type, start_slot;
@@ -1247,8 +1242,8 @@ static int vrend_decode_set_shader_images(struct vrend_decode_ctx *ctx, uint16_t
return EINVAL;
num_images = (length - 2) / VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE;
- shader_type = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_SHADER_TYPE);
- start_slot = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_START_SLOT);
+ shader_type = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_SHADER_TYPE);
+ start_slot = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_START_SLOT);
if (shader_type >= PIPE_SHADER_TYPES)
return EINVAL;
@@ -1260,48 +1255,48 @@ static int vrend_decode_set_shader_images(struct vrend_decode_ctx *ctx, uint16_t
return EINVAL;
for (uint32_t i = 0; i < num_images; i++) {
- uint32_t format = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_FORMAT(i));
- uint32_t access = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_ACCESS(i));
- uint32_t layer_offset = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(i));
- uint32_t level_size = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(i));
- uint32_t handle = get_buf_entry(ctx, VIRGL_SET_SHADER_IMAGE_RES_HANDLE(i));
- vrend_set_single_image_view(ctx->grctx, shader_type, start_slot + i, format, access,
+ uint32_t format = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_FORMAT(i));
+ uint32_t access = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_ACCESS(i));
+ uint32_t layer_offset = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(i));
+ uint32_t level_size = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(i));
+ uint32_t handle = get_buf_entry(buf, VIRGL_SET_SHADER_IMAGE_RES_HANDLE(i));
+ vrend_set_single_image_view(ctx, shader_type, start_slot + i, format, access,
layer_offset, level_size, handle);
}
return 0;
}
-static int vrend_decode_memory_barrier(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_memory_barrier(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != VIRGL_MEMORY_BARRIER_SIZE)
return EINVAL;
- unsigned flags = get_buf_entry(ctx, VIRGL_MEMORY_BARRIER_FLAGS);
- vrend_memory_barrier(ctx->grctx, flags);
+ unsigned flags = get_buf_entry(buf, VIRGL_MEMORY_BARRIER_FLAGS);
+ vrend_memory_barrier(ctx, flags);
return 0;
}
-static int vrend_decode_launch_grid(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_launch_grid(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
uint32_t block[3], grid[3];
uint32_t indirect_handle, indirect_offset;
if (length != VIRGL_LAUNCH_GRID_SIZE)
return EINVAL;
- block[0] = get_buf_entry(ctx, VIRGL_LAUNCH_BLOCK_X);
- block[1] = get_buf_entry(ctx, VIRGL_LAUNCH_BLOCK_Y);
- block[2] = get_buf_entry(ctx, VIRGL_LAUNCH_BLOCK_Z);
- grid[0] = get_buf_entry(ctx, VIRGL_LAUNCH_GRID_X);
- grid[1] = get_buf_entry(ctx, VIRGL_LAUNCH_GRID_Y);
- grid[2] = get_buf_entry(ctx, VIRGL_LAUNCH_GRID_Z);
- indirect_handle = get_buf_entry(ctx, VIRGL_LAUNCH_INDIRECT_HANDLE);
- indirect_offset = get_buf_entry(ctx, VIRGL_LAUNCH_INDIRECT_OFFSET);
- vrend_launch_grid(ctx->grctx, block, grid, indirect_handle, indirect_offset);
+ block[0] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_X);
+ block[1] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_Y);
+ block[2] = get_buf_entry(buf, VIRGL_LAUNCH_BLOCK_Z);
+ grid[0] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_X);
+ grid[1] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_Y);
+ grid[2] = get_buf_entry(buf, VIRGL_LAUNCH_GRID_Z);
+ indirect_handle = get_buf_entry(buf, VIRGL_LAUNCH_INDIRECT_HANDLE);
+ indirect_offset = get_buf_entry(buf, VIRGL_LAUNCH_INDIRECT_OFFSET);
+ vrend_launch_grid(ctx, block, grid, indirect_handle, indirect_offset);
return 0;
}
-static int vrend_decode_set_streamout_targets(struct vrend_decode_ctx *ctx,
- uint16_t length)
+static int vrend_decode_set_streamout_targets(struct vrend_context *ctx,
+ const uint32_t *buf, uint32_t length)
{
uint32_t handles[16];
uint32_t num_handles = length - 1;
@@ -1313,64 +1308,63 @@ static int vrend_decode_set_streamout_targets(struct vrend_decode_ctx *ctx,
if (num_handles > ARRAY_SIZE(handles))
return EINVAL;
- append_bitmask = get_buf_entry(ctx, VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK);
+ append_bitmask = get_buf_entry(buf, VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK);
for (i = 0; i < num_handles; i++)
- handles[i] = get_buf_entry(ctx, VIRGL_SET_STREAMOUT_TARGETS_H0 + i);
- vrend_set_streamout_targets(ctx->grctx, append_bitmask, num_handles, handles);
+ handles[i] = get_buf_entry(buf, VIRGL_SET_STREAMOUT_TARGETS_H0 + i);
+ vrend_set_streamout_targets(ctx, append_bitmask, num_handles, handles);
return 0;
}
-static int vrend_decode_texture_barrier(struct vrend_decode_ctx *ctx, uint16_t length)
+static int vrend_decode_texture_barrier(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
if (length != VIRGL_TEXTURE_BARRIER_SIZE)
return EINVAL;
- unsigned flags = get_buf_entry(ctx, VIRGL_TEXTURE_BARRIER_FLAGS);
- vrend_texture_barrier(ctx->grctx, flags);
+ unsigned flags = get_buf_entry(buf, VIRGL_TEXTURE_BARRIER_FLAGS);
+ vrend_texture_barrier(ctx, flags);
return 0;
}
-static int vrend_decode_set_debug_mask(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_debug_mask(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
char *flagstring;
int slen = sizeof(uint32_t) * length;
- const uint32_t *buf;
if (length < VIRGL_SET_DEBUG_FLAGS_MIN_SIZE)
return EINVAL;
- buf = get_buf_ptr(ctx, VIRGL_SET_DEBUG_FLAGSTRING_OFFSET);
+ const uint32_t *flag_buf = get_buf_ptr(buf, VIRGL_SET_DEBUG_FLAGSTRING_OFFSET);
flagstring = malloc(slen+1);
if (!flagstring) {
return ENOMEM;
}
- memcpy(flagstring, buf, slen);
+ memcpy(flagstring, flag_buf, slen);
flagstring[slen] = 0;
- vrend_context_set_debug_flags(ctx->grctx, flagstring);
+ vrend_context_set_debug_flags(ctx, flagstring);
free(flagstring);
return 0;
}
-static int vrend_decode_set_tweaks(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_set_tweaks(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
VREND_DEBUG(dbg_tweak, NULL, "Received TWEAK set command\n");
if (length < VIRGL_SET_TWEAKS_SIZE)
return EINVAL;
- uint32_t tweak_id = get_buf_entry(ctx, VIRGL_SET_TWEAKS_ID);
- uint32_t tweak_value = get_buf_entry(ctx, VIRGL_SET_TWEAKS_VALUE);
+ uint32_t tweak_id = get_buf_entry(buf, VIRGL_SET_TWEAKS_ID);
+ uint32_t tweak_value = get_buf_entry(buf, VIRGL_SET_TWEAKS_VALUE);
- vrend_set_active_tweaks(vrend_get_context_tweaks(ctx->grctx), tweak_id, tweak_value);
+ vrend_set_active_tweaks(vrend_get_context_tweaks(ctx), tweak_id, tweak_value);
return 0;
}
-static int vrend_decode_transfer3d(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_transfer3d(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_box box;
uint32_t dst_handle;
@@ -1381,19 +1375,19 @@ static int vrend_decode_transfer3d(struct vrend_decode_ctx *ctx, int length)
memset(&info, 0, sizeof(info));
info.box = &box;
- vrend_decode_transfer_common(ctx, &dst_handle, &info);
- info.offset = get_buf_entry(ctx, VIRGL_TRANSFER3D_DATA_OFFSET);
- int transfer_mode = get_buf_entry(ctx, VIRGL_TRANSFER3D_DIRECTION);
+ vrend_decode_transfer_common(buf, &dst_handle, &info);
+ info.offset = get_buf_entry(buf, VIRGL_TRANSFER3D_DATA_OFFSET);
+ int transfer_mode = get_buf_entry(buf, VIRGL_TRANSFER3D_DIRECTION);
if (transfer_mode != VIRGL_TRANSFER_TO_HOST &&
transfer_mode != VIRGL_TRANSFER_FROM_HOST)
return EINVAL;
- return vrend_renderer_transfer_iov(ctx->grctx, dst_handle, &info,
+ return vrend_renderer_transfer_iov(ctx, dst_handle, &info,
transfer_mode);
}
-static int vrend_decode_copy_transfer3d(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_copy_transfer3d(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct pipe_box box;
struct vrend_transfer_info info;
@@ -1405,17 +1399,17 @@ static int vrend_decode_copy_transfer3d(struct vrend_decode_ctx *ctx, int length
memset(&info, 0, sizeof(info));
info.box = &box;
- vrend_decode_transfer_common(ctx, &dst_handle, &info);
- info.offset = get_buf_entry(ctx, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
- info.synchronized = (get_buf_entry(ctx, VIRGL_COPY_TRANSFER3D_SYNCHRONIZED) != 0);
+ vrend_decode_transfer_common(buf, &dst_handle, &info);
+ info.offset = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET);
+ info.synchronized = (get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SYNCHRONIZED) != 0);
- src_handle = get_buf_entry(ctx, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
+ src_handle = get_buf_entry(buf, VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE);
- return vrend_renderer_copy_transfer3d(ctx->grctx, dst_handle, src_handle,
+ return vrend_renderer_copy_transfer3d(ctx, dst_handle, src_handle,
&info);
}
-static int vrend_decode_pipe_resource_create(struct vrend_decode_ctx *ctx, int length)
+static int vrend_decode_pipe_resource_create(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
{
struct vrend_renderer_resource_create_args args = { 0 };
uint32_t blob_id;
@@ -1423,19 +1417,19 @@ static int vrend_decode_pipe_resource_create(struct vrend_decode_ctx *ctx, int l
if (length != VIRGL_PIPE_RES_CREATE_SIZE)
return EINVAL;
- args.target = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_TARGET);
- args.format = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_FORMAT);
- args.bind = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_BIND);
- args.width = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_WIDTH);
- args.height = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_HEIGHT);
- args.depth = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_DEPTH);
- args.array_size = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_ARRAY_SIZE);
- args.last_level = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_LAST_LEVEL);
- args.nr_samples = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_NR_SAMPLES);
- args.flags = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_FLAGS);
- blob_id = get_buf_entry(ctx, VIRGL_PIPE_RES_CREATE_BLOB_ID);
+ args.target = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_TARGET);
+ args.format = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_FORMAT);
+ args.bind = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_BIND);
+ args.width = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_WIDTH);
+ args.height = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_HEIGHT);
+ args.depth = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_DEPTH);
+ args.array_size = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_ARRAY_SIZE);
+ args.last_level = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_LAST_LEVEL);
+ args.nr_samples = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_NR_SAMPLES);
+ args.flags = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_FLAGS);
+ blob_id = get_buf_entry(buf, VIRGL_PIPE_RES_CREATE_BLOB_ID);
- return vrend_renderer_pipe_resource_create(ctx->grctx, blob_id, &args);
+ return vrend_renderer_pipe_resource_create(ctx, blob_id, &args);
}
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
@@ -1459,13 +1453,12 @@ struct virgl_context *vrend_renderer_context_create(uint32_t handle,
return NULL;
}
- dctx->ds = &dctx->ids;
-
return &dctx->base;
}
static void vrend_decode_ctx_destroy(struct virgl_context *ctx)
{
+ TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
vrend_destroy_context(dctx->grctx);
@@ -1475,8 +1468,8 @@ static void vrend_decode_ctx_destroy(struct virgl_context *ctx)
static void vrend_decode_ctx_attach_resource(struct virgl_context *ctx,
struct virgl_resource *res)
{
+ TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
-
/* in the future, we should import to create the pipe resource */
if (!res->pipe_resource)
return;
@@ -1488,6 +1481,7 @@ static void vrend_decode_ctx_attach_resource(struct virgl_context *ctx,
static void vrend_decode_ctx_detach_resource(struct virgl_context *ctx,
struct virgl_resource *res)
{
+ TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
vrend_renderer_detach_res_ctx(dctx->grctx, res->res_id);
}
@@ -1497,6 +1491,7 @@ static int vrend_decode_ctx_transfer_3d(struct virgl_context *ctx,
const struct vrend_transfer_info *info,
int transfer_mode)
{
+ TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
return vrend_renderer_transfer_iov(dctx->grctx, res->res_id, info,
transfer_mode);
@@ -1507,6 +1502,7 @@ static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
UNUSED uint32_t blob_flags,
struct virgl_context_blob *blob)
{
+ TRACE_FUNC();
struct vrend_decode_ctx *dctx = (struct vrend_decode_ctx *)ctx;
blob->type = VIRGL_RESOURCE_FD_INVALID;
@@ -1516,10 +1512,73 @@ static int vrend_decode_ctx_get_blob(struct virgl_context *ctx,
return blob->u.pipe_resource ? 0 : EINVAL;
}
+typedef int (*vrend_decode_callback)(struct vrend_context *ctx, const uint32_t *buf, uint32_t length);
+
+static int vrend_decode_dummy(struct vrend_context *ctx, const uint32_t *buf, uint32_t length)
+{
+ (void)ctx;
+ (void)buf;
+ (void)length;
+ return 0;
+}
+
+vrend_decode_callback decode_table[VIRGL_MAX_COMMANDS] = {
+ [VIRGL_CCMD_NOP] = vrend_decode_dummy,
+ [VIRGL_CCMD_CREATE_OBJECT] = vrend_decode_create_object,
+ [VIRGL_CCMD_BIND_OBJECT] = vrend_decode_bind_object,
+ [VIRGL_CCMD_DESTROY_OBJECT] = vrend_decode_destroy_object,
+ [VIRGL_CCMD_CLEAR] = vrend_decode_clear,
+ [VIRGL_CCMD_CLEAR_TEXTURE] = vrend_decode_clear_texture,
+ [VIRGL_CCMD_DRAW_VBO] = vrend_decode_draw_vbo,
+ [VIRGL_CCMD_SET_FRAMEBUFFER_STATE] = vrend_decode_set_framebuffer_state,
+ [VIRGL_CCMD_SET_VERTEX_BUFFERS] = vrend_decode_set_vertex_buffers,
+ [VIRGL_CCMD_RESOURCE_INLINE_WRITE] = vrend_decode_resource_inline_write,
+ [VIRGL_CCMD_SET_VIEWPORT_STATE] = vrend_decode_set_viewport_state,
+ [VIRGL_CCMD_SET_SAMPLER_VIEWS] = vrend_decode_set_sampler_views,
+ [VIRGL_CCMD_SET_INDEX_BUFFER] = vrend_decode_set_index_buffer,
+ [VIRGL_CCMD_SET_CONSTANT_BUFFER] = vrend_decode_set_constant_buffer,
+ [VIRGL_CCMD_SET_STENCIL_REF] = vrend_decode_set_stencil_ref,
+ [VIRGL_CCMD_SET_BLEND_COLOR] = vrend_decode_set_blend_color,
+ [VIRGL_CCMD_SET_SCISSOR_STATE] = vrend_decode_set_scissor_state,
+ [VIRGL_CCMD_BLIT] = vrend_decode_blit,
+ [VIRGL_CCMD_RESOURCE_COPY_REGION] = vrend_decode_resource_copy_region,
+ [VIRGL_CCMD_BIND_SAMPLER_STATES] = vrend_decode_bind_sampler_states,
+ [VIRGL_CCMD_BEGIN_QUERY] = vrend_decode_begin_query,
+ [VIRGL_CCMD_END_QUERY] = vrend_decode_end_query,
+ [VIRGL_CCMD_GET_QUERY_RESULT] = vrend_decode_get_query_result,
+ [VIRGL_CCMD_SET_POLYGON_STIPPLE] = vrend_decode_set_polygon_stipple,
+ [VIRGL_CCMD_SET_CLIP_STATE] = vrend_decode_set_clip_state,
+ [VIRGL_CCMD_SET_SAMPLE_MASK] = vrend_decode_set_sample_mask,
+ [VIRGL_CCMD_SET_MIN_SAMPLES] = vrend_decode_set_min_samples,
+ [VIRGL_CCMD_SET_STREAMOUT_TARGETS] = vrend_decode_set_streamout_targets,
+ [VIRGL_CCMD_SET_RENDER_CONDITION] = vrend_decode_set_render_condition,
+ [VIRGL_CCMD_SET_UNIFORM_BUFFER] = vrend_decode_set_uniform_buffer,
+ [VIRGL_CCMD_SET_SUB_CTX] = vrend_decode_set_sub_ctx,
+ [VIRGL_CCMD_CREATE_SUB_CTX] = vrend_decode_create_sub_ctx,
+ [VIRGL_CCMD_DESTROY_SUB_CTX] = vrend_decode_destroy_sub_ctx,
+ [VIRGL_CCMD_BIND_SHADER] = vrend_decode_bind_shader,
+ [VIRGL_CCMD_SET_TESS_STATE] = vrend_decode_set_tess_state,
+ [VIRGL_CCMD_SET_SHADER_BUFFERS] = vrend_decode_set_shader_buffers,
+ [VIRGL_CCMD_SET_SHADER_IMAGES] = vrend_decode_set_shader_images,
+ [VIRGL_CCMD_SET_ATOMIC_BUFFERS] = vrend_decode_set_atomic_buffers,
+ [VIRGL_CCMD_MEMORY_BARRIER] = vrend_decode_memory_barrier,
+ [VIRGL_CCMD_LAUNCH_GRID] = vrend_decode_launch_grid,
+ [VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH] = vrend_decode_set_framebuffer_state_no_attach,
+ [VIRGL_CCMD_TEXTURE_BARRIER] = vrend_decode_texture_barrier,
+ [VIRGL_CCMD_SET_DEBUG_FLAGS] = vrend_decode_set_debug_mask,
+ [VIRGL_CCMD_GET_QUERY_RESULT_QBO] = vrend_decode_get_query_result_qbo,
+ [VIRGL_CCMD_TRANSFER3D] = vrend_decode_transfer3d,
+ [VIRGL_CCMD_COPY_TRANSFER3D] = vrend_decode_copy_transfer3d,
+ [VIRGL_CCMD_END_TRANSFERS] = vrend_decode_dummy,
+ [VIRGL_CCMD_SET_TWEAKS] = vrend_decode_set_tweaks,
+ [VIRGL_CCMD_PIPE_RESOURCE_CREATE] = vrend_decode_pipe_resource_create
+};
+
static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
const void *buffer,
size_t size)
{
+ TRACE_FUNC();
struct vrend_decode_ctx *gdctx = (struct vrend_decode_ctx *)ctx;
bool bret;
int ret;
@@ -1528,190 +1587,54 @@ static int vrend_decode_ctx_submit_cmd(struct virgl_context *ctx,
if (bret == false)
return EINVAL;
- gdctx->ds->buf = buffer;
- gdctx->ds->buf_total = size / sizeof(uint32_t);
- gdctx->ds->buf_offset = 0;
+ const uint32_t *typed_buf = (const uint32_t *)buffer;
+ const uint32_t buf_total = size / sizeof(uint32_t);
+ uint32_t buf_offset = 0;
+
+ while (buf_offset < buf_total) {
+#ifndef NDEBUG
+ const uint32_t cur_offset = buf_offset;
+#endif
+
+ const uint32_t *buf = &typed_buf[buf_offset];
+ uint32_t len = *buf >> 16;
+ uint32_t cmd = *buf & 0xff;
- while (gdctx->ds->buf_offset < gdctx->ds->buf_total) {
- uint32_t header = gdctx->ds->buf[gdctx->ds->buf_offset];
- uint32_t len = header >> 16;
+ if (cmd >= VIRGL_MAX_COMMANDS)
+ return EINVAL;
+
+ buf_offset += len + 1;
ret = 0;
/* check if the guest is doing something bad */
- if (gdctx->ds->buf_offset + len + 1 > gdctx->ds->buf_total) {
+ if (buf_offset > buf_total) {
vrend_report_buffer_error(gdctx->grctx, 0);
break;
}
- VREND_DEBUG(dbg_cmd, gdctx->grctx,"%-4d %-20s len:%d\n",
- gdctx->ds->buf_offset, vrend_get_comand_name(header & 0xff), len);
+ VREND_DEBUG(dbg_cmd, gdctx->grctx, "%-4d %-20s len:%d\n",
+ cur_offset, vrend_get_comand_name(cmd), len);
- switch (header & 0xff) {
- case VIRGL_CCMD_CREATE_OBJECT:
- ret = vrend_decode_create_object(gdctx, len);
- break;
- case VIRGL_CCMD_BIND_OBJECT:
- ret = vrend_decode_bind_object(gdctx, len);
- break;
- case VIRGL_CCMD_DESTROY_OBJECT:
- ret = vrend_decode_destroy_object(gdctx, len);
- break;
- case VIRGL_CCMD_CLEAR:
- ret = vrend_decode_clear(gdctx, len);
- break;
- case VIRGL_CCMD_CLEAR_TEXTURE:
- ret = vrend_decode_clear_texture(gdctx, len);
- break;
- case VIRGL_CCMD_DRAW_VBO:
- ret = vrend_decode_draw_vbo(gdctx, len);
- break;
- case VIRGL_CCMD_SET_FRAMEBUFFER_STATE:
- ret = vrend_decode_set_framebuffer_state(gdctx, len);
- break;
- case VIRGL_CCMD_SET_VERTEX_BUFFERS:
- ret = vrend_decode_set_vertex_buffers(gdctx, len);
- break;
- case VIRGL_CCMD_RESOURCE_INLINE_WRITE:
- ret = vrend_decode_resource_inline_write(gdctx, len);
- break;
- case VIRGL_CCMD_SET_VIEWPORT_STATE:
- ret = vrend_decode_set_viewport_state(gdctx, len);
- break;
- case VIRGL_CCMD_SET_SAMPLER_VIEWS:
- ret = vrend_decode_set_sampler_views(gdctx, len);
- break;
- case VIRGL_CCMD_SET_INDEX_BUFFER:
- ret = vrend_decode_set_index_buffer(gdctx, len);
- break;
- case VIRGL_CCMD_SET_CONSTANT_BUFFER:
- ret = vrend_decode_set_constant_buffer(gdctx, len);
- break;
- case VIRGL_CCMD_SET_STENCIL_REF:
- ret = vrend_decode_set_stencil_ref(gdctx, len);
- break;
- case VIRGL_CCMD_SET_BLEND_COLOR:
- ret = vrend_decode_set_blend_color(gdctx, len);
- break;
- case VIRGL_CCMD_SET_SCISSOR_STATE:
- ret = vrend_decode_set_scissor_state(gdctx, len);
- break;
- case VIRGL_CCMD_BLIT:
- ret = vrend_decode_blit(gdctx, len);
- break;
- case VIRGL_CCMD_RESOURCE_COPY_REGION:
- ret = vrend_decode_resource_copy_region(gdctx, len);
- break;
- case VIRGL_CCMD_BIND_SAMPLER_STATES:
- ret = vrend_decode_bind_sampler_states(gdctx, len);
- break;
- case VIRGL_CCMD_BEGIN_QUERY:
- ret = vrend_decode_begin_query(gdctx, len);
- break;
- case VIRGL_CCMD_END_QUERY:
- ret = vrend_decode_end_query(gdctx, len);
- break;
- case VIRGL_CCMD_GET_QUERY_RESULT:
- ret = vrend_decode_get_query_result(gdctx, len);
- break;
- case VIRGL_CCMD_SET_POLYGON_STIPPLE:
- ret = vrend_decode_set_polygon_stipple(gdctx, len);
- break;
- case VIRGL_CCMD_SET_CLIP_STATE:
- ret = vrend_decode_set_clip_state(gdctx, len);
- break;
- case VIRGL_CCMD_SET_SAMPLE_MASK:
- ret = vrend_decode_set_sample_mask(gdctx, len);
- break;
- case VIRGL_CCMD_SET_MIN_SAMPLES:
- ret = vrend_decode_set_min_samples(gdctx, len);
- break;
- case VIRGL_CCMD_SET_STREAMOUT_TARGETS:
- ret = vrend_decode_set_streamout_targets(gdctx, len);
- break;
- case VIRGL_CCMD_SET_RENDER_CONDITION:
- ret = vrend_decode_set_render_condition(gdctx, len);
- break;
- case VIRGL_CCMD_SET_UNIFORM_BUFFER:
- ret = vrend_decode_set_uniform_buffer(gdctx, len);
- break;
- case VIRGL_CCMD_SET_SUB_CTX:
- ret = vrend_decode_set_sub_ctx(gdctx, len);
- break;
- case VIRGL_CCMD_CREATE_SUB_CTX:
- ret = vrend_decode_create_sub_ctx(gdctx, len);
- break;
- case VIRGL_CCMD_DESTROY_SUB_CTX:
- ret = vrend_decode_destroy_sub_ctx(gdctx, len);
- break;
- case VIRGL_CCMD_BIND_SHADER:
- ret = vrend_decode_bind_shader(gdctx, len);
- break;
- case VIRGL_CCMD_SET_TESS_STATE:
- ret = vrend_decode_set_tess_state(gdctx, len);
- break;
- case VIRGL_CCMD_SET_SHADER_BUFFERS:
- ret = vrend_decode_set_shader_buffers(gdctx, len);
- break;
- case VIRGL_CCMD_SET_SHADER_IMAGES:
- ret = vrend_decode_set_shader_images(gdctx, len);
- break;
- case VIRGL_CCMD_SET_ATOMIC_BUFFERS:
- ret = vrend_decode_set_atomic_buffers(gdctx, len);
- break;
- case VIRGL_CCMD_MEMORY_BARRIER:
- ret = vrend_decode_memory_barrier(gdctx, len);
- break;
- case VIRGL_CCMD_LAUNCH_GRID:
- ret = vrend_decode_launch_grid(gdctx, len);
- break;
- case VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH:
- ret = vrend_decode_set_framebuffer_state_no_attach(gdctx, len);
- break;
- case VIRGL_CCMD_TEXTURE_BARRIER:
- ret = vrend_decode_texture_barrier(gdctx, len);
- break;
- case VIRGL_CCMD_SET_DEBUG_FLAGS:
- ret = vrend_decode_set_debug_mask(gdctx, len);
- break;
- case VIRGL_CCMD_GET_QUERY_RESULT_QBO:
- ret = vrend_decode_get_query_result_qbo(gdctx, len);
- break;
- case VIRGL_CCMD_TRANSFER3D:
- ret = vrend_decode_transfer3d(gdctx, len);
- break;
- case VIRGL_CCMD_COPY_TRANSFER3D:
- ret = vrend_decode_copy_transfer3d(gdctx, len);
- break;
- case VIRGL_CCMD_END_TRANSFERS:
- ret = 0;
- break;
- case VIRGL_CCMD_SET_TWEAKS:
- ret = vrend_decode_set_tweaks(gdctx, len);
- break;
- case VIRGL_CCMD_PIPE_RESOURCE_CREATE:
- ret = vrend_decode_pipe_resource_create(gdctx, len);
- break;
- default:
- ret = EINVAL;
- }
+ TRACE_SCOPE("%s", vrend_get_comand_name(cmd));
- if (ret == EINVAL) {
- vrend_report_buffer_error(gdctx->grctx, header);
- goto out;
+ ret = decode_table[cmd](gdctx->grctx, buf, len);
+ if (ret) {
+ if (ret == EINVAL) {
+ vrend_report_buffer_error(gdctx->grctx, *buf);
+ return ret;
+ }
}
- if (ret == ENOMEM)
- goto out;
- gdctx->ds->buf_offset += (len) + 1;
}
return 0;
- out:
- return ret;
}
static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
uint32_t ctx_id)
{
- struct virgl_context *ctx = &dctx->base;
+ struct virgl_context *ctx = &dctx->base ;
+
+ for (unsigned i = 0; i < VIRGL_MAX_COMMANDS; ++i)
+ assert(decode_table[i]);
ctx->ctx_id = ctx_id;
ctx->destroy = vrend_decode_ctx_destroy;
@@ -1722,8 +1645,3 @@ static void vrend_decode_ctx_init_base(struct vrend_decode_ctx *dctx,
ctx->get_blob_done = NULL;
ctx->submit_cmd = vrend_decode_ctx_submit_cmd;
}
-
-void vrend_decode_reset(void)
-{
- virgl_context_table_reset();
-}
diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
index 28172e10..93b8ca2a 100644
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -47,36 +47,43 @@
#include "vrend_renderer.h"
#include "vrend_debug.h"
+#include "vrend_winsys.h"
#include "virgl_util.h"
#include "virgl_hw.h"
#include "virgl_resource.h"
#include "virglrenderer.h"
+#include "virglrenderer_hw.h"
#include "tgsi/tgsi_text.h"
-#ifdef HAVE_EVENTFD
-#include <sys/eventfd.h>
-#endif
-
-#ifdef HAVE_EPOXY_EGL_H
-#include "virgl_gbm.h"
-#include "virgl_egl.h"
-extern struct virgl_gbm *gbm;
-extern struct virgl_egl *egl;
-#endif
-
-int use_context = CONTEXT_NONE;
+/*
+ * VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both
+ * virgl_caps_v1 and are exactly the same.
+ *
+ * VIRGL_RENDERER_CAPSET_VIRGL2 has version 0, 1, and 2, but they are
+ * all virgl_caps_v2 and are exactly the same.
+ *
+ * Since virgl_caps_v2 is growable and no backward-incompatible change is
+ * expected, we don't bump up these versions anymore.
+ */
+#define VREND_CAPSET_VIRGL_MAX_VERSION 1
+#define VREND_CAPSET_VIRGL2_MAX_VERSION 2
static const uint32_t fake_occlusion_query_samples_passed_default = 1024;
-struct vrend_if_cbs *vrend_clicbs;
+const struct vrend_if_cbs *vrend_clicbs;
struct vrend_fence {
uint32_t fence_id;
uint32_t ctx_id;
- GLsync syncobj;
+ union {
+ GLsync glsyncobj;
+#ifdef HAVE_EPOXY_EGL_H
+ EGLSyncKHR eglsyncobj;
+#endif
+ };
struct list_head fences;
};
@@ -197,7 +204,7 @@ static const struct {
} feature_list[] = {
FEAT(arb_or_gles_ext_texture_buffer, 31, UNAVAIL, "GL_ARB_texture_buffer_object", "GL_EXT_texture_buffer", NULL),
FEAT(arb_robustness, UNAVAIL, UNAVAIL, "GL_ARB_robustness" ),
- FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage"),
+ FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage", "GL_EXT_buffer_storage"),
FEAT(arrays_of_arrays, 43, 31, "GL_ARB_arrays_of_arrays"),
FEAT(atomic_counters, 42, 31, "GL_ARB_shader_atomic_counters" ),
FEAT(base_instance, 42, UNAVAIL, "GL_ARB_base_instance", "GL_EXT_base_instance" ),
@@ -286,12 +293,14 @@ struct global_renderer_state {
struct vrend_context *current_hw_ctx;
struct list_head waiting_query_list;
- bool inited;
bool finishing;
bool use_gles;
bool use_core_profile;
bool use_external_blob;
bool use_integer;
+#ifdef HAVE_EPOXY_EGL_H
+ bool use_egl_fence;
+#endif
bool features[feat_last];
@@ -644,6 +653,8 @@ struct vrend_sub_context {
struct vrend_context_tweaks tweaks;
uint8_t swizzle_output_rgb_to_bgr;
int fake_occlusion_query_samples_passed_multiplier;
+
+ int prim_mode;
};
struct vrend_context {
@@ -3185,7 +3196,6 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
}
key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
- key->coord_replace = ctx->sub->rs_state.point_quad_rasterization ? ctx->sub->rs_state.sprite_coord_enable : 0;
if (type == PIPE_SHADER_FRAGMENT)
key->fs_swizzle_output_rgb_to_bgr = ctx->sub->swizzle_output_rgb_to_bgr;
@@ -3247,6 +3257,25 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
key->force_invariant_inputs = ctx->sub->shaders[prev_type]->sinfo.invariant_outputs;
}
+ // Only use coord_replace if frag shader receives GL_POINTS
+ if (type == PIPE_SHADER_FRAGMENT) {
+ int fs_prim_mode = ctx->sub->prim_mode; // inherit draw-call's mode
+ switch (prev_type) {
+ case PIPE_SHADER_TESS_EVAL:
+ if (ctx->sub->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
+ fs_prim_mode = PIPE_PRIM_POINTS;
+ break;
+ case PIPE_SHADER_GEOMETRY:
+ fs_prim_mode = ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
+ break;
+ }
+ key->fs_prim_is_points = (fs_prim_mode == PIPE_PRIM_POINTS);
+ key->coord_replace = ctx->sub->rs_state.point_quad_rasterization
+ && key->fs_prim_is_points
+ ? ctx->sub->rs_state.sprite_coord_enable
+ : 0x0;
+ }
+
int next_type = -1;
switch (type) {
case PIPE_SHADER_VERTEX:
@@ -4298,7 +4327,7 @@ static void vrend_draw_bind_objects(struct vrend_context *ctx, bool new_program)
}
if (vrend_state.use_core_profile && ctx->sub->prog->fs_alpha_ref_val_loc != -1) {
- glUniform1i(ctx->sub->prog->fs_alpha_ref_val_loc, ctx->sub->dsa_state.alpha.ref_value);
+ glUniform1f(ctx->sub->prog->fs_alpha_ref_val_loc, ctx->sub->dsa_state.alpha.ref_value);
}
}
@@ -4393,6 +4422,16 @@ int vrend_draw_vbo(struct vrend_context *ctx,
if (ctx->sub->blend_state_dirty)
vrend_patch_blend_state(ctx);
+ // enable primitive-mode-dependent shader variants
+ if (ctx->sub->prim_mode != (int)info->mode) {
+ // Only refresh shader program when switching in/out of GL_POINTS primitive mode
+ if (ctx->sub->prim_mode == PIPE_PRIM_POINTS
+ || (int)info->mode == PIPE_PRIM_POINTS)
+ ctx->sub->shader_dirty = true;
+
+ ctx->sub->prim_mode = (int)info->mode;
+ }
+
if (ctx->sub->shader_dirty || ctx->sub->swizzle_output_rgb_to_bgr) {
struct vrend_linked_shader_program *prog;
bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty;
@@ -5719,55 +5758,67 @@ static void vrend_free_sync_thread(void)
pipe_mutex_destroy(vrend_state.fence_mutex);
}
-#ifdef HAVE_EVENTFD
-static ssize_t
-write_full(int fd, const void *ptr, size_t count)
+static void free_fence_locked(struct vrend_fence *fence)
{
- const char *buf = ptr;
- ssize_t ret = 0;
- ssize_t total = 0;
-
- while (count) {
- ret = write(fd, buf, count);
- if (ret < 0) {
- if (errno == EINTR)
- continue;
- break;
- }
- count -= ret;
- buf += ret;
- total += ret;
+ list_del(&fence->fences);
+#ifdef HAVE_EPOXY_EGL_H
+ if (vrend_state.use_egl_fence) {
+ virgl_egl_fence_destroy(egl, fence->eglsyncobj);
+ } else
+#endif
+ {
+ glDeleteSync(fence->glsyncobj);
}
- return total;
+ free(fence);
}
-static void wait_sync(struct vrend_fence *fence)
+static void vrend_free_fences(void)
{
- GLenum glret;
- ssize_t n;
- uint64_t value = 1;
+ struct vrend_fence *fence, *stor;
- do {
- glret = glClientWaitSync(fence->syncobj, 0, 1000000000);
+ /* this is called after vrend_free_sync_thread */
+ assert(!vrend_state.sync_thread);
- switch (glret) {
- case GL_WAIT_FAILED:
- vrend_printf( "wait sync failed: illegal fence object %p\n", fence->syncobj);
- break;
- case GL_ALREADY_SIGNALED:
- case GL_CONDITION_SATISFIED:
- break;
- default:
- break;
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences)
+ free_fence_locked(fence);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences)
+ free_fence_locked(fence);
+}
+
+static bool do_wait(struct vrend_fence *fence, bool can_block)
+{
+ bool done = false;
+ int timeout = can_block ? 1000000000 : 0;
+
+#ifdef HAVE_EPOXY_EGL_H
+ if (vrend_state.use_egl_fence) {
+ do {
+ done = virgl_egl_client_wait_fence(egl, fence->eglsyncobj, timeout);
+ } while (!done && can_block);
+ return done;
+ }
+#endif
+
+ do {
+ GLenum glret = glClientWaitSync(fence->glsyncobj, 0, timeout);
+ if (glret == GL_WAIT_FAILED) {
+ vrend_printf( "wait sync failed: illegal fence object %p\n", fence->glsyncobj);
}
- } while (glret == GL_TIMEOUT_EXPIRED);
+ done = glret != GL_TIMEOUT_EXPIRED;
+ } while (!done && can_block);
+
+ return done;
+}
+
+static void wait_sync(struct vrend_fence *fence)
+{
+ do_wait(fence, /* can_block */ true);
pipe_mutex_lock(vrend_state.fence_mutex);
list_addtail(&fence->fences, &vrend_state.fence_list);
pipe_mutex_unlock(vrend_state.fence_mutex);
- n = write_full(vrend_state.eventfd, &value, sizeof(value));
- if (n != sizeof(value)) {
+ if (write_eventfd(vrend_state.eventfd, 1)) {
perror("failed to write to eventfd\n");
}
}
@@ -5808,9 +5859,6 @@ static void vrend_renderer_use_threaded_sync(void)
{
struct virgl_gl_ctx_param ctx_params;
- if (getenv("VIRGL_DISABLE_MT"))
- return;
-
ctx_params.shared = true;
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
@@ -5823,7 +5871,7 @@ static void vrend_renderer_use_threaded_sync(void)
return;
}
- vrend_state.eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ vrend_state.eventfd = create_eventfd(0);
if (vrend_state.eventfd == -1) {
vrend_printf( "Failed to create eventfd\n");
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
@@ -5842,11 +5890,6 @@ static void vrend_renderer_use_threaded_sync(void)
pipe_mutex_destroy(vrend_state.fence_mutex);
}
}
-#else
-static void vrend_renderer_use_threaded_sync(void)
-{
-}
-#endif
static void vrend_debug_cb(UNUSED GLenum source, GLenum type, UNUSED GLuint id,
UNUSED GLenum severity, UNUSED GLsizei length,
@@ -5916,7 +5959,7 @@ static enum virgl_resource_fd_type vrend_pipe_resource_export_fd(UNUSED struct p
return VIRGL_RESOURCE_FD_INVALID;
}
-static const struct virgl_resource_pipe_callbacks *
+const struct virgl_resource_pipe_callbacks *
vrend_renderer_get_pipe_callbacks(void)
{
static const struct virgl_resource_pipe_callbacks callbacks = {
@@ -5939,22 +5982,19 @@ static bool use_integer() {
return false;
}
-int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
+int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags)
{
bool gles;
int gl_ver;
virgl_gl_context gl_context;
struct virgl_gl_ctx_param ctx_params;
- if (!vrend_state.inited) {
- vrend_state.inited = true;
- virgl_resource_table_init(vrend_renderer_get_pipe_callbacks());
- vrend_clicbs = cbs;
- /* Give some defaults to be able to run the tests */
- vrend_state.max_texture_2d_size =
- vrend_state.max_texture_3d_size =
- vrend_state.max_texture_cube_size = 16384;
- }
+ vrend_clicbs = cbs;
+
+ /* Give some defaults to be able to run the tests */
+ vrend_state.max_texture_2d_size =
+ vrend_state.max_texture_3d_size =
+ vrend_state.max_texture_cube_size = 16384;
#ifndef NDEBUG
vrend_init_debug_flags();
@@ -6004,7 +6044,7 @@ int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
init_features(gles ? 0 : gl_ver,
gles ? gl_ver : 0);
- vrend_state.features[feat_srgb_write_control] &= virgl_has_gl_colorspace();
+ vrend_state.features[feat_srgb_write_control] &= vrend_winsys_has_gl_colorspace();
glGetIntegerv(GL_MAX_DRAW_BUFFERS, (GLint *) &vrend_state.max_draw_buffers);
@@ -6057,33 +6097,31 @@ int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
if (flags & VREND_USE_EXTERNAL_BLOB)
vrend_state.use_external_blob = true;
+#ifdef HAVE_EPOXY_EGL_H
+ if (vrend_state.use_gles)
+ vrend_state.use_egl_fence = virgl_egl_supports_fences(egl);
+#endif
+
return 0;
}
void
vrend_renderer_fini(void)
{
- if (!vrend_state.inited)
- return;
-
vrend_state.finishing = true;
- vrend_free_sync_thread();
if (vrend_state.eventfd != -1) {
close(vrend_state.eventfd);
vrend_state.eventfd = -1;
}
+ vrend_free_fences();
vrend_blitter_fini();
- vrend_hw_switch_context(vrend_state.ctx0, true);
- vrend_decode_reset();
- virgl_resource_table_cleanup();
vrend_destroy_context(vrend_state.ctx0);
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
- vrend_state.inited = false;
vrend_state.finishing = false;
}
@@ -6502,6 +6540,7 @@ static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint3
if (buffer_storage_flags) {
if (has_feature(feat_arb_buffer_storage)) {
glBufferStorage(gr->target, width, NULL, buffer_storage_flags);
+ gr->map_info = vrend_state.inferred_gl_caching_type;
}
#ifdef ENABLE_MINIGBM_ALLOCATION
else if (has_feature(feat_memory_object_fd) && has_feature(feat_memory_object)) {
@@ -6529,6 +6568,11 @@ static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint3
gr->gbm_bo = bo;
gr->memobj = memobj;
gr->storage_bits |= VREND_STORAGE_GBM_BUFFER | VREND_STORAGE_GL_MEMOBJ;
+
+ if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915"))
+ gr->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
+ else
+ gr->map_info = VIRGL_RENDERER_MAP_CACHE_WC;
}
#endif
else {
@@ -6538,7 +6582,6 @@ static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint3
gr->storage_bits |= VREND_STORAGE_GL_IMMUTABLE;
gr->buffer_storage_flags = buffer_storage_flags;
- gr->map_info = vrend_state.inferred_gl_caching_type;
gr->size = width;
} else
glBufferData(gr->target, width, NULL, GL_STREAM_DRAW);
@@ -7202,6 +7245,28 @@ static bool check_iov_bounds(struct vrend_resource *res,
return true;
}
+static void get_current_texture(GLenum target, GLint* tex) {
+ switch (target) {
+#define GET_TEXTURE(a) \
+ case GL_TEXTURE_ ## a: \
+ glGetIntegerv(GL_TEXTURE_BINDING_ ## a, tex); return
+ GET_TEXTURE(1D);
+ GET_TEXTURE(2D);
+ GET_TEXTURE(3D);
+ GET_TEXTURE(1D_ARRAY);
+ GET_TEXTURE(2D_ARRAY);
+ GET_TEXTURE(RECTANGLE);
+ GET_TEXTURE(CUBE_MAP);
+ GET_TEXTURE(CUBE_MAP_ARRAY);
+ GET_TEXTURE(BUFFER);
+ GET_TEXTURE(2D_MULTISAMPLE);
+ GET_TEXTURE(2D_MULTISAMPLE_ARRAY);
+#undef GET_TEXTURE
+ default:
+ vrend_printf("Unknown texture target %x\n", target);
+ }
+}
+
static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
struct vrend_resource *res,
const struct iovec *iov, int num_iovs,
@@ -7362,6 +7427,8 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
data);
} else {
uint32_t comp_size;
+ GLint old_tex = 0;
+ get_current_texture(res->target, &old_tex);
glBindTexture(res->target, res->id);
if (compressed) {
@@ -7449,7 +7516,7 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, 1.0);
}
- glBindTexture(res->target, 0);
+ glBindTexture(res->target, old_tex);
}
if (stride && !need_temp) {
@@ -7521,6 +7588,8 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
break;
}
+ GLint old_tex = 0;
+ get_current_texture(res->target, &old_tex);
glBindTexture(res->target, res->id);
if (res->target == GL_TEXTURE_CUBE_MAP) {
target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z;
@@ -7551,7 +7620,7 @@ static int vrend_transfer_send_getteximage(struct vrend_resource *res,
info->stride, info->box, info->level, info->offset,
false);
free(data);
- glBindTexture(res->target, 0);
+ glBindTexture(res->target, old_tex);
return 0;
}
@@ -7592,7 +7661,18 @@ static int vrend_transfer_send_readpixels(struct vrend_context *ctx,
else
glUseProgram(0);
- enum virgl_formats fmt = vrend_format_replace_emulated(res->base.bind, res->base.format);
+ /* If the emubgra tweak is active then reading back the BGRA format emulated
+ * by swizzling an RGBA format will take a performance hit because mesa will
+ * manually swizzle the RGBA data. This can be avoided by setting the
+ * tweak bgraswz that does this swizzling already on the GPU when blitting
+ * or rendering to an emulated BGRA surface and reading back the data as
+ * RGBA. The check of whether we are on gles and emubgra is active is done
+ * in vrend_format_replace_emulated, so no need to repeat the test here */
+ enum virgl_formats fmt = res->base.format;
+ if (vrend_get_tweak_is_active(&ctx->sub->tweaks,
+ virgl_tweak_gles_brga_apply_dest_swizzle))
+ fmt = vrend_format_replace_emulated(res->base.bind, res->base.format);
+
format = tex_conv_table[fmt].glformat;
type = tex_conv_table[fmt].gltype;
/* if we are asked to invert and reading from a front then don't */
@@ -7979,18 +8059,18 @@ int vrend_renderer_copy_transfer3d(struct vrend_context *ctx,
bool use_gbm = true;
/* The guest uses copy transfers against busy resources to avoid
- * waiting. The host driver is usually smart enough to avoid blocking
- * by putting the data in a staging buffer and doing a pipelined copy.
- *
- * However, we cannot do that with GBM. Use GBM only when we have to
- * (until vrend_renderer_transfer_write_iov swizzles).
+ * waiting. The host GL driver is usually smart enough to avoid
+ * blocking by putting the data in a staging buffer and doing a
+ * pipelined copy. But when there is a GBM bo, we can only do that when
+ * VREND_STORAGE_GL_IMMUTABLE is set because it implies that the
+ * internal format is known and is known to be compatible with the
+ * subsequent glTexSubImage2D. Otherwise, we glFinish and use GBM.
*/
if (info->synchronized) {
- if (tex_conv_table[dst_res->base.format].internalformat == 0 ||
- tex_conv_table[dst_res->base.format].flags & VIRGL_TEXTURE_NEED_SWIZZLE)
- glFinish();
- else
+ if (has_bit(dst_res->storage_bits, VREND_STORAGE_GL_IMMUTABLE))
use_gbm = false;
+ else
+ glFinish();
}
if (use_gbm) {
@@ -8933,10 +9013,17 @@ int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
fence->ctx_id = ctx_id;
fence->fence_id = client_fence_id;
- fence->syncobj = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+#ifdef HAVE_EPOXY_EGL_H
+ if (vrend_state.use_egl_fence) {
+ fence->eglsyncobj = virgl_egl_fence_create(egl);
+ } else
+#endif
+ {
+ fence->glsyncobj = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ }
glFlush();
- if (fence->syncobj == NULL)
+ if (fence->glsyncobj == NULL)
goto fail;
if (vrend_state.sync_thread) {
@@ -8954,32 +9041,12 @@ int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id)
return ENOMEM;
}
-static void free_fence_locked(struct vrend_fence *fence)
-{
- list_del(&fence->fences);
- glDeleteSync(fence->syncobj);
- free(fence);
-}
-
-static void flush_eventfd(int fd)
-{
- ssize_t len;
- uint64_t value;
- do {
- len = read(fd, &value, sizeof(value));
- } while ((len == -1 && errno == EINTR) || len == sizeof(value));
-}
-
static void vrend_renderer_check_queries(void);
void vrend_renderer_check_fences(void)
{
struct vrend_fence *fence, *stor;
uint32_t latest_id = 0;
- GLenum glret;
-
- if (!vrend_state.inited)
- return;
if (vrend_state.sync_thread) {
flush_eventfd(vrend_state.eventfd);
@@ -8994,13 +9061,11 @@ void vrend_renderer_check_fences(void)
vrend_renderer_force_ctx_0();
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
- glret = glClientWaitSync(fence->syncobj, 0, 0);
- if (glret == GL_ALREADY_SIGNALED){
+ if (do_wait(fence, /* can_block */ false)) {
latest_id = fence->fence_id;
free_fence_locked(fence);
- }
- /* don't bother checking any subsequent ones */
- else if (glret == GL_TIMEOUT_EXPIRED) {
+ } else {
+ /* don't bother checking any subsequent ones */
break;
}
}
@@ -9728,7 +9793,8 @@ static void vrend_renderer_fill_caps_v1(int gl_ver, int gles_ver, union virgl_ca
caps->v1.bset.transform_feedback_overflow_query = 1;
if (epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp") ||
- epoxy_has_gl_extension("GL_ARB_texture_mirror_clamp_to_edge")) {
+ epoxy_has_gl_extension("GL_ARB_texture_mirror_clamp_to_edge") ||
+ epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp_to_edge")) {
caps->v1.bset.mirror_clamp = true;
}
@@ -10103,15 +10169,17 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
}
#ifdef ENABLE_MINIGBM_ALLOCATION
- if (has_feature(feat_memory_object) && has_feature(feat_memory_object_fd))
- caps->v2.capability_bits |= VIRGL_CAP_ARB_BUFFER_STORAGE;
+ if (has_feature(feat_memory_object) && has_feature(feat_memory_object_fd)) {
+ if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915"))
+ caps->v2.capability_bits |= VIRGL_CAP_ARB_BUFFER_STORAGE;
+ }
#endif
if (has_feature(feat_blend_equation_advanced))
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_BLEND_EQUATION;
}
-void vrend_renderer_fill_caps(uint32_t set, UNUSED uint32_t version,
+void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
union virgl_caps *caps)
{
int gl_ver, gles_ver;
@@ -10121,18 +10189,22 @@ void vrend_renderer_fill_caps(uint32_t set, UNUSED uint32_t version,
if (!caps)
return;
- if (set > 2) {
- caps->max_version = 0;
- return;
- }
-
- if (set == 1) {
+ switch (set) {
+ case VIRGL_RENDERER_CAPSET_VIRGL:
+ if (version > VREND_CAPSET_VIRGL_MAX_VERSION)
+ return;
memset(caps, 0, sizeof(struct virgl_caps_v1));
- caps->max_version = 1;
- } else if (set == 2) {
+ caps->max_version = VREND_CAPSET_VIRGL_MAX_VERSION;
+ break;
+ case VIRGL_RENDERER_CAPSET_VIRGL2:
+ if (version > VREND_CAPSET_VIRGL2_MAX_VERSION)
+ return;
memset(caps, 0, sizeof(*caps));
- caps->max_version = 2;
+ caps->max_version = VREND_CAPSET_VIRGL2_MAX_VERSION;
fill_capset2 = true;
+ break;
+ default:
+ return;
}
/* We don't want to deal with stale error states that the caller might not
@@ -10310,8 +10382,8 @@ void vrend_context_set_debug_flags(struct vrend_context *ctx, const char *flagst
}
}
-int vrend_renderer_resource_get_info(struct pipe_resource *pres,
- struct vrend_renderer_resource_info *info)
+void vrend_renderer_resource_get_info(struct pipe_resource *pres,
+ struct vrend_renderer_resource_info *info)
{
struct vrend_resource *res = (struct vrend_resource *)pres;
int elsize;
@@ -10325,21 +10397,18 @@ int vrend_renderer_resource_get_info(struct pipe_resource *pres,
info->format = res->base.format;
info->flags = res->y_0_top ? VIRGL_RESOURCE_Y_0_TOP : 0;
info->stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, 0)) * elsize;
-
- return 0;
}
void vrend_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
uint32_t *max_size)
{
switch (cap_set) {
- case VREND_CAP_SET:
- *max_ver = 1;
+ case VIRGL_RENDERER_CAPSET_VIRGL:
+ *max_ver = VREND_CAPSET_VIRGL_MAX_VERSION;
*max_size = sizeof(struct virgl_caps_v1);
break;
- case VREND_CAP_SET2:
- /* we should never need to increase this - it should be possible to just grow virgl_caps */
- *max_ver = 2;
+ case VIRGL_RENDERER_CAPSET_VIRGL2:
+ *max_ver = VREND_CAPSET_VIRGL2_MAX_VERSION;
*max_size = sizeof(struct virgl_caps_v2);
break;
default:
@@ -10420,23 +10489,6 @@ void vrend_print_context_name(const struct vrend_context *ctx)
vrend_printf("HOST: ");
}
-#ifdef HAVE_EPOXY_EGL_H
-struct virgl_egl *egl = NULL;
-struct virgl_gbm *gbm = NULL;
-#endif
-
-int virgl_has_gl_colorspace(void)
-{
- bool egl_colorspace = false;
-#ifdef HAVE_EPOXY_EGL_H
- if (egl)
- egl_colorspace = virgl_has_egl_khr_gl_colorspace(egl);
-#endif
- return use_context == CONTEXT_NONE ||
- use_context == CONTEXT_GLX ||
- (use_context == CONTEXT_EGL && egl_colorspace);
-}
-
void vrend_renderer_destroy_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
@@ -10478,43 +10530,26 @@ void vrend_renderer_set_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
}
}
-static void vrend_reset_fences(void)
+void vrend_renderer_prepare_reset(void)
{
- struct vrend_fence *fence, *stor;
-
- if (vrend_state.sync_thread)
- pipe_mutex_lock(vrend_state.fence_mutex);
-
- LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
- free_fence_locked(fence);
- }
-
- if (vrend_state.sync_thread)
- pipe_mutex_unlock(vrend_state.fence_mutex);
+ /* make sure user contexts are no longer accessed */
+ vrend_free_sync_thread();
+ vrend_hw_switch_context(vrend_state.ctx0, true);
}
void vrend_renderer_reset(void)
{
- if (vrend_state.sync_thread) {
- vrend_free_sync_thread();
- vrend_state.stop_sync_thread = false;
- }
- vrend_reset_fences();
+ vrend_free_fences();
vrend_blitter_fini();
- vrend_hw_switch_context(vrend_state.ctx0, true);
- vrend_decode_reset();
- virgl_resource_table_reset();
vrend_destroy_context(vrend_state.ctx0);
vrend_state.ctx0 = vrend_create_context(0, strlen("HOST"), "HOST");
+ /* TODO respawn sync thread */
}
int vrend_renderer_get_poll_fd(void)
{
- if (!vrend_state.inited)
- return -1;
-
return vrend_state.eventfd;
}
@@ -10610,3 +10645,55 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres)
glBindBufferARB(res->target, 0);
return 0;
}
+
+int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd) {
+#ifdef HAVE_EPOXY_EGL_H
+ if (!vrend_state.use_egl_fence) {
+ return -EINVAL;
+ }
+
+ if (vrend_state.sync_thread)
+ pipe_mutex_lock(vrend_state.fence_mutex);
+
+ struct vrend_fence *fence = NULL;
+ struct vrend_fence *iter;
+ uint32_t min_fence_id = UINT_MAX;
+
+ if (!LIST_IS_EMPTY(&vrend_state.fence_list)) {
+ min_fence_id = LIST_ENTRY(struct vrend_fence, vrend_state.fence_list.next, fences)->fence_id;
+ } else if (!LIST_IS_EMPTY(&vrend_state.fence_wait_list)) {
+ min_fence_id =
+ LIST_ENTRY(struct vrend_fence, vrend_state.fence_wait_list.next, fences)->fence_id;
+ }
+
+ if (fence_id < min_fence_id) {
+ if (vrend_state.sync_thread)
+ pipe_mutex_unlock(vrend_state.fence_mutex);
+ return virgl_egl_export_signaled_fence(egl, out_fd) ? 0 : -EINVAL;
+ }
+
+ LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_list, fences) {
+ if (iter->fence_id == fence_id) {
+ fence = iter;
+ break;
+ }
+ }
+
+ if (!fence) {
+ LIST_FOR_EACH_ENTRY(iter, &vrend_state.fence_wait_list, fences) {
+ if (iter->fence_id == fence_id) {
+ fence = iter;
+ break;
+ }
+ }
+ }
+
+ if (vrend_state.sync_thread)
+ pipe_mutex_unlock(vrend_state.fence_mutex);
+
+ if (fence && virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd)) {
+ return 0;
+ }
+#endif
+ return -EINVAL;
+}
diff --git a/src/vrend_renderer.h b/src/vrend_renderer.h
index 424d750c..da4ffe9c 100644
--- a/src/vrend_renderer.h
+++ b/src/vrend_renderer.h
@@ -32,7 +32,7 @@
#include "vrend_debug.h"
#include "vrend_tweaks.h"
#include "vrend_iov.h"
-#include "virgl_gbm.h"
+#include "vrend_winsys_gbm.h"
#include "virgl_hw.h"
#include <epoxy/gl.h>
@@ -62,19 +62,6 @@ struct vrend_context;
#define VREND_STORAGE_GL_IMMUTABLE BIT(6)
#define VREND_STORAGE_GL_MEMOBJ BIT(7)
-enum {
- CONTEXT_NONE,
- CONTEXT_EGL,
- CONTEXT_GLX
-};
-
-extern int use_context;
-#ifdef HAVE_EPOXY_EGL_H
-extern struct virgl_egl *egl;
-extern struct virgl_gbm *gbm;
-#endif
-
-
struct vrend_resource {
struct pipe_resource base;
uint32_t storage_bits;
@@ -135,7 +122,10 @@ struct vrend_if_cbs {
#define VREND_USE_THREAD_SYNC 1
#define VREND_USE_EXTERNAL_BLOB 2
-int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags);
+const struct virgl_resource_pipe_callbacks *
+vrend_renderer_get_pipe_callbacks(void);
+
+int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags);
void vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings, uint32_t flags);
bool vrend_check_framebuffer_mixed_color_attachements(void);
@@ -357,6 +347,8 @@ int vrend_renderer_create_fence(int client_fence_id, uint32_t ctx_id);
void vrend_renderer_check_fences(void);
+int vrend_renderer_export_fence(uint32_t fence_id, int* out_fd);
+
bool vrend_hw_switch_context(struct vrend_context *ctx, bool now);
uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
uint32_t handle, enum virgl_object_type type);
@@ -431,11 +423,8 @@ struct vrend_renderer_resource_info {
uint32_t stride;
};
-int vrend_renderer_resource_get_info(struct pipe_resource *pres,
- struct vrend_renderer_resource_info *info);
-
-#define VREND_CAP_SET 1
-#define VREND_CAP_SET2 2
+void vrend_renderer_resource_get_info(struct pipe_resource *pres,
+ struct vrend_renderer_resource_info *info);
void vrend_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
uint32_t *max_size);
@@ -471,9 +460,9 @@ void vrend_renderer_blit_gl(struct vrend_context *ctx,
bool skip_dest_swizzle);
void vrend_blitter_fini(void);
+void vrend_renderer_prepare_reset(void);
void vrend_renderer_reset(void);
int vrend_renderer_get_poll_fd(void);
-void vrend_decode_reset(void);
unsigned vrend_context_has_debug_flag(const struct vrend_context *ctx,
enum virgl_debug_flags flag);
@@ -481,8 +470,6 @@ unsigned vrend_context_has_debug_flag(const struct vrend_context *ctx,
unsigned vrend_renderer_query_multisample_caps(unsigned max_samples,
struct virgl_caps_v2 *caps);
-int virgl_has_gl_colorspace(void);
-
struct gl_version {
uint32_t major;
uint32_t minor;
@@ -491,7 +478,7 @@ struct gl_version {
static const struct gl_version gl_versions[] = { {4,6}, {4,5}, {4,4}, {4,3}, {4,2}, {4,1}, {4,0},
{3,3}, {3,2}, {3,1}, {3,0} };
-extern struct vrend_if_cbs *vrend_clicbs;
+extern const struct vrend_if_cbs *vrend_clicbs;
int vrend_renderer_export_query(struct pipe_resource *pres,
struct virgl_renderer_export_query *export_query);
diff --git a/src/vrend_shader.c b/src/vrend_shader.c
index 0124fb4c..f7ace6f8 100644
--- a/src/vrend_shader.c
+++ b/src/vrend_shader.c
@@ -150,16 +150,34 @@ struct vrend_io_range {
bool used;
};
+struct vrend_glsl_strbufs {
+ int indent_level;
+ struct vrend_strbuf glsl_main;
+ struct vrend_strbuf glsl_hdr;
+ struct vrend_strbuf glsl_ver_ext;
+};
+
+struct vrend_generic_ios {
+ struct vrend_io_range input_range;
+ struct vrend_io_range output_range;
+
+ uint32_t outputs_expected_mask;
+ uint32_t inputs_emitted_mask;
+ uint32_t outputs_emitted_mask;
+};
+
+struct vrend_patch_ios {
+ struct vrend_io_range input_range;
+ struct vrend_io_range output_range;
+};
+
struct dump_ctx {
struct tgsi_iterate_context iter;
const struct vrend_shader_cfg *cfg;
struct tgsi_shader_info info;
int prog_type;
int size;
- struct vrend_strbuf glsl_main;
- int indent_level;
- struct vrend_strbuf glsl_hdr;
- struct vrend_strbuf glsl_ver_ext;
+ struct vrend_glsl_strbufs glsl_strbufs;
uint instno;
struct vrend_strbuf src_bufs[4];
@@ -175,14 +193,8 @@ struct dump_ctx {
struct vrend_shader_io system_values[32];
bool guest_sent_io_arrays;
- struct vrend_io_range generic_input_range;
- struct vrend_io_range patch_input_range;
- struct vrend_io_range generic_output_range;
- struct vrend_io_range patch_output_range;
-
- uint32_t generic_outputs_expected_mask;
- uint32_t generic_inputs_emitted_mask;
- uint32_t generic_outputs_emitted_mask;
+ struct vrend_generic_ios generic_ios;
+ struct vrend_patch_ios patch_ios;
uint32_t num_temp_ranges;
struct vrend_temp_range *temp_ranges;
@@ -367,7 +379,7 @@ enum io_type {
/* We prefer arrays of arrays, but if this is not available then TCS, GEOM, and TES
* inputs must be blocks, but FS input should not because interpolateAt* doesn't
* support dereferencing block members. */
-static inline bool prefer_generic_io_block(struct dump_ctx *ctx, enum io_type io)
+static inline bool prefer_generic_io_block(const struct dump_ctx *ctx, enum io_type io)
{
if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
return false;
@@ -513,7 +525,7 @@ static inline int gs_input_prim_to_size(int prim)
};
}
-static inline bool fs_emit_layout(struct dump_ctx *ctx)
+static inline bool fs_emit_layout(const struct dump_ctx *ctx)
{
if (ctx->fs_pixel_center)
return true;
@@ -526,7 +538,7 @@ static inline bool fs_emit_layout(struct dump_ctx *ctx)
return false;
}
-static const char *get_stage_input_name_prefix(struct dump_ctx *ctx, int processor)
+static const char *get_stage_input_name_prefix(const struct dump_ctx *ctx, int processor)
{
const char *name_prefix;
switch (processor) {
@@ -587,109 +599,108 @@ static const char *get_stage_output_name_prefix(int processor)
return name_prefix;
}
-static void require_glsl_ver(struct dump_ctx *ctx, int glsl_ver)
+static int require_glsl_ver(const struct dump_ctx *ctx, int glsl_ver)
{
- if (glsl_ver > ctx->glsl_ver_required)
- ctx->glsl_ver_required = glsl_ver;
+ return glsl_ver > ctx->glsl_ver_required ? glsl_ver : ctx->glsl_ver_required;
}
-static void emit_indent(struct dump_ctx *ctx)
+static void emit_indent(struct vrend_glsl_strbufs *glsl_strbufs)
{
- if (ctx->indent_level > 0) {
+ if (glsl_strbufs->indent_level > 0) {
/* very high levels of indentation doesn't improve readability */
- int indent_level = MIN2(ctx->indent_level, 15);
+ int indent_level = MIN2(glsl_strbufs->indent_level, 15);
char buf[16];
memset(buf, '\t', indent_level);
buf[indent_level] = '\0';
- strbuf_append(&ctx->glsl_main, buf);
+ strbuf_append(&glsl_strbufs->glsl_main, buf);
}
}
-static void emit_buf(struct dump_ctx *ctx, const char *buf)
+static void emit_buf(struct vrend_glsl_strbufs *glsl_strbufs, const char *buf)
{
- emit_indent(ctx);
- strbuf_append(&ctx->glsl_main, buf);
+ emit_indent(glsl_strbufs);
+ strbuf_append(&glsl_strbufs->glsl_main, buf);
}
-static void indent_buf(struct dump_ctx *ctx)
+static void indent_buf(struct vrend_glsl_strbufs *glsl_strbufs)
{
- ctx->indent_level++;
+ glsl_strbufs->indent_level++;
}
-static void outdent_buf(struct dump_ctx *ctx)
+static void outdent_buf(struct vrend_glsl_strbufs *glsl_strbufs)
{
- if (ctx->indent_level <= 0) {
- strbuf_set_error(&ctx->glsl_main);
+ if (glsl_strbufs->indent_level <= 0) {
+ strbuf_set_error(&glsl_strbufs->glsl_main);
return;
}
- ctx->indent_level--;
+ glsl_strbufs->indent_level--;
}
-static void set_buf_error(struct dump_ctx *ctx)
+static void set_buf_error(struct vrend_glsl_strbufs *glsl_strbufs)
{
- strbuf_set_error(&ctx->glsl_main);
+ strbuf_set_error(&glsl_strbufs->glsl_main);
}
__attribute__((format(printf, 2, 3)))
-static void emit_buff(struct dump_ctx *ctx, const char *fmt, ...)
+static void emit_buff(struct vrend_glsl_strbufs *glsl_strbufs, const char *fmt, ...)
{
va_list va;
va_start(va, fmt);
- emit_indent(ctx);
- strbuf_vappendf(&ctx->glsl_main, fmt, va);
+ emit_indent(glsl_strbufs);
+ strbuf_vappendf(&glsl_strbufs->glsl_main, fmt, va);
va_end(va);
}
-static void emit_hdr(struct dump_ctx *ctx, const char *buf)
+static void emit_hdr(struct vrend_glsl_strbufs *glsl_strbufs, const char *buf)
{
- strbuf_append(&ctx->glsl_hdr, buf);
+ strbuf_append(&glsl_strbufs->glsl_hdr, buf);
}
-static void set_hdr_error(struct dump_ctx *ctx)
+static void set_hdr_error(struct vrend_glsl_strbufs *glsl_strbufs)
{
- strbuf_set_error(&ctx->glsl_hdr);
+ strbuf_set_error(&glsl_strbufs->glsl_hdr);
}
__attribute__((format(printf, 2, 3)))
-static void emit_hdrf(struct dump_ctx *ctx, const char *fmt, ...)
+static void emit_hdrf(struct vrend_glsl_strbufs *glsl_strbufs, const char *fmt, ...)
{
va_list va;
va_start(va, fmt);
- strbuf_vappendf(&ctx->glsl_hdr, fmt, va);
+ strbuf_vappendf(&glsl_strbufs->glsl_hdr, fmt, va);
va_end(va);
}
-static void emit_ver_ext(struct dump_ctx *ctx, const char *buf)
+static void emit_ver_ext(struct vrend_glsl_strbufs *glsl_strbufs, const char *buf)
{
- strbuf_append(&ctx->glsl_ver_ext, buf);
+ strbuf_append(&glsl_strbufs->glsl_ver_ext, buf);
}
__attribute__((format(printf, 2, 3)))
-static void emit_ver_extf(struct dump_ctx *ctx, const char *fmt, ...)
+static void emit_ver_extf(struct vrend_glsl_strbufs *glsl_strbufs, const char *fmt, ...)
{
va_list va;
va_start(va, fmt);
- strbuf_vappendf(&ctx->glsl_ver_ext, fmt, va);
+ strbuf_vappendf(&glsl_strbufs->glsl_ver_ext, fmt, va);
va_end(va);
}
-static bool allocate_temp_range(struct dump_ctx *ctx, int first, int last,
+static bool allocate_temp_range(struct vrend_temp_range **temp_ranges, uint32_t *num_temp_ranges, int first, int last,
int array_id)
{
- int idx = ctx->num_temp_ranges;
+ int idx = *num_temp_ranges;
- ctx->temp_ranges = realloc(ctx->temp_ranges, sizeof(struct vrend_temp_range) * (idx + 1));
- if (!ctx->temp_ranges)
+ *temp_ranges = realloc(*temp_ranges, sizeof(struct vrend_temp_range) * (idx + 1));
+ if (!*temp_ranges)
return false;
- ctx->temp_ranges[idx].first = first;
- ctx->temp_ranges[idx].last = last;
- ctx->temp_ranges[idx].array_id = array_id;
- ctx->num_temp_ranges++;
+ (*temp_ranges)[idx].first = first;
+ (*temp_ranges)[idx].last = last;
+ (*temp_ranges)[idx].array_id = array_id;
+ (*num_temp_ranges)++;
return true;
}
-static struct vrend_temp_range *find_temp_range(struct dump_ctx *ctx, int index)
+static struct vrend_temp_range *find_temp_range(const struct dump_ctx *ctx, int index)
{
uint32_t i;
for (i = 0; i < ctx->num_temp_ranges; i++) {
@@ -736,8 +747,9 @@ static uint32_t samplertype_to_req_bits(int sampler_type)
}
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static bool add_images(struct dump_ctx *ctx, int first, int last,
- struct tgsi_declaration_image *img_decl)
+ const struct tgsi_declaration_image *img_decl)
{
int i;
@@ -797,6 +809,7 @@ static bool add_images(struct dump_ctx *ctx, int first, int last,
return true;
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static bool add_sampler_array(struct dump_ctx *ctx, int first, int last)
{
int idx = ctx->num_sampler_arrays;
@@ -810,7 +823,7 @@ static bool add_sampler_array(struct dump_ctx *ctx, int first, int last)
return true;
}
-static int lookup_sampler_array(struct dump_ctx *ctx, int index)
+static int lookup_sampler_array(const struct dump_ctx *ctx, int index)
{
uint32_t i;
for (i = 0; i < ctx->num_sampler_arrays; i++) {
@@ -836,6 +849,7 @@ int vrend_shader_lookup_sampler_array(const struct vrend_shader_info *sinfo, int
return -1;
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static bool add_samplers(struct dump_ctx *ctx, int first, int last, int sview_type, enum tgsi_return_type sview_rtype)
{
if (sview_rtype == TGSI_RETURN_TYPE_SINT ||
@@ -864,7 +878,7 @@ static bool add_samplers(struct dump_ctx *ctx, int first, int last, int sview_ty
return true;
}
-static struct vrend_array *lookup_image_array_ptr(struct dump_ctx *ctx, int index)
+static struct vrend_array *lookup_image_array_ptr(const struct dump_ctx *ctx, int index)
{
uint32_t i;
for (i = 0; i < ctx->num_image_arrays; i++) {
@@ -876,7 +890,7 @@ static struct vrend_array *lookup_image_array_ptr(struct dump_ctx *ctx, int inde
return NULL;
}
-static int lookup_image_array(struct dump_ctx *ctx, int index)
+static int lookup_image_array(const struct dump_ctx *ctx, int index)
{
struct vrend_array *image = lookup_image_array_ptr(ctx, index);
return image ? image->first : -1;
@@ -884,7 +898,7 @@ static int lookup_image_array(struct dump_ctx *ctx, int index)
static boolean
iter_inputs(struct tgsi_iterate_context *iter,
- struct tgsi_full_declaration *decl )
+ struct tgsi_full_declaration *decl)
{
struct dump_ctx *ctx = (struct dump_ctx *)iter;
switch (decl->Declaration.File) {
@@ -921,7 +935,8 @@ static bool logiop_require_inout(const struct vrend_shader_key *key)
static enum vec_type get_type(uint32_t signed_int_mask,
uint32_t unsigned_int_mask,
- int bit) {
+ int bit)
+{
if (signed_int_mask & (1 << bit))
return VEC_INT;
else if (unsigned_int_mask & (1 << bit))
@@ -932,7 +947,7 @@ static enum vec_type get_type(uint32_t signed_int_mask,
static boolean
iter_declaration(struct tgsi_iterate_context *iter,
- struct tgsi_full_declaration *decl )
+ struct tgsi_full_declaration *decl)
{
struct dump_ctx *ctx = (struct dump_ctx *)iter;
int i;
@@ -986,7 +1001,7 @@ iter_declaration(struct tgsi_iterate_context *iter,
}
if (ctx->inputs[i].first != ctx->inputs[i].last)
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
switch (ctx->inputs[i].name) {
case TGSI_SEMANTIC_COLOR:
@@ -1054,7 +1069,7 @@ iter_declaration(struct tgsi_iterate_context *iter,
name_prefix = "gl_PrimitiveID";
ctx->inputs[i].glsl_predefined_no_emit = true;
ctx->inputs[i].glsl_no_index = true;
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
ctx->shader_req_bits |= SHADER_REQ_GEOMETRY_SHADER;
break;
}
@@ -1284,7 +1299,7 @@ iter_declaration(struct tgsi_iterate_context *iter,
ctx->num_clip_dist += 4 * (ctx->outputs[i].last - ctx->outputs[i].first + 1);
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX &&
(ctx->key->gs_present || ctx->key->tcs_present))
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL)
ctx->outputs[i].glsl_gl_block = true;
if (ctx->outputs[i].last != ctx->outputs[i].first)
@@ -1459,7 +1474,7 @@ iter_declaration(struct tgsi_iterate_context *iter,
}
break;
case TGSI_FILE_TEMPORARY:
- if (!allocate_temp_range(ctx, decl->Range.First, decl->Range.Last,
+ if (!allocate_temp_range(&ctx->temp_ranges, &ctx->num_temp_ranges, decl->Range.First, decl->Range.Last,
decl->Array.ArrayID))
return false;
break;
@@ -1668,7 +1683,7 @@ iter_property(struct tgsi_iterate_context *iter,
case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
ctx->early_depth_stencil = prop->u[0].Data > 0;
if (ctx->early_depth_stencil) {
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
ctx->shader_req_bits |= SHADER_REQ_IMAGE_LOAD_STORE;
}
break;
@@ -1684,7 +1699,7 @@ iter_property(struct tgsi_iterate_context *iter,
case TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED:
ctx->fs_blend_equation_advanced = prop->u[0].Data;
if (!ctx->cfg->use_gles || ctx->cfg->glsl_version < 320) {
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
ctx->shader_req_bits |= SHADER_REQ_BLEND_EQUATION_ADVANCED;
}
break;
@@ -1697,9 +1712,8 @@ iter_property(struct tgsi_iterate_context *iter,
}
static boolean
-iter_immediate(
- struct tgsi_iterate_context *iter,
- struct tgsi_full_immediate *imm )
+iter_immediate(struct tgsi_iterate_context *iter,
+ struct tgsi_full_immediate *imm)
{
struct dump_ctx *ctx = (struct dump_ctx *) iter;
int i;
@@ -1738,18 +1752,19 @@ static char get_swiz_char(int swiz)
}
}
-static void emit_cbuf_writes(struct dump_ctx *ctx)
+static void emit_cbuf_writes(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
int i;
for (i = ctx->num_outputs; i < ctx->cfg->max_draw_buffers; i++) {
- emit_buff(ctx, "fsout_c%d = fsout_c0;\n", i);
+ emit_buff(glsl_strbufs, "fsout_c%d = fsout_c0;\n", i);
}
}
-static void emit_a8_swizzle(struct dump_ctx *ctx)
+static void emit_a8_swizzle(struct vrend_glsl_strbufs *glsl_strbufs)
{
- emit_buf(ctx, "fsout_c0.x = fsout_c0.w;\n");
+ emit_buf(glsl_strbufs, "fsout_c0.x = fsout_c0.w;\n");
}
static const char *atests[PIPE_FUNC_ALWAYS + 1] = {
@@ -1763,7 +1778,8 @@ static const char *atests[PIPE_FUNC_ALWAYS + 1] = {
"true"
};
-static void emit_alpha_test(struct dump_ctx *ctx)
+static void emit_alpha_test(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
char comp_buf[128];
@@ -1790,36 +1806,38 @@ static void emit_alpha_test(struct dump_ctx *ctx)
break;
default:
vrend_printf( "invalid alpha-test: %x\n", ctx->key->alpha_test);
- set_buf_error(ctx);
+ set_buf_error(glsl_strbufs);
return;
}
- emit_buff(ctx, "if (!(%s)) {\n\tdiscard;\n}\n", comp_buf);
+ emit_buff(glsl_strbufs, "if (!(%s)) {\n\tdiscard;\n}\n", comp_buf);
}
-static void emit_pstipple_pass(struct dump_ctx *ctx)
+static void emit_pstipple_pass(struct vrend_glsl_strbufs *glsl_strbufs)
{
- emit_buf(ctx, "stip_temp = texture(pstipple_sampler, vec2(gl_FragCoord.x / 32.0, gl_FragCoord.y / 32.0)).x;\n");
- emit_buf(ctx, "if (stip_temp > 0.0) {\n\tdiscard;\n}\n");
+ emit_buf(glsl_strbufs, "stip_temp = texture(pstipple_sampler, vec2(gl_FragCoord.x / 32.0, gl_FragCoord.y / 32.0)).x;\n");
+ emit_buf(glsl_strbufs, "if (stip_temp > 0.0) {\n\tdiscard;\n}\n");
}
-static void emit_color_select(struct dump_ctx *ctx)
+static void emit_color_select(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
if (!ctx->key->color_two_side || !(ctx->color_in_mask & 0x3))
return;
if (ctx->color_in_mask & 1)
- emit_buf(ctx, "realcolor0 = gl_FrontFacing ? ex_c0 : ex_bc0;\n");
+ emit_buf(glsl_strbufs, "realcolor0 = gl_FrontFacing ? ex_c0 : ex_bc0;\n");
if (ctx->color_in_mask & 2)
- emit_buf(ctx, "realcolor1 = gl_FrontFacing ? ex_c1 : ex_bc1;\n");
+ emit_buf(glsl_strbufs, "realcolor1 = gl_FrontFacing ? ex_c1 : ex_bc1;\n");
}
-static void emit_prescale(struct dump_ctx *ctx)
+static void emit_prescale(struct vrend_glsl_strbufs *glsl_strbufs)
{
- emit_buf(ctx, "gl_Position.y = gl_Position.y * winsys_adjust_y;\n");
+ emit_buf(glsl_strbufs, "gl_Position.y = gl_Position.y * winsys_adjust_y;\n");
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void prepare_so_movs(struct dump_ctx *ctx)
{
uint32_t i;
@@ -1865,7 +1883,7 @@ get_blockvarname(char outvar[64], const char *stage_prefix, const struct vrend_s
snprintf(outvar, 64, "%sg%dA%d_%x%s", stage_prefix, io->first, io->array_id, io->usage_mask, postfix);
}
-static void get_so_name(struct dump_ctx *ctx, bool from_block, const struct vrend_shader_io *output, int index, char out_var[255], char *wm)
+static void get_so_name(const struct dump_ctx *ctx, bool from_block, const struct vrend_shader_io *output, int index, char out_var[255], char *wm)
{
if (output->first == output->last || output->name != TGSI_SEMANTIC_GENERIC)
snprintf(out_var, 255, "%s%s", output->glsl_name, wm);
@@ -1884,7 +1902,9 @@ static void get_so_name(struct dump_ctx *ctx, bool from_block, const struct vren
}
}
-static void emit_so_movs(struct dump_ctx *ctx)
+static void emit_so_movs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ bool *has_clipvertex_so)
{
uint32_t i, j;
char outtype[15] = "";
@@ -1892,7 +1912,7 @@ static void emit_so_movs(struct dump_ctx *ctx)
if (ctx->so->num_outputs >= PIPE_MAX_SO_OUTPUTS) {
vrend_printf( "Num outputs exceeded, max is %u\n", PIPE_MAX_SO_OUTPUTS);
- set_buf_error(ctx);
+ set_buf_error(glsl_strbufs);
return;
}
@@ -1921,7 +1941,7 @@ static void emit_so_movs(struct dump_ctx *ctx)
ctx->so_names[i] = NULL;
else if (ctx->outputs[ctx->so->output[i].register_index].name == TGSI_SEMANTIC_CLIPVERTEX && ctx->has_clipvertex) {
ctx->so_names[i] = strdup("clipv_tmp");
- ctx->has_clipvertex_so = true;
+ *has_clipvertex_so = true;
} else {
char out_var[255];
get_so_name(ctx, true, output, ctx->so->output[i].register_index, out_var, "");
@@ -1945,10 +1965,10 @@ static void emit_so_movs(struct dump_ctx *ctx)
if (output->name == TGSI_SEMANTIC_CLIPDIST) {
if (output->first == output->last)
- emit_buff(ctx, "tfout%d = %s(clip_dist_temp[%d]%s);\n", i, outtype, output->sid,
+ emit_buff(glsl_strbufs, "tfout%d = %s(clip_dist_temp[%d]%s);\n", i, outtype, output->sid,
writemask);
else
- emit_buff(ctx, "tfout%d = %s(clip_dist_temp[%d]%s);\n", i, outtype,
+ emit_buff(glsl_strbufs, "tfout%d = %s(clip_dist_temp[%d]%s);\n", i, outtype,
output->sid + ctx->so->output[i].register_index - output->first,
writemask);
} else {
@@ -1957,7 +1977,7 @@ static void emit_so_movs(struct dump_ctx *ctx)
if (ctx->so->output[i].need_temp || ctx->prog_type == TGSI_PROCESSOR_GEOMETRY ||
output->glsl_predefined_no_emit) {
get_so_name(ctx, false, output, ctx->so->output[i].register_index, out_var, writemask);
- emit_buff(ctx, "tfout%d = %s(%s);\n", i, outtype, out_var);
+ emit_buff(glsl_strbufs, "tfout%d = %s(%s);\n", i, outtype, out_var);
} else {
get_so_name(ctx, true, output, ctx->so->output[i].register_index, out_var, writemask);
free(ctx->so_names[i]);
@@ -1968,7 +1988,8 @@ static void emit_so_movs(struct dump_ctx *ctx)
}
}
-static void emit_clip_dist_movs(struct dump_ctx *ctx)
+static void emit_clip_dist_movs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
int i;
bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
@@ -1979,7 +2000,7 @@ static void emit_clip_dist_movs(struct dump_ctx *ctx)
prefix = "gl_out[gl_InvocationID].";
if (ctx->num_clip_dist == 0 && ctx->key->clip_plane_enable) {
for (i = 0; i < 8; i++) {
- emit_buff(ctx, "%sgl_ClipDistance[%d] = dot(%s, clipp[%d]);\n", prefix, i, ctx->has_clipvertex ? "clipv_tmp" : "gl_Position", i);
+ emit_buff(glsl_strbufs, "%sgl_ClipDistance[%d] = dot(%s, clipp[%d]);\n", prefix, i, ctx->has_clipvertex ? "clipv_tmp" : "gl_Position", i);
}
return;
}
@@ -2003,29 +2024,32 @@ static void emit_clip_dist_movs(struct dump_ctx *ctx)
is_cull = true;
}
const char *clip_cull = is_cull ? "Cull" : "Clip";
- emit_buff(ctx, "%sgl_%sDistance[%d] = clip_dist_temp[%d].%c;\n", prefix, clip_cull,
+ emit_buff(glsl_strbufs, "%sgl_%sDistance[%d] = clip_dist_temp[%d].%c;\n", prefix, clip_cull,
is_cull ? i - ctx->num_clip_dist_prop : i, clipidx, wm);
}
}
-#define emit_arit_op2(op) emit_buff(ctx, "%s = %s(%s((%s %s %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], op, srcs[1], writemask)
-#define emit_op1(op) emit_buff(ctx, "%s = %s(%s(%s(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), op, srcs[0], writemask)
-#define emit_compare(op) emit_buff(ctx, "%s = %s(%s((%s(%s(%s), %s(%s))))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), op, get_string(sinfo.svec4), srcs[0], get_string(sinfo.svec4), srcs[1], writemask)
+#define emit_arit_op2(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((%s %s %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], op, srcs[1], writemask)
+#define emit_op1(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(%s(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), op, srcs[0], writemask)
+#define emit_compare(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((%s(%s(%s), %s(%s))))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), op, get_string(sinfo.svec4), srcs[0], get_string(sinfo.svec4), srcs[1], writemask)
-#define emit_ucompare(op) emit_buff(ctx, "%s = %s(uintBitsToFloat(%s(%s(%s(%s), %s(%s))%s) * %s(0xffffffff)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.udstconv), op, get_string(sinfo.svec4), srcs[0], get_string(sinfo.svec4), srcs[1], writemask, get_string(dinfo.udstconv))
+#define emit_ucompare(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(uintBitsToFloat(%s(%s(%s(%s), %s(%s))%s) * %s(0xffffffff)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.udstconv), op, get_string(sinfo.svec4), srcs[0], get_string(sinfo.svec4), srcs[1], writemask, get_string(dinfo.udstconv))
-static void handle_vertex_proc_exit(struct dump_ctx *ctx)
+static void handle_vertex_proc_exit(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ bool *has_clipvertex_so)
{
if (ctx->so && !ctx->key->gs_present && !ctx->key->tes_present)
- emit_so_movs(ctx);
+ emit_so_movs(ctx, glsl_strbufs, has_clipvertex_so);
- emit_clip_dist_movs(ctx);
+ emit_clip_dist_movs(ctx, glsl_strbufs);
if (!ctx->key->gs_present && !ctx->key->tes_present)
- emit_prescale(ctx);
+ emit_prescale(glsl_strbufs);
}
-static void emit_fragment_logicop(struct dump_ctx *ctx)
+static void emit_fragment_logicop(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
char src[PIPE_MAX_COLOR_BUFS][64];
char src_fb[PIPE_MAX_COLOR_BUFS][64];
@@ -2141,53 +2165,56 @@ static void emit_fragment_logicop(struct dump_ctx *ctx)
case PIPE_LOGICOP_COPY:
case PIPE_LOGICOP_CLEAR:
case PIPE_LOGICOP_SET:
- emit_buff(ctx, "fsout_c%d = %s;\n", i, full_op[i]);
+ emit_buff(glsl_strbufs, "fsout_c%d = %s;\n", i, full_op[i]);
break;
default:
- emit_buff(ctx, "fsout_c%d = vec4((%s) & %d) / %f;\n", i, full_op[i], mask[i], scale[i]);
+ emit_buff(glsl_strbufs, "fsout_c%d = vec4((%s) & %d) / %f;\n", i, full_op[i], mask[i], scale[i]);
}
}
}
-static void emit_cbuf_swizzle(struct dump_ctx *ctx)
+static void emit_cbuf_swizzle(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
for (uint i = 0; i < ctx->num_outputs; i++) {
if (ctx->key->fs_swizzle_output_rgb_to_bgr & (1 << i)) {
- emit_buff(ctx, "fsout_c%d = fsout_c%d.zyxw;\n", i, i);
+ emit_buff(glsl_strbufs, "fsout_c%d = fsout_c%d.zyxw;\n", i, i);
}
}
}
-static void handle_fragment_proc_exit(struct dump_ctx *ctx)
+static void handle_fragment_proc_exit(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
if (ctx->key->pstipple_tex)
- emit_pstipple_pass(ctx);
+ emit_pstipple_pass(glsl_strbufs);
if (ctx->key->cbufs_are_a8_bitmask)
- emit_a8_swizzle(ctx);
+ emit_a8_swizzle(glsl_strbufs);
if (ctx->key->add_alpha_test)
- emit_alpha_test(ctx);
+ emit_alpha_test(ctx, glsl_strbufs);
if (ctx->key->fs_logicop_enabled)
- emit_fragment_logicop(ctx);
+ emit_fragment_logicop(ctx, glsl_strbufs);
if (ctx->key->fs_swizzle_output_rgb_to_bgr)
- emit_cbuf_swizzle(ctx);
+ emit_cbuf_swizzle(ctx, glsl_strbufs);
if (ctx->write_all_cbufs)
- emit_cbuf_writes(ctx);
+ emit_cbuf_writes(ctx, glsl_strbufs);
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void set_texture_reqs(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
- uint32_t sreg_index)
+ const struct tgsi_full_instruction *inst,
+ uint32_t sreg_index)
{
if (sreg_index >= ARRAY_SIZE(ctx->samplers)) {
vrend_printf( "Sampler view exceeded, max is %lu\n", ARRAY_SIZE(ctx->samplers));
- set_buf_error(ctx);
+ set_buf_error(&ctx->glsl_strbufs);
return;
}
ctx->samplers[sreg_index].tgsi_sampler_type = inst->Texture.Texture;
@@ -2197,12 +2224,13 @@ static void set_texture_reqs(struct dump_ctx *ctx,
if (ctx->cfg->glsl_version >= 140)
if (ctx->shader_req_bits & (SHADER_REQ_SAMPLER_RECT |
SHADER_REQ_SAMPLER_BUF))
- require_glsl_ver(ctx, 140);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 140);
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
/* size queries are pretty much separate */
static void emit_txq(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+ const struct tgsi_full_instruction *inst,
uint32_t sreg_index,
const char *srcs[4],
const char *dst,
@@ -2243,7 +2271,7 @@ static void emit_txq(struct dump_ctx *ctx,
ctx->shader_req_bits |= SHADER_REQ_TXQ_LEVELS;
if (inst->Dst[0].Register.WriteMask & 0x7)
twm = TGSI_WRITEMASK_W;
- emit_buff(ctx, "%s%s = %s(textureQueryLevels(%s));\n", dst,
+ emit_buff(&ctx->glsl_strbufs, "%s%s = %s(textureQueryLevels(%s));\n", dst,
get_wm_string(twm), get_string(dtypeprefix),
srcs[sampler_index]);
}
@@ -2290,15 +2318,16 @@ static void emit_txq(struct dump_ctx *ctx,
writemask = ".xz";
}
- emit_buff(ctx, "%s%s = %s(textureSize(%s%s))%s;\n", dst,
+ emit_buff(&ctx->glsl_strbufs, "%s%s = %s(textureSize(%s%s))%s;\n", dst,
get_wm_string(twm), get_string(dtypeprefix),
srcs[sampler_index], bias, txq_returns_vec ? writemask : "");
}
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
/* sample queries are pretty much separate */
static void emit_txqs(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+ const struct tgsi_full_instruction *inst,
uint32_t sreg_index,
const char *srcs[4],
const char *dst)
@@ -2311,15 +2340,15 @@ static void emit_txqs(struct dump_ctx *ctx,
if (inst->Texture.Texture != TGSI_TEXTURE_2D_MSAA &&
inst->Texture.Texture != TGSI_TEXTURE_2D_ARRAY_MSAA) {
- set_buf_error(ctx);
+ set_buf_error(&ctx->glsl_strbufs);
return;
}
- emit_buff(ctx, "%s = %s(textureSamples(%s));\n", dst,
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(textureSamples(%s));\n", dst,
get_string(dtypeprefix), srcs[sampler_index]);
}
-static const char *get_tex_inst_ext(struct tgsi_full_instruction *inst)
+static const char *get_tex_inst_ext(const struct tgsi_full_instruction *inst)
{
switch (inst->Instruction.Opcode) {
case TGSI_OPCODE_LODQ:
@@ -2359,12 +2388,12 @@ static const char *get_tex_inst_ext(struct tgsi_full_instruction *inst)
}
}
-static bool fill_offset_buffer(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+static bool fill_offset_buffer(const struct dump_ctx *ctx,
+ const struct tgsi_full_instruction *inst,
char *offbuf)
{
if (inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE) {
- struct immed *imd = &ctx->imm[inst->TexOffsets[0].Index];
+ const struct immed *imd = &ctx->imm[inst->TexOffsets[0].Index];
switch (inst->Texture.Texture) {
case TGSI_TEXTURE_1D:
case TGSI_TEXTURE_1D_ARRAY:
@@ -2473,10 +2502,11 @@ static bool fill_offset_buffer(struct dump_ctx *ctx,
return true;
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void translate_tex(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
- struct source_info *sinfo,
- struct dest_info *dinfo,
+ const struct tgsi_full_instruction *inst,
+ const struct source_info *sinfo,
+ const struct dest_info *dinfo,
const char *srcs[4],
const char *dst,
const char *writemask)
@@ -2682,12 +2712,12 @@ static void translate_tex(struct dump_ctx *ctx,
if (inst->Texture.NumOffsets == 1) {
if (inst->TexOffsets[0].Index >= (int)ARRAY_SIZE(ctx->imm)) {
vrend_printf( "Immediate exceeded, max is %lu\n", ARRAY_SIZE(ctx->imm));
- set_buf_error(ctx);
+ set_buf_error(&ctx->glsl_strbufs);
return;
}
if (!fill_offset_buffer(ctx, inst, offbuf)) {
- set_buf_error(ctx);
+ set_buf_error(&ctx->glsl_strbufs);
return;
}
@@ -2733,27 +2763,27 @@ static void translate_tex(struct dump_ctx *ctx,
inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY ||
inst->Texture.Texture == TGSI_TEXTURE_RECT)) {
if (inst->Texture.Texture == TGSI_TEXTURE_1D)
- emit_buff(ctx, "%s = %s(%s(texelFetch%s(%s, ivec2(%s(%s%s), 0)%s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, ivec2(%s(%s%s), 0)%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
get_wm_string(twm), bias, offbuf,
dinfo->dst_override_no_wm[0] ? "" : writemask);
else if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY) {
/* the y coordinate must go into the z element and the y must be zero */
- emit_buff(ctx, "%s = %s(%s(texelFetch%s(%s, ivec3(%s(%s%s), 0).xzy%s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, ivec3(%s(%s%s), 0).xzy%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
get_wm_string(twm), bias, offbuf,
dinfo->dst_override_no_wm[0] ? "" : writemask);
} else {
- emit_buff(ctx, "%s = %s(%s(texelFetch%s(%s, %s(%s%s), 0%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, %s(%s%s), 0%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
get_wm_string(twm), offbuf,
dinfo->dst_override_no_wm[0] ? "" : writemask);
}
} else {
- emit_buff(ctx, "%s = %s(%s(texelFetch%s(%s, %s(%s%s)%s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texelFetch%s(%s, %s(%s%s)%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], get_string(txfi), srcs[0],
get_wm_string(twm), bias, offbuf,
@@ -2762,10 +2792,10 @@ static void translate_tex(struct dump_ctx *ctx,
} else if (ctx->cfg->glsl_version < 140 && (ctx->shader_req_bits & SHADER_REQ_SAMPLER_RECT)) {
/* rect is special in GLSL 1.30 */
if (inst->Texture.Texture == TGSI_TEXTURE_RECT)
- emit_buff(ctx, "%s = texture2DRect(%s, %s.xy)%s;\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = texture2DRect(%s, %s.xy)%s;\n",
dst, srcs[sampler_index], srcs[0], writemask);
else if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT)
- emit_buff(ctx, "%s = shadow2DRect(%s, %s.xyz)%s;\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = shadow2DRect(%s, %s.xyz)%s;\n",
dst, srcs[sampler_index], srcs[0], writemask);
} else if (is_shad && inst->Instruction.Opcode != TGSI_OPCODE_TG4) { /* TGSI returns 1.0 in alpha */
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
@@ -2776,21 +2806,21 @@ static void translate_tex(struct dump_ctx *ctx,
inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D) {
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
- emit_buff(ctx, "%s = %s(%s(vec4(vec4(texture%s(%s, vec4(%s%s.xzw, 0).xwyz %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, vec4(%s%s.xzw, 0).xwyz %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
srcs[0], get_wm_string(twm), offbuf, bias, cname,
src->Register.Index, cname,
src->Register.Index, writemask);
else
- emit_buff(ctx, "%s = %s(%s(vec4(vec4(texture%s(%s, vec3(%s%s.xz, 0).xzy %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, vec3(%s%s.xz, 0).xzy %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
srcs[0], get_wm_string(twm), offbuf, bias, cname,
src->Register.Index, cname,
src->Register.Index, writemask);
} else if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY) {
- emit_buff(ctx, "%s = %s(%s(vec4(vec4(texture%s(%s, vec4(%s%s, 0).xwyz %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, vec4(%s%s, 0).xwyz %s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], srcs[0],
get_wm_string(twm), offbuf, bias, cname,
@@ -2798,7 +2828,7 @@ static void translate_tex(struct dump_ctx *ctx,
src->Register.Index, writemask);
}
} else
- emit_buff(ctx, "%s = %s(%s(vec4(vec4(texture%s(%s, %s%s%s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(vec4(texture%s(%s, %s%s%s%s)) * %sshadmask%d + %sshadadd%d)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], srcs[0],
get_wm_string(twm), offbuf, bias, cname,
@@ -2813,33 +2843,33 @@ static void translate_tex(struct dump_ctx *ctx,
inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY)) {
if (inst->Texture.Texture == TGSI_TEXTURE_1D) {
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
- emit_buff(ctx, "%s = %s(%s(texture%s(%s, vec3(%s.xw, 0).xzy %s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, vec3(%s.xw, 0).xzy %s%s)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
srcs[0], offbuf, bias,
dinfo->dst_override_no_wm[0] ? "" : writemask);
else
- emit_buff(ctx, "%s = %s(%s(texture%s(%s, vec2(%s%s, 0.5) %s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, vec2(%s%s, 0.5) %s%s)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
srcs[0], get_wm_string(twm), offbuf, bias,
dinfo->dst_override_no_wm[0] ? "" : writemask);
} else if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY) {
if (inst->Instruction.Opcode == TGSI_OPCODE_TXP)
- emit_buff(ctx, "%s = %s(%s(texture%s(%s, vec3(%s.x / %s.w, 0, %s.y) %s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, vec3(%s.x / %s.w, 0, %s.y) %s%s)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
srcs[0], srcs[0], srcs[0], offbuf, bias,
dinfo->dst_override_no_wm[0] ? "" : writemask);
else
- emit_buff(ctx, "%s = %s(%s(texture%s(%s, vec3(%s%s, 0).xzy %s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, vec3(%s%s, 0).xzy %s%s)%s));\n",
dst, get_string(dinfo->dstconv),
get_string(dtypeprefix), tex_ext, srcs[sampler_index],
srcs[0], get_wm_string(twm), offbuf, bias,
dinfo->dst_override_no_wm[0] ? "" : writemask);
}
} else {
- emit_buff(ctx, "%s = %s(%s(texture%s(%s, %s%s%s%s)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(texture%s(%s, %s%s%s%s)%s));\n",
dst, get_string(dinfo->dstconv), get_string(dtypeprefix),
tex_ext, srcs[sampler_index], srcs[0], get_wm_string(twm),
offbuf, bias, dinfo->dst_override_no_wm[0] ? "" : writemask);
@@ -2848,7 +2878,7 @@ static void translate_tex(struct dump_ctx *ctx,
}
static void
-create_swizzled_clipdist(struct dump_ctx *ctx,
+create_swizzled_clipdist(const struct dump_ctx *ctx,
struct vrend_strbuf *result,
const struct tgsi_full_src_register *src,
int input_idx,
@@ -2907,7 +2937,7 @@ create_swizzled_clipdist(struct dump_ctx *ctx,
}
static
-void load_clipdist_fs(struct dump_ctx *ctx,
+void load_clipdist_fs(const struct dump_ctx *ctx,
struct vrend_strbuf *result,
const struct tgsi_full_src_register *src,
int input_idx,
@@ -2961,7 +2991,7 @@ static enum vrend_type_qualifier get_coord_prefix(int resource, bool *is_ms, boo
}
}
-static bool is_integer_memory(struct dump_ctx *ctx, enum tgsi_file_type file_type, uint32_t index)
+static bool is_integer_memory(const struct dump_ctx *ctx, enum tgsi_file_type file_type, uint32_t index)
{
switch(file_type) {
case TGSI_FILE_BUFFER:
@@ -2975,36 +3005,37 @@ static bool is_integer_memory(struct dump_ctx *ctx, enum tgsi_file_type file_typ
return false;
}
-static void set_memory_qualifier(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+static void set_memory_qualifier(uint8_t ssbo_memory_qualifier[],
+ uint32_t ssbo_used_mask,
+ const struct tgsi_full_instruction *inst,
uint32_t reg_index, bool indirect)
{
if (inst->Memory.Qualifier == TGSI_MEMORY_COHERENT) {
if (indirect) {
- uint32_t mask = ctx->ssbo_used_mask;
- while (mask)
- ctx->ssbo_memory_qualifier[u_bit_scan(&mask)] = TGSI_MEMORY_COHERENT;
+ while (ssbo_used_mask)
+ ssbo_memory_qualifier[u_bit_scan(&ssbo_used_mask)] = TGSI_MEMORY_COHERENT;
} else
- ctx->ssbo_memory_qualifier[reg_index] = TGSI_MEMORY_COHERENT;
-
+ ssbo_memory_qualifier[reg_index] = TGSI_MEMORY_COHERENT;
}
}
-static void emit_store_mem(struct dump_ctx *ctx, const char *dst, int writemask,
+static void emit_store_mem(struct vrend_glsl_strbufs *glsl_strbufs, const char *dst, int writemask,
const char *srcs[4], const char *conversion)
{
static const char swizzle_char[] = "xyzw";
for (int i = 0; i < 4; ++i) {
if (writemask & (1 << i)) {
- emit_buff(ctx, "%s[(uint(floatBitsToUint(%s)) >> 2) + %du] = %s(%s).%c;\n",
+ emit_buff(glsl_strbufs, "%s[(uint(floatBitsToUint(%s)) >> 2) + %du] = %s(%s).%c;\n",
dst, srcs[0], i, conversion, srcs[1], swizzle_char[i]);
}
}
}
static void
-translate_store(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+translate_store(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ uint8_t ssbo_memory_qualifier[],
+ const struct tgsi_full_instruction *inst,
struct source_info *sinfo,
const char *srcs[4],
const char *dst)
@@ -3033,7 +3064,7 @@ translate_store(struct dump_ctx *ctx,
break;
}
if (!ctx->cfg->use_gles || !dst_reg->Register.Indirect) {
- emit_buff(ctx, "imageStore(%s,%s(%s(%s)),%s%s(%s));\n",
+ emit_buff(glsl_strbufs, "imageStore(%s,%s(%s(%s)),%s%s(%s));\n",
dst, get_string(coord_prefix), conversion, srcs[0],
ms_str, get_string(stypeprefix), srcs[1]);
} else {
@@ -3041,30 +3072,30 @@ translate_store(struct dump_ctx *ctx,
if (image) {
int basearrayidx = image->first;
int array_size = image->array_size;
- emit_buff(ctx, "switch (addr%d + %d) {\n", dst_reg->Indirect.Index,
+ emit_buff(glsl_strbufs, "switch (addr%d + %d) {\n", dst_reg->Indirect.Index,
dst_reg->Register.Index - basearrayidx);
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
for (int i = 0; i < array_size; ++i) {
- emit_buff(ctx, "case %d: imageStore(%simg%d[%d],%s(%s(%s)),%s%s(%s)); break;\n",
+ emit_buff(glsl_strbufs, "case %d: imageStore(%simg%d[%d],%s(%s(%s)),%s%s(%s)); break;\n",
i, cname, basearrayidx, i, get_string(coord_prefix),
conversion, srcs[0], ms_str, get_string(stypeprefix),
srcs[1]);
}
- emit_buff(ctx, "}\n");
+ emit_buff(glsl_strbufs, "}\n");
}
}
} else if (dst_reg->Register.File == TGSI_FILE_BUFFER ||
dst_reg->Register.File == TGSI_FILE_MEMORY) {
enum vrend_type_qualifier dtypeprefix;
- set_memory_qualifier(ctx, inst, dst_reg->Register.Index,
+ set_memory_qualifier(ssbo_memory_qualifier, ctx->ssbo_used_mask, inst, dst_reg->Register.Index,
dst_reg->Register.Indirect);
dtypeprefix = is_integer_memory(ctx, dst_reg->Register.File, dst_reg->Register.Index) ?
FLOAT_BITS_TO_INT : FLOAT_BITS_TO_UINT;
const char *conversion = sinfo->override_no_cast[1] ? "" : get_string(dtypeprefix);
if (!ctx->cfg->use_gles || !dst_reg->Register.Indirect) {
- emit_store_mem(ctx, dst, dst_reg->Register.WriteMask, srcs,
+ emit_store_mem(glsl_strbufs, dst, dst_reg->Register.WriteMask, srcs,
conversion);
} else {
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
@@ -3074,30 +3105,30 @@ translate_store(struct dump_ctx *ctx,
int start, array_count;
u_bit_scan_consecutive_range(&mask, &start, &array_count);
int basearrayidx = lookup_image_array(ctx, dst_reg->Register.Index);
- emit_buff(ctx, "switch (addr%d + %d) {\n", dst_reg->Indirect.Index,
+ emit_buff(glsl_strbufs, "switch (addr%d + %d) {\n", dst_reg->Indirect.Index,
dst_reg->Register.Index - base);
for (int i = 0; i < array_count; ++i) {
char dst_tmp[128];
- emit_buff(ctx, "case %d:\n", i);
+ emit_buff(glsl_strbufs, "case %d:\n", i);
snprintf(dst_tmp, 128, "%simg%d[%d]", cname, basearrayidx, i);
- emit_store_mem(ctx, dst_tmp, dst_reg->Register.WriteMask, srcs,
+ emit_store_mem(glsl_strbufs, dst_tmp, dst_reg->Register.WriteMask, srcs,
conversion);
- emit_buff(ctx, "break;\n");
+ emit_buff(glsl_strbufs, "break;\n");
}
- emit_buf(ctx, "}\n");
+ emit_buf(glsl_strbufs, "}\n");
}
}
}
-static void emit_load_mem(struct dump_ctx *ctx, const char *dst, int writemask,
+static void emit_load_mem(struct vrend_glsl_strbufs *glsl_strbufs, const char *dst, int writemask,
const char *conversion, const char *atomic_op, const char *src0,
const char *atomic_src)
{
static const char swizzle_char[] = "xyzw";
for (int i = 0; i < 4; ++i) {
if (writemask & (1 << i)) {
- emit_buff(ctx, "%s.%c = (%s(%s(%s[ssbo_addr_temp + %du]%s)));\n", dst,
+ emit_buff(glsl_strbufs, "%s.%c = (%s(%s(%s[ssbo_addr_temp + %du]%s)));\n", dst,
swizzle_char[i], conversion, atomic_op, src0, i, atomic_src);
}
}
@@ -3105,8 +3136,11 @@ static void emit_load_mem(struct dump_ctx *ctx, const char *dst, int writemask,
static void
-translate_load(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+translate_load(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ uint8_t ssbo_memory_qualifier[],
+ struct vrend_shader_image images[],
+ const struct tgsi_full_instruction *inst,
struct source_info *sinfo,
struct dest_info *dinfo,
const char *srcs[4],
@@ -3147,10 +3181,10 @@ translate_load(struct dump_ctx *ctx,
(ctx->images[sinfo->sreg_index].decl.Format != PIPE_FORMAT_R32_FLOAT) &&
(ctx->images[sinfo->sreg_index].decl.Format != PIPE_FORMAT_R32_SINT) &&
(ctx->images[sinfo->sreg_index].decl.Format != PIPE_FORMAT_R32_UINT))
- ctx->images[sinfo->sreg_index].decl.Writable = 0;
+ images[sinfo->sreg_index].decl.Writable = 0;
if (!ctx->cfg->use_gles || !inst->Src[0].Register.Indirect) {
- emit_buff(ctx, "%s = %s(imageLoad(%s, %s(%s(%s))%s)%s);\n",
+ emit_buff(glsl_strbufs, "%s = %s(imageLoad(%s, %s(%s(%s))%s)%s);\n",
dst, get_string(dtypeprefix),
srcs[0], get_string(coord_prefix), conversion, srcs[1],
ms_str, wm);
@@ -3160,17 +3194,17 @@ translate_load(struct dump_ctx *ctx,
if (image) {
int basearrayidx = image->first;
int array_size = image->array_size;
- emit_buff(ctx, "switch (addr%d + %d) {\n", inst->Src[0].Indirect.Index, inst->Src[0].Register.Index - basearrayidx);
+ emit_buff(glsl_strbufs, "switch (addr%d + %d) {\n", inst->Src[0].Indirect.Index, inst->Src[0].Register.Index - basearrayidx);
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
for (int i = 0; i < array_size; ++i) {
snprintf(src, 32, "%simg%d[%d]", cname, basearrayidx, i);
- emit_buff(ctx, "case %d: %s = %s(imageLoad(%s, %s(%s(%s))%s)%s);break;\n",
+ emit_buff(glsl_strbufs, "case %d: %s = %s(imageLoad(%s, %s(%s(%s))%s)%s);break;\n",
i, dst, get_string(dtypeprefix),
src, get_string(coord_prefix), conversion, srcs[1],
ms_str, wm);
}
- emit_buff(ctx, "}\n");
+ emit_buff(glsl_strbufs, "}\n");
}
}
} else if (src->Register.File == TGSI_FILE_BUFFER ||
@@ -3178,14 +3212,14 @@ translate_load(struct dump_ctx *ctx,
char mydst[255], atomic_op[9], atomic_src[10];
enum vrend_type_qualifier dtypeprefix;
- set_memory_qualifier(ctx, inst, inst->Src[0].Register.Index, inst->Src[0].Register.Indirect);
+ set_memory_qualifier(ssbo_memory_qualifier, ctx->ssbo_used_mask, inst, inst->Src[0].Register.Index, inst->Src[0].Register.Indirect);
strcpy(mydst, dst);
char *wmp = strchr(mydst, '.');
if (wmp)
wmp[0] = 0;
- emit_buff(ctx, "ssbo_addr_temp = uint(floatBitsToUint(%s)) >> 2;\n", srcs[1]);
+ emit_buff(glsl_strbufs, "ssbo_addr_temp = uint(floatBitsToUint(%s)) >> 2;\n", srcs[1]);
atomic_op[0] = atomic_src[0] = '\0';
if (ctx->ssbo_atomic_mask & (1 << src->Register.Index)) {
@@ -3197,7 +3231,7 @@ translate_load(struct dump_ctx *ctx,
dtypeprefix = (is_integer_memory(ctx, src->Register.File, src->Register.Index)) ? INT_BITS_TO_FLOAT : UINT_BITS_TO_FLOAT;
if (!ctx->cfg->use_gles || !inst->Src[0].Register.Indirect) {
- emit_load_mem(ctx, mydst, inst->Dst[0].Register.WriteMask, get_string(dtypeprefix), atomic_op, srcs[0], atomic_src);
+ emit_load_mem(glsl_strbufs, mydst, inst->Dst[0].Register.WriteMask, get_string(dtypeprefix), atomic_op, srcs[0], atomic_src);
} else {
char src[128] = "";
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
@@ -3208,17 +3242,17 @@ translate_load(struct dump_ctx *ctx,
uint32_t mask = ctx->ssbo_used_mask;
u_bit_scan_consecutive_range(&mask, &start, &array_count);
- emit_buff(ctx, "switch (addr%d + %d) {\n", inst->Src[0].Indirect.Index, inst->Src[0].Register.Index - base);
+ emit_buff(glsl_strbufs, "switch (addr%d + %d) {\n", inst->Src[0].Indirect.Index, inst->Src[0].Register.Index - base);
for (int i = 0; i < array_count; ++i) {
- emit_buff(ctx, "case %d:\n", i);
+ emit_buff(glsl_strbufs, "case %d:\n", i);
snprintf(src, 128,"%sssboarr%s[%d].%sssbocontents%d", cname, atomic_str, i, cname, base);
- emit_load_mem(ctx, mydst, inst->Dst[0].Register.WriteMask, get_string(dtypeprefix), atomic_op, src, atomic_src);
- emit_buff(ctx, " break;\n");
+ emit_load_mem(glsl_strbufs, mydst, inst->Dst[0].Register.WriteMask, get_string(dtypeprefix), atomic_op, src, atomic_src);
+ emit_buff(glsl_strbufs, " break;\n");
}
- emit_buf(ctx, "}\n");
+ emit_buf(glsl_strbufs, "}\n");
}
} else if (src->Register.File == TGSI_FILE_HW_ATOMIC) {
- emit_buff(ctx, "%s = uintBitsToFloat(atomicCounter(%s));\n", dst, srcs[0]);
+ emit_buff(glsl_strbufs, "%s = uintBitsToFloat(atomicCounter(%s));\n", dst, srcs[0]);
}
}
@@ -3265,8 +3299,9 @@ static const char *get_atomic_opname(int tgsi_opcode, bool *is_cas)
return opname;
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void
-translate_resq(struct dump_ctx *ctx, struct tgsi_full_instruction *inst,
+translate_resq(struct dump_ctx *ctx, const struct tgsi_full_instruction *inst,
const char *srcs[4], const char *dst, const char *writemask)
{
const struct tgsi_full_src_register *src = &inst->Src[0];
@@ -3274,7 +3309,7 @@ translate_resq(struct dump_ctx *ctx, struct tgsi_full_instruction *inst,
if (src->Register.File == TGSI_FILE_IMAGE) {
if (inst->Dst[0].Register.WriteMask & 0x8) {
ctx->shader_req_bits |= SHADER_REQ_TXQS | SHADER_REQ_INTS;
- emit_buff(ctx, "%s = %s(imageSamples(%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(imageSamples(%s));\n",
dst, get_string(INT_BITS_TO_FLOAT), srcs[0]);
}
if (inst->Dst[0].Register.WriteMask & 0x7) {
@@ -3284,19 +3319,20 @@ translate_resq(struct dump_ctx *ctx, struct tgsi_full_instruction *inst,
bool skip_emit_writemask = inst->Memory.Texture == TGSI_TEXTURE_BUFFER ||
(!ctx->cfg->use_gles && inst->Memory.Texture == TGSI_TEXTURE_1D);
- emit_buff(ctx, "%s = %s(imageSize(%s)%s%s);\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(imageSize(%s)%s%s);\n",
dst, get_string(INT_BITS_TO_FLOAT), srcs[0],
swizzle_mask, skip_emit_writemask ? "" : writemask);
}
} else if (src->Register.File == TGSI_FILE_BUFFER) {
- emit_buff(ctx, "%s = %s(int(%s.length()) << 2);\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(int(%s.length()) << 2);\n",
dst, get_string(INT_BITS_TO_FLOAT), srcs[0]);
}
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void
translate_atomic(struct dump_ctx *ctx,
- struct tgsi_full_instruction *inst,
+ const struct tgsi_full_instruction *inst,
struct source_info *sinfo,
const char *srcs[4],
char *dst)
@@ -3340,7 +3376,7 @@ translate_atomic(struct dump_ctx *ctx,
opname = get_atomic_opname(inst->Instruction.Opcode, &is_cas);
if (!opname) {
- set_buf_error(ctx);
+ set_buf_error(&ctx->glsl_strbufs);
return;
}
@@ -3357,7 +3393,7 @@ translate_atomic(struct dump_ctx *ctx,
}
if (!ctx->cfg->use_gles || !inst->Src[0].Register.Indirect) {
- emit_buff(ctx, "%s = %s(imageAtomic%s(%s, %s(%s(%s))%s, %s(%s(%s))%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(imageAtomic%s(%s, %s(%s(%s))%s, %s(%s(%s))%s));\n",
dst, get_string(dtypeprefix), opname, srcs[0],
get_string(coord_prefix), conversion, srcs[1], ms_str,
get_string(stypecast), get_string(stypeprefix), srcs[2],
@@ -3368,18 +3404,18 @@ translate_atomic(struct dump_ctx *ctx,
if (image) {
int basearrayidx = image->first;
int array_size = image->array_size;
- emit_buff(ctx, "switch (addr%d + %d) {\n", inst->Src[0].Indirect.Index, inst->Src[0].Register.Index - basearrayidx);
+ emit_buff(&ctx->glsl_strbufs, "switch (addr%d + %d) {\n", inst->Src[0].Indirect.Index, inst->Src[0].Register.Index - basearrayidx);
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
for (int i = 0; i < array_size; ++i) {
snprintf(src, 32, "%simg%d[%d]", cname, basearrayidx, i);
- emit_buff(ctx, "case %d: %s = %s(imageAtomic%s(%s, %s(%s(%s))%s, %s(%s(%s))%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "case %d: %s = %s(imageAtomic%s(%s, %s(%s(%s))%s, %s(%s(%s))%s));\n",
i, dst, get_string(dtypeprefix), opname, src,
get_string(coord_prefix), conversion, srcs[1],
ms_str, get_string(stypecast),
get_string(stypeprefix), srcs[2], cas_str);
}
- emit_buff(ctx, "}\n");
+ emit_buff(&ctx->glsl_strbufs, "}\n");
}
}
ctx->shader_req_bits |= SHADER_REQ_IMAGE_ATOMIC;
@@ -3396,19 +3432,19 @@ translate_atomic(struct dump_ctx *ctx,
stypeprefix = FLOAT_BITS_TO_UINT;
}
- emit_buff(ctx, "%s = %s(atomic%s(%s[int(floatBitsToInt(%s)) >> 2], %s(%s(%s).x)%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(atomic%s(%s[int(floatBitsToInt(%s)) >> 2], %s(%s(%s).x)%s));\n",
dst, get_string(dtypeprefix), opname, srcs[0], srcs[1],
get_string(type), get_string(stypeprefix), srcs[2], cas_str);
}
if(src->Register.File == TGSI_FILE_HW_ATOMIC) {
if (sinfo->imm_value == -1)
- emit_buff(ctx, "%s = %s(atomicCounterDecrement(%s) + 1u);\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(atomicCounterDecrement(%s) + 1u);\n",
dst, get_string(dtypeprefix), srcs[0]);
else if (sinfo->imm_value == 1)
- emit_buff(ctx, "%s = %s(atomicCounterIncrement(%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(atomicCounterIncrement(%s));\n",
dst, get_string(dtypeprefix), srcs[0]);
else
- emit_buff(ctx, "%s = %s(atomicCounter%sARB(%s, floatBitsToUint(%s).x%s));\n",
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(atomicCounter%sARB(%s, floatBitsToUint(%s).x%s));\n",
dst, get_string(dtypeprefix), opname, srcs[0], srcs[2],
cas_str);
}
@@ -3434,7 +3470,7 @@ static const char *reswizzle_dest(const struct vrend_shader_io *io, const struct
return writemask;
}
-static void get_destination_info_generic(struct dump_ctx *ctx,
+static void get_destination_info_generic(const struct dump_ctx *ctx,
const struct tgsi_full_dst_register *dst_reg,
const struct vrend_shader_io *io,
const char *writemask, char dsts[255])
@@ -3470,6 +3506,7 @@ static void get_destination_info_generic(struct dump_ctx *ctx,
}
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static bool
get_destination_info(struct dump_ctx *ctx,
const struct tgsi_full_instruction *inst,
@@ -3597,11 +3634,11 @@ get_destination_info(struct dump_ctx *ctx,
ctx->outputs[j].glsl_name,
ctx->outputs[j].override_no_wm ? "" : writemask);
} else if (ctx->outputs[j].name == TGSI_SEMANTIC_GENERIC) {
- struct vrend_shader_io *io = ctx->generic_output_range.used ? &ctx->generic_output_range.io : &ctx->outputs[j];
+ struct vrend_shader_io *io = ctx->generic_ios.output_range.used ? &ctx->generic_ios.output_range.io : &ctx->outputs[j];
get_destination_info_generic(ctx, dst_reg, io, writemask, dsts[i]);
dinfo->dst_override_no_wm[i] = ctx->outputs[j].override_no_wm;
} else if (ctx->outputs[j].name == TGSI_SEMANTIC_PATCH) {
- struct vrend_shader_io *io = ctx->patch_output_range.used ? &ctx->patch_output_range.io : &ctx->outputs[j];
+ struct vrend_shader_io *io = ctx->patch_ios.output_range.used ? &ctx->patch_ios.output_range.io : &ctx->outputs[j];
char reswizzled[6] = "";
const char *wm = reswizzle_dest(io, dst_reg, reswizzled, writemask);
if (io->last != io->first) {
@@ -3726,7 +3763,7 @@ static const char *shift_swizzles(const struct vrend_shader_io *io, const struct
return swizzle;
}
-static void get_source_info_generic(struct dump_ctx *ctx,
+static void get_source_info_generic(const struct dump_ctx *ctx,
enum io_type iot,
enum vrend_type_qualifier srcstypeprefix,
const char *prefix,
@@ -3817,6 +3854,7 @@ static void get_source_info_patch(enum vrend_type_qualifier srcstypeprefix,
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static bool
get_source_info(struct dump_ctx *ctx,
const struct tgsi_full_instruction *inst,
@@ -3952,10 +3990,10 @@ get_source_info(struct dump_ctx *ctx,
if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE && i == 1) {
strbuf_fmt(src_buf, "floatBitsToInt(%s%s%s%s)", prefix, ctx->inputs[j].glsl_name, arrayname, swizzle);
} else if (ctx->inputs[j].name == TGSI_SEMANTIC_GENERIC) {
- struct vrend_shader_io *io = ctx->generic_input_range.used ? &ctx->generic_input_range.io : &ctx->inputs[j];
+ struct vrend_shader_io *io = ctx->generic_ios.input_range.used ? &ctx->generic_ios.input_range.io : &ctx->inputs[j];
get_source_info_generic(ctx, io_in, srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
} else if (ctx->inputs[j].name == TGSI_SEMANTIC_PATCH) {
- struct vrend_shader_io *io = ctx->patch_input_range.used ? &ctx->patch_input_range.io : &ctx->inputs[j];
+ struct vrend_shader_io *io = ctx->patch_ios.input_range.used ? &ctx->patch_ios.input_range.io : &ctx->inputs[j];
get_source_info_patch(srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
} else if (ctx->inputs[j].name == TGSI_SEMANTIC_POSITION && ctx->prog_type == TGSI_PROCESSOR_VERTEX &&
ctx->inputs[j].first != ctx->inputs[j].last) {
@@ -3996,10 +4034,10 @@ get_source_info(struct dump_ctx *ctx,
strbuf_fmt(src_buf, "clip_dist_temp[%d%s]", ctx->outputs[j].sid, clip_indirect);
}
} else if (ctx->outputs[j].name == TGSI_SEMANTIC_GENERIC) {
- struct vrend_shader_io *io = ctx->generic_output_range.used ? &ctx->generic_output_range.io : &ctx->outputs[j];
+ struct vrend_shader_io *io = ctx->generic_ios.output_range.used ? &ctx->generic_ios.output_range.io : &ctx->outputs[j];
get_source_info_generic(ctx, io_out, srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
} else if (ctx->outputs[j].name == TGSI_SEMANTIC_PATCH) {
- struct vrend_shader_io *io = ctx->patch_output_range.used ? &ctx->patch_output_range.io : &ctx->outputs[j];
+ struct vrend_shader_io *io = ctx->patch_ios.output_range.used ? &ctx->patch_ios.output_range.io : &ctx->outputs[j];
get_source_info_patch(srcstypeprefix, prefix, src, io, arrayname, swizzle, src_buf);
} else {
strbuf_fmt(src_buf, "%s(%s%s%s%s)", get_string(srcstypeprefix), prefix, ctx->outputs[j].glsl_name, arrayname, ctx->outputs[j].is_int ? "" : swizzle);
@@ -4294,7 +4332,7 @@ get_source_info(struct dump_ctx *ctx,
boolean isabsolute = src->Register.Absolute;
strcpy(fp64_src, src_buf->buf);
strbuf_fmt(src_buf, "fp64_src[%d]", i);
- emit_buff(ctx, "%s.x = %spackDouble2x32(uvec2(%s%s))%s;\n", src_buf->buf, isabsolute ? "abs(" : "", fp64_src, swizzle, isabsolute ? ")" : "");
+ emit_buff(&ctx->glsl_strbufs, "%s.x = %spackDouble2x32(uvec2(%s%s))%s;\n", src_buf->buf, isabsolute ? "abs(" : "", fp64_src, swizzle, isabsolute ? ")" : "");
}
}
@@ -4336,54 +4374,54 @@ void rewrite_io_ranged(struct dump_ctx *ctx)
for (uint i = 0; i < ctx->num_inputs; ++i) {
if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH) {
ctx->inputs[i].glsl_predefined_no_emit = true;
- if (ctx->inputs[i].sid < ctx->patch_input_range.io.sid || ctx->patch_input_range.used == false) {
- ctx->patch_input_range.io.first = i;
- ctx->patch_input_range.io.usage_mask = 0xf;
- ctx->patch_input_range.io.name = TGSI_SEMANTIC_PATCH;
- ctx->patch_input_range.io.sid = ctx->inputs[i].sid;
- ctx->patch_input_range.used = true;
+ if (ctx->inputs[i].sid < ctx->patch_ios.input_range.io.sid || ctx->patch_ios.input_range.used == false) {
+ ctx->patch_ios.input_range.io.first = i;
+ ctx->patch_ios.input_range.io.usage_mask = 0xf;
+ ctx->patch_ios.input_range.io.name = TGSI_SEMANTIC_PATCH;
+ ctx->patch_ios.input_range.io.sid = ctx->inputs[i].sid;
+ ctx->patch_ios.input_range.used = true;
if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
}
- if (ctx->inputs[i].sid > ctx->patch_input_range.io.last)
- ctx->patch_input_range.io.last = ctx->inputs[i].sid;
+ if (ctx->inputs[i].sid > ctx->patch_ios.input_range.io.last)
+ ctx->patch_ios.input_range.io.last = ctx->inputs[i].sid;
}
if (ctx->inputs[i].name == TGSI_SEMANTIC_GENERIC) {
ctx->inputs[i].glsl_predefined_no_emit = true;
- if (ctx->inputs[i].sid < ctx->generic_input_range.io.sid || ctx->generic_input_range.used == false) {
- ctx->generic_input_range.io.sid = ctx->inputs[i].sid;
- ctx->generic_input_range.io.first = i;
- ctx->generic_input_range.io.name = TGSI_SEMANTIC_GENERIC;
- ctx->generic_input_range.io.num_components = 4;
- ctx->generic_input_range.used = true;
+ if (ctx->inputs[i].sid < ctx->generic_ios.input_range.io.sid || ctx->generic_ios.input_range.used == false) {
+ ctx->generic_ios.input_range.io.sid = ctx->inputs[i].sid;
+ ctx->generic_ios.input_range.io.first = i;
+ ctx->generic_ios.input_range.io.name = TGSI_SEMANTIC_GENERIC;
+ ctx->generic_ios.input_range.io.num_components = 4;
+ ctx->generic_ios.input_range.used = true;
if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
}
- if (ctx->inputs[i].sid > ctx->generic_input_range.io.last)
- ctx->generic_input_range.io.last = ctx->inputs[i].sid;
+ if (ctx->inputs[i].sid > ctx->generic_ios.input_range.io.last)
+ ctx->generic_ios.input_range.io.last = ctx->inputs[i].sid;
}
if (ctx->key->num_indirect_generic_inputs > 0)
- ctx->generic_input_range.io.last = ctx->generic_input_range.io.sid + ctx->key->num_indirect_generic_inputs - 1;
+ ctx->generic_ios.input_range.io.last = ctx->generic_ios.input_range.io.sid + ctx->key->num_indirect_generic_inputs - 1;
if (ctx->key->num_indirect_patch_inputs > 0)
- ctx->patch_input_range.io.last = ctx->patch_input_range.io.sid + ctx->key->num_indirect_patch_inputs - 1;
+ ctx->patch_ios.input_range.io.last = ctx->patch_ios.input_range.io.sid + ctx->key->num_indirect_patch_inputs - 1;
}
- snprintf(ctx->patch_input_range.io.glsl_name, 64, "%s_p%d",
- get_stage_input_name_prefix(ctx, ctx->prog_type), ctx->patch_input_range.io.sid);
- snprintf(ctx->generic_input_range.io.glsl_name, 64, "%s_g%d",
- get_stage_input_name_prefix(ctx, ctx->prog_type), ctx->generic_input_range.io.sid);
+ snprintf(ctx->patch_ios.input_range.io.glsl_name, 64, "%s_p%d",
+ get_stage_input_name_prefix(ctx, ctx->prog_type), ctx->patch_ios.input_range.io.sid);
+ snprintf(ctx->generic_ios.input_range.io.glsl_name, 64, "%s_g%d",
+ get_stage_input_name_prefix(ctx, ctx->prog_type), ctx->generic_ios.input_range.io.sid);
- ctx->generic_input_range.io.num_components = 4;
- ctx->generic_input_range.io.usage_mask = 0xf;
- ctx->generic_input_range.io.swizzle_offset = 0;
+ ctx->generic_ios.input_range.io.num_components = 4;
+ ctx->generic_ios.input_range.io.usage_mask = 0xf;
+ ctx->generic_ios.input_range.io.swizzle_offset = 0;
- ctx->patch_input_range.io.num_components = 4;
- ctx->patch_input_range.io.usage_mask = 0xf;
- ctx->patch_input_range.io.swizzle_offset = 0;
+ ctx->patch_ios.input_range.io.num_components = 4;
+ ctx->patch_ios.input_range.io.usage_mask = 0xf;
+ ctx->patch_ios.input_range.io.swizzle_offset = 0;
if (prefer_generic_io_block(ctx, io_in))
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
}
if ((ctx->info.indirect_files & (1 << TGSI_FILE_OUTPUT)) ||
@@ -4393,52 +4431,52 @@ void rewrite_io_ranged(struct dump_ctx *ctx)
for (uint i = 0; i < ctx->num_outputs; ++i) {
if (ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) {
ctx->outputs[i].glsl_predefined_no_emit = true;
- if (ctx->outputs[i].sid < ctx->patch_output_range.io.sid || ctx->patch_output_range.used == false) {
- ctx->patch_output_range.io.first = i;
- ctx->patch_output_range.io.name = TGSI_SEMANTIC_PATCH;
- ctx->patch_output_range.io.sid = ctx->outputs[i].sid;
- ctx->patch_output_range.used = true;
+ if (ctx->outputs[i].sid < ctx->patch_ios.output_range.io.sid || ctx->patch_ios.output_range.used == false) {
+ ctx->patch_ios.output_range.io.first = i;
+ ctx->patch_ios.output_range.io.name = TGSI_SEMANTIC_PATCH;
+ ctx->patch_ios.output_range.io.sid = ctx->outputs[i].sid;
+ ctx->patch_ios.output_range.used = true;
if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
}
- if (ctx->outputs[i].sid > ctx->patch_output_range.io.last) {
- ctx->patch_output_range.io.last = ctx->outputs[i].sid;
+ if (ctx->outputs[i].sid > ctx->patch_ios.output_range.io.last) {
+ ctx->patch_ios.output_range.io.last = ctx->outputs[i].sid;
}
}
if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC) {
ctx->outputs[i].glsl_predefined_no_emit = true;
- if (ctx->outputs[i].sid < ctx->generic_output_range.io.sid || ctx->generic_output_range.used == false) {
- ctx->generic_output_range.io.sid = ctx->outputs[i].sid;
- ctx->generic_output_range.io.first = i;
- ctx->generic_output_range.io.name = TGSI_SEMANTIC_GENERIC;
- ctx->generic_output_range.used = true;
- ctx->generic_output_range.io.usage_mask = 0xf;
- ctx->generic_output_range.io.num_components = 4;
+ if (ctx->outputs[i].sid < ctx->generic_ios.output_range.io.sid || ctx->generic_ios.output_range.used == false) {
+ ctx->generic_ios.output_range.io.sid = ctx->outputs[i].sid;
+ ctx->generic_ios.output_range.io.first = i;
+ ctx->generic_ios.output_range.io.name = TGSI_SEMANTIC_GENERIC;
+ ctx->generic_ios.output_range.used = true;
+ ctx->generic_ios.output_range.io.usage_mask = 0xf;
+ ctx->generic_ios.output_range.io.num_components = 4;
if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles)
ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS;
}
- if (ctx->outputs[i].sid > ctx->generic_output_range.io.last) {
- ctx->generic_output_range.io.last = ctx->outputs[i].sid;
+ if (ctx->outputs[i].sid > ctx->generic_ios.output_range.io.last) {
+ ctx->generic_ios.output_range.io.last = ctx->outputs[i].sid;
}
}
}
- snprintf(ctx->patch_output_range.io.glsl_name, 64, "%s_p%d",
- get_stage_output_name_prefix(ctx->prog_type), ctx->patch_output_range.io.sid);
- snprintf(ctx->generic_output_range.io.glsl_name, 64, "%s_g%d",
- get_stage_output_name_prefix(ctx->prog_type), ctx->generic_output_range.io.sid);
+ snprintf(ctx->patch_ios.output_range.io.glsl_name, 64, "%s_p%d",
+ get_stage_output_name_prefix(ctx->prog_type), ctx->patch_ios.output_range.io.sid);
+ snprintf(ctx->generic_ios.output_range.io.glsl_name, 64, "%s_g%d",
+ get_stage_output_name_prefix(ctx->prog_type), ctx->generic_ios.output_range.io.sid);
- ctx->generic_output_range.io.num_components = 4;
- ctx->generic_output_range.io.usage_mask = 0xf;
- ctx->generic_output_range.io.swizzle_offset = 0;
+ ctx->generic_ios.output_range.io.num_components = 4;
+ ctx->generic_ios.output_range.io.usage_mask = 0xf;
+ ctx->generic_ios.output_range.io.swizzle_offset = 0;
- ctx->patch_output_range.io.num_components = 4;
- ctx->patch_output_range.io.usage_mask = 0xf;
- ctx->patch_output_range.io.swizzle_offset = 0;
+ ctx->patch_ios.output_range.io.num_components = 4;
+ ctx->patch_ios.output_range.io.usage_mask = 0xf;
+ ctx->patch_ios.output_range.io.swizzle_offset = 0;
if (prefer_generic_io_block(ctx, io_out))
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
}
}
@@ -4519,13 +4557,14 @@ void rewrite_vs_pos_array(struct dump_ctx *ctx)
ctx->inputs[io_idx].first = range_start;
ctx->inputs[io_idx].last = range_end;
ctx->inputs[io_idx].glsl_predefined_no_emit = false;
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
}
}
static
-void emit_fs_clipdistance_load(struct dump_ctx *ctx)
+void emit_fs_clipdistance_load(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
int i;
@@ -4560,7 +4599,7 @@ void emit_fs_clipdistance_load(struct dump_ctx *ctx)
is_cull = true;
}
const char *clip_cull = is_cull ? "Cull" : "Clip";
- emit_buff(ctx, "clip_dist_temp[%d].%c = %sgl_%sDistance[%d];\n", clipidx, wm, prefix, clip_cull,
+ emit_buff(glsl_strbufs, "clip_dist_temp[%d].%c = %sgl_%sDistance[%d];\n", clipidx, wm, prefix, clip_cull,
is_cull ? i - ctx->key->prev_stage_num_clip_out : i);
}
}
@@ -4571,7 +4610,9 @@ void emit_fs_clipdistance_load(struct dump_ctx *ctx)
* previous shader stage to the according inputs.
*/
-static bool apply_prev_layout(struct dump_ctx *ctx)
+static bool apply_prev_layout(const struct vrend_shader_key *key,
+ struct vrend_shader_io inputs[],
+ uint32_t *num_inputs)
{
bool require_enhanced_layouts = false;
@@ -4579,14 +4620,14 @@ static bool apply_prev_layout(struct dump_ctx *ctx)
* the previous shader that uses a different layout. It may even be that one
* input be the combination of two inputs. */
- for (unsigned i = 0; i < ctx->num_inputs; ++i ) {
+ for (unsigned i = 0; i < *num_inputs; ++i ) {
unsigned i_input = i;
- struct vrend_shader_io *io = &ctx->inputs[i];
+ struct vrend_shader_io *io = &inputs[i];
if (io->name == TGSI_SEMANTIC_GENERIC || io->name == TGSI_SEMANTIC_PATCH) {
- const struct vrend_layout_info *layout = ctx->key->prev_stage_generic_and_patch_outputs_layout;
- for (unsigned generic_index = 0; generic_index < ctx->key->num_prev_generic_and_patch_outputs; ++generic_index, ++layout) {
+ const struct vrend_layout_info *layout = key->prev_stage_generic_and_patch_outputs_layout;
+ for (unsigned generic_index = 0; generic_index < key->num_prev_generic_and_patch_outputs; ++generic_index, ++layout) {
bool already_found_one = false;
@@ -4596,8 +4637,8 @@ static bool apply_prev_layout(struct dump_ctx *ctx)
/* We have already one IO with the same SID and arrays ID, so we need to duplicate it */
if (already_found_one) {
- memmove(io + 1, io, (ctx->num_inputs - i_input) * sizeof(struct vrend_shader_io));
- ctx->num_inputs++;
+ memmove(io + 1, io, (*num_inputs - i_input) * sizeof(struct vrend_shader_io));
+ (*num_inputs)++;
++io;
++i_input;
@@ -4616,7 +4657,7 @@ static bool apply_prev_layout(struct dump_ctx *ctx)
require_enhanced_layouts |= io->swizzle_offset > 0;
if (io->num_components == 1)
io->override_no_wm = true;
- if (i_input < ctx->num_inputs - 1) {
+ if (i_input < *num_inputs - 1) {
already_found_one = (io[1].sid != layout->sid || io[1].array_id != layout->array_id);
}
}
@@ -4693,6 +4734,7 @@ void renumber_io_arrays(unsigned nio, struct vrend_shader_io *io)
}
}
+// TODO Consider exposing non-const ctx-> members as args to make *ctx const
static void handle_io_arrays(struct dump_ctx *ctx)
{
bool require_enhanced_layouts = false;
@@ -4715,7 +4757,7 @@ static void handle_io_arrays(struct dump_ctx *ctx)
if (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY ||
ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ||
ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)
- require_enhanced_layouts |= apply_prev_layout(ctx);
+ require_enhanced_layouts |= apply_prev_layout(ctx->key, ctx->inputs, &ctx->num_inputs);
if (ctx->guest_sent_io_arrays) {
if (ctx->num_inputs > 0)
@@ -4779,11 +4821,11 @@ iter_instruction(struct tgsi_iterate_context *iter,
rewrite_vs_pos_array(ctx);
}
- emit_buf(ctx, "void main(void)\n{\n");
+ emit_buf(&ctx->glsl_strbufs, "void main(void)\n{\n");
if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
- emit_color_select(ctx);
+ emit_color_select(ctx, &ctx->glsl_strbufs);
if (ctx->fs_uses_clipdist_input)
- emit_fs_clipdistance_load(ctx);
+ emit_fs_clipdistance_load(ctx, &ctx->glsl_strbufs);
}
if (ctx->so)
prepare_so_movs(ctx);
@@ -4812,34 +4854,34 @@ iter_instruction(struct tgsi_iterate_context *iter,
switch (inst->Instruction.Opcode) {
case TGSI_OPCODE_SQRT:
case TGSI_OPCODE_DSQRT:
- emit_buff(ctx, "%s = sqrt(vec4(%s))%s;\n", dsts[0], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = sqrt(vec4(%s))%s;\n", dsts[0], srcs[0], writemask);
break;
case TGSI_OPCODE_LRP:
- emit_buff(ctx, "%s = mix(vec4(%s), vec4(%s), vec4(%s))%s;\n", dsts[0], srcs[2], srcs[1], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = mix(vec4(%s), vec4(%s), vec4(%s))%s;\n", dsts[0], srcs[2], srcs[1], srcs[0], writemask);
break;
case TGSI_OPCODE_DP2:
- emit_buff(ctx, "%s = %s(dot(vec2(%s), vec2(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(dot(vec2(%s), vec2(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
break;
case TGSI_OPCODE_DP3:
- emit_buff(ctx, "%s = %s(dot(vec3(%s), vec3(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(dot(vec3(%s), vec3(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
break;
case TGSI_OPCODE_DP4:
- emit_buff(ctx, "%s = %s(dot(vec4(%s), vec4(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(dot(vec4(%s), vec4(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
break;
case TGSI_OPCODE_DPH:
- emit_buff(ctx, "%s = %s(dot(vec4(vec3(%s), 1.0), vec4(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(dot(vec4(vec3(%s), 1.0), vec4(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
break;
case TGSI_OPCODE_MAX:
case TGSI_OPCODE_DMAX:
case TGSI_OPCODE_IMAX:
case TGSI_OPCODE_UMAX:
- emit_buff(ctx, "%s = %s(%s(max(%s, %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(max(%s, %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_MIN:
case TGSI_OPCODE_DMIN:
case TGSI_OPCODE_IMIN:
case TGSI_OPCODE_UMIN:
- emit_buff(ctx, "%s = %s(%s(min(%s, %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(min(%s, %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_ABS:
case TGSI_OPCODE_IABS:
@@ -4847,31 +4889,31 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_op1("abs");
break;
case TGSI_OPCODE_KILL_IF:
- emit_buff(ctx, "if (any(lessThan(%s, vec4(0.0))))\ndiscard;\n", srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "if (any(lessThan(%s, vec4(0.0))))\ndiscard;\n", srcs[0]);
break;
case TGSI_OPCODE_IF:
case TGSI_OPCODE_UIF:
- emit_buff(ctx, "if (any(bvec4(%s))) {\n", srcs[0]);
- indent_buf(ctx);
+ emit_buff(&ctx->glsl_strbufs, "if (bool(%s.x)) {\n", srcs[0]);
+ indent_buf(&ctx->glsl_strbufs);
break;
case TGSI_OPCODE_ELSE:
- outdent_buf(ctx);
- emit_buf(ctx, "} else {\n");
- indent_buf(ctx);
+ outdent_buf(&ctx->glsl_strbufs);
+ emit_buf(&ctx->glsl_strbufs, "} else {\n");
+ indent_buf(&ctx->glsl_strbufs);
break;
case TGSI_OPCODE_ENDIF:
- emit_buf(ctx, "}\n");
- outdent_buf(ctx);
+ emit_buf(&ctx->glsl_strbufs, "}\n");
+ outdent_buf(&ctx->glsl_strbufs);
break;
case TGSI_OPCODE_KILL:
- emit_buff(ctx, "discard;\n");
+ emit_buff(&ctx->glsl_strbufs, "discard;\n");
break;
case TGSI_OPCODE_DST:
- emit_buff(ctx, "%s = vec4(1.0, %s.y * %s.y, %s.z, %s.w);\n", dsts[0],
+ emit_buff(&ctx->glsl_strbufs, "%s = vec4(1.0, %s.y * %s.y, %s.z, %s.w);\n", dsts[0],
srcs[0], srcs[1], srcs[0], srcs[1]);
break;
case TGSI_OPCODE_LIT:
- emit_buff(ctx, "%s = %s(vec4(1.0, max(%s.x, 0.0), step(0.0, %s.x) * pow(max(0.0, %s.y), clamp(%s.w, -128.0, 128.0)), 1.0)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[0], srcs[0], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(vec4(1.0, max(%s.x, 0.0), step(0.0, %s.x) * pow(max(0.0, %s.y), clamp(%s.w, -128.0, 128.0)), 1.0)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[0], srcs[0], srcs[0], writemask);
break;
case TGSI_OPCODE_EX2:
emit_op1("exp2");
@@ -4880,10 +4922,10 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_op1("log2");
break;
case TGSI_OPCODE_EXP:
- emit_buff(ctx, "%s = %s(vec4(pow(2.0, floor(%s.x)), %s.x - floor(%s.x), exp2(%s.x), 1.0)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[0], srcs[0], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(vec4(pow(2.0, floor(%s.x)), %s.x - floor(%s.x), exp2(%s.x), 1.0)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[0], srcs[0], srcs[0], writemask);
break;
case TGSI_OPCODE_LOG:
- emit_buff(ctx, "%s = %s(vec4(floor(log2(%s.x)), %s.x / pow(2.0, floor(log2(%s.x))), log2(%s.x), 1.0)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[0], srcs[0], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(vec4(floor(log2(%s.x)), %s.x / pow(2.0, floor(log2(%s.x))), log2(%s.x), 1.0)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[0], srcs[0], srcs[0], writemask);
break;
case TGSI_OPCODE_COS:
emit_op1("cos");
@@ -4892,7 +4934,7 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_op1("sin");
break;
case TGSI_OPCODE_SCS:
- emit_buff(ctx, "%s = %s(vec4(cos(%s.x), sin(%s.x), 0, 1)%s);\n", dsts[0], get_string(dinfo.dstconv),
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(vec4(cos(%s.x), sin(%s.x), 0, 1)%s);\n", dsts[0], get_string(dinfo.dstconv),
srcs[0], srcs[0], writemask);
break;
case TGSI_OPCODE_DDX:
@@ -4910,10 +4952,10 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_op1("dFdyFine");
break;
case TGSI_OPCODE_RCP:
- emit_buff(ctx, "%s = %s(1.0/(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(1.0/(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_DRCP:
- emit_buff(ctx, "%s = %s(1.0LF/(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(1.0LF/(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_FLR:
emit_op1("floor");
@@ -4945,18 +4987,18 @@ iter_instruction(struct tgsi_iterate_context *iter,
break;
case TGSI_OPCODE_RSQ:
case TGSI_OPCODE_DRSQ:
- emit_buff(ctx, "%s = %s(inversesqrt(%s.x));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(inversesqrt(%s.x));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_FBFETCH:
case TGSI_OPCODE_MOV:
- emit_buff(ctx, "%s = %s(%s(%s%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], sinfo.override_no_wm[0] ? "" : writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(%s%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], sinfo.override_no_wm[0] ? "" : writemask);
break;
case TGSI_OPCODE_ADD:
case TGSI_OPCODE_DADD:
emit_arit_op2("+");
break;
case TGSI_OPCODE_UADD:
- emit_buff(ctx, "%s = %s(%s(ivec4((uvec4(%s) + uvec4(%s))))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(ivec4((uvec4(%s) + uvec4(%s))))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_SUB:
emit_arit_op2("-");
@@ -4970,16 +5012,16 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_arit_op2("/");
break;
case TGSI_OPCODE_UMUL:
- emit_buff(ctx, "%s = %s(%s((uvec4(%s) * uvec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((uvec4(%s) * uvec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_UMOD:
- emit_buff(ctx, "%s = %s(%s((uvec4(%s) %% uvec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((uvec4(%s) %% uvec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_IDIV:
- emit_buff(ctx, "%s = %s(%s((ivec4(%s) / ivec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((ivec4(%s) / ivec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_UDIV:
- emit_buff(ctx, "%s = %s(%s((uvec4(%s) / uvec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((uvec4(%s) / uvec4(%s)))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], writemask);
break;
case TGSI_OPCODE_ISHR:
case TGSI_OPCODE_USHR:
@@ -4989,11 +5031,11 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_arit_op2("<<");
break;
case TGSI_OPCODE_MAD:
- emit_buff(ctx, "%s = %s((%s * %s + %s)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1], srcs[2], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s((%s * %s + %s)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1], srcs[2], writemask);
break;
case TGSI_OPCODE_UMAD:
case TGSI_OPCODE_DMAD:
- emit_buff(ctx, "%s = %s(%s((%s * %s + %s)%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], srcs[2], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((%s * %s + %s)%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], srcs[2], writemask);
break;
case TGSI_OPCODE_OR:
emit_arit_op2("|");
@@ -5027,43 +5069,43 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_txqs(ctx, inst, sinfo.sreg_index, srcs, dsts[0]);
break;
case TGSI_OPCODE_I2F:
- emit_buff(ctx, "%s = %s(ivec4(%s)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(ivec4(%s)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], writemask);
break;
case TGSI_OPCODE_I2D:
- emit_buff(ctx, "%s = %s(ivec4(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(ivec4(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_D2F:
- emit_buff(ctx, "%s = %s(%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_U2F:
- emit_buff(ctx, "%s = %s(uvec4(%s)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(uvec4(%s)%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0], writemask);
break;
case TGSI_OPCODE_U2D:
- emit_buff(ctx, "%s = %s(uvec4(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(uvec4(%s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_F2I:
- emit_buff(ctx, "%s = %s(%s(ivec4(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(ivec4(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], writemask);
break;
case TGSI_OPCODE_D2I:
- emit_buff(ctx, "%s = %s(%s(%s(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), get_string(dinfo.idstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(%s(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), get_string(dinfo.idstconv), srcs[0]);
break;
case TGSI_OPCODE_F2U:
- emit_buff(ctx, "%s = %s(%s(uvec4(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(uvec4(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], writemask);
break;
case TGSI_OPCODE_D2U:
- emit_buff(ctx, "%s = %s(%s(%s(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), get_string(dinfo.udstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(%s(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), get_string(dinfo.udstconv), srcs[0]);
break;
case TGSI_OPCODE_F2D:
- emit_buff(ctx, "%s = %s(%s(%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
break;
case TGSI_OPCODE_NOT:
- emit_buff(ctx, "%s = %s(uintBitsToFloat(~(uvec4(%s))));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(uintBitsToFloat(~(uvec4(%s))));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_INEG:
- emit_buff(ctx, "%s = %s(intBitsToFloat(-(ivec4(%s))));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(intBitsToFloat(-(ivec4(%s))));\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_DNEG:
- emit_buff(ctx, "%s = %s(-%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(-%s);\n", dsts[0], get_string(dinfo.dstconv), srcs[0]);
break;
case TGSI_OPCODE_SEQ:
emit_compare("equal");
@@ -5108,96 +5150,96 @@ iter_instruction(struct tgsi_iterate_context *iter,
emit_ucompare("greaterThanEqual");
break;
case TGSI_OPCODE_POW:
- emit_buff(ctx, "%s = %s(pow(%s, %s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(pow(%s, %s));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
break;
case TGSI_OPCODE_CMP:
- emit_buff(ctx, "%s = mix(%s, %s, greaterThanEqual(%s, vec4(0.0)))%s;\n", dsts[0], srcs[1], srcs[2], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = mix(%s, %s, greaterThanEqual(%s, vec4(0.0)))%s;\n", dsts[0], srcs[1], srcs[2], srcs[0], writemask);
break;
case TGSI_OPCODE_UCMP:
- emit_buff(ctx, "%s = mix(%s, %s, notEqual(floatBitsToUint(%s), uvec4(0.0)))%s;\n", dsts[0], srcs[2], srcs[1], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = mix(%s, %s, notEqual(floatBitsToUint(%s), uvec4(0.0)))%s;\n", dsts[0], srcs[2], srcs[1], srcs[0], writemask);
break;
case TGSI_OPCODE_END:
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) {
- handle_vertex_proc_exit(ctx);
+ handle_vertex_proc_exit(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
} else if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) {
- emit_clip_dist_movs(ctx);
+ emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
} else if (iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) {
if (ctx->so && !ctx->key->gs_present)
- emit_so_movs(ctx);
- emit_clip_dist_movs(ctx);
+ emit_so_movs(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
+ emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
if (!ctx->key->gs_present) {
- emit_prescale(ctx);
+ emit_prescale(&ctx->glsl_strbufs);
}
} else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
- handle_fragment_proc_exit(ctx);
+ handle_fragment_proc_exit(ctx, &ctx->glsl_strbufs);
}
- emit_buf(ctx, "}\n");
+ emit_buf(&ctx->glsl_strbufs, "}\n");
break;
case TGSI_OPCODE_RET:
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) {
- handle_vertex_proc_exit(ctx);
+ handle_vertex_proc_exit(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
} else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
- handle_fragment_proc_exit(ctx);
+ handle_fragment_proc_exit(ctx, &ctx->glsl_strbufs);
}
- emit_buf(ctx, "return;\n");
+ emit_buf(&ctx->glsl_strbufs, "return;\n");
break;
case TGSI_OPCODE_ARL:
- emit_buff(ctx, "%s = int(floor(%s)%s);\n", dsts[0], srcs[0], writemask);
+ emit_buff(&ctx->glsl_strbufs, "%s = int(floor(%s)%s);\n", dsts[0], srcs[0], writemask);
break;
case TGSI_OPCODE_UARL:
- emit_buff(ctx, "%s = int(%s);\n", dsts[0], srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = int(%s);\n", dsts[0], srcs[0]);
break;
case TGSI_OPCODE_XPD:
- emit_buff(ctx, "%s = %s(cross(vec3(%s), vec3(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(cross(vec3(%s), vec3(%s)));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1]);
break;
case TGSI_OPCODE_BGNLOOP:
- emit_buf(ctx, "do {\n");
- indent_buf(ctx);
+ emit_buf(&ctx->glsl_strbufs, "do {\n");
+ indent_buf(&ctx->glsl_strbufs);
break;
case TGSI_OPCODE_ENDLOOP:
- outdent_buf(ctx);
- emit_buf(ctx, "} while(true);\n");
+ outdent_buf(&ctx->glsl_strbufs);
+ emit_buf(&ctx->glsl_strbufs, "} while(true);\n");
break;
case TGSI_OPCODE_BRK:
- emit_buf(ctx, "break;\n");
+ emit_buf(&ctx->glsl_strbufs, "break;\n");
break;
case TGSI_OPCODE_EMIT: {
struct immed *imd = &ctx->imm[(inst->Src[0].Register.Index)];
if (ctx->so && ctx->key->gs_present)
- emit_so_movs(ctx);
- emit_clip_dist_movs(ctx);
- emit_prescale(ctx);
+ emit_so_movs(ctx, &ctx->glsl_strbufs, &ctx->has_clipvertex_so);
+ emit_clip_dist_movs(ctx, &ctx->glsl_strbufs);
+ emit_prescale(&ctx->glsl_strbufs);
if (imd->val[inst->Src[0].Register.SwizzleX].ui > 0) {
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
- emit_buff(ctx, "EmitStreamVertex(%d);\n", imd->val[inst->Src[0].Register.SwizzleX].ui);
+ emit_buff(&ctx->glsl_strbufs, "EmitStreamVertex(%d);\n", imd->val[inst->Src[0].Register.SwizzleX].ui);
} else
- emit_buf(ctx, "EmitVertex();\n");
+ emit_buf(&ctx->glsl_strbufs, "EmitVertex();\n");
break;
}
case TGSI_OPCODE_ENDPRIM: {
struct immed *imd = &ctx->imm[(inst->Src[0].Register.Index)];
if (imd->val[inst->Src[0].Register.SwizzleX].ui > 0) {
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
- emit_buff(ctx, "EndStreamPrimitive(%d);\n", imd->val[inst->Src[0].Register.SwizzleX].ui);
+ emit_buff(&ctx->glsl_strbufs, "EndStreamPrimitive(%d);\n", imd->val[inst->Src[0].Register.SwizzleX].ui);
} else
- emit_buf(ctx, "EndPrimitive();\n");
+ emit_buf(&ctx->glsl_strbufs, "EndPrimitive();\n");
break;
}
case TGSI_OPCODE_INTERP_CENTROID:
- emit_buff(ctx, "%s = %s(%s(vec4(interpolateAtCentroid(%s)%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], src_swizzle0);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(interpolateAtCentroid(%s)%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], src_swizzle0);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_INTERP_SAMPLE:
- emit_buff(ctx, "%s = %s(%s(vec4(interpolateAtSample(%s, %s.x)%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], src_swizzle0);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(interpolateAtSample(%s, %s.x)%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], src_swizzle0);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_INTERP_OFFSET:
- emit_buff(ctx, "%s = %s(%s(vec4(interpolateAtOffset(%s, %s.xy)%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], src_swizzle0);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(vec4(interpolateAtOffset(%s, %s.xy)%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], src_swizzle0);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_UMUL_HI:
- emit_buff(ctx, "umulExtended(%s, %s, umul_temp, mul_utemp);\n", srcs[0], srcs[1]);
- emit_buff(ctx, "%s = %s(%s(umul_temp%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), writemask);
+ emit_buff(&ctx->glsl_strbufs, "umulExtended(%s, %s, umul_temp, mul_utemp);\n", srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(umul_temp%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), writemask);
if (!ctx->cfg->use_gles) {
if (ctx->cfg->has_gpu_shader5)
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
@@ -5207,8 +5249,8 @@ iter_instruction(struct tgsi_iterate_context *iter,
ctx->write_mul_utemp = true;
break;
case TGSI_OPCODE_IMUL_HI:
- emit_buff(ctx, "imulExtended(%s, %s, imul_temp, mul_itemp);\n", srcs[0], srcs[1]);
- emit_buff(ctx, "%s = %s(%s(imul_temp%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), writemask);
+ emit_buff(&ctx->glsl_strbufs, "imulExtended(%s, %s, imul_temp, mul_itemp);\n", srcs[0], srcs[1]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(imul_temp%s));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), writemask);
if (!ctx->cfg->use_gles) {
if (ctx->cfg->has_gpu_shader5)
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
@@ -5219,36 +5261,36 @@ iter_instruction(struct tgsi_iterate_context *iter,
break;
case TGSI_OPCODE_IBFE:
- emit_buff(ctx, "%s = %s(%s(bitfieldExtract(%s, int(%s.x), int(%s.x))));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], srcs[2]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(bitfieldExtract(%s, int(%s.x), int(%s.x))));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], srcs[2]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_UBFE:
- emit_buff(ctx, "%s = %s(%s(bitfieldExtract(%s, int(%s.x), int(%s.x))));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], srcs[2]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(bitfieldExtract(%s, int(%s.x), int(%s.x))));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], srcs[1], srcs[2]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_BFI:
- emit_buff(ctx, "%s = %s(uintBitsToFloat(bitfieldInsert(%s, %s, int(%s), int(%s))));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1], srcs[2], srcs[3]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(uintBitsToFloat(bitfieldInsert(%s, %s, int(%s), int(%s))));\n", dsts[0], get_string(dinfo.dstconv), srcs[0], srcs[1], srcs[2], srcs[3]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_BREV:
- emit_buff(ctx, "%s = %s(%s(bitfieldReverse(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(bitfieldReverse(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_POPC:
- emit_buff(ctx, "%s = %s(%s(bitCount(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(bitCount(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_LSB:
- emit_buff(ctx, "%s = %s(%s(findLSB(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(findLSB(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_IMSB:
case TGSI_OPCODE_UMSB:
- emit_buff(ctx, "%s = %s(%s(findMSB(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(findMSB(%s)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0]);
ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5;
break;
case TGSI_OPCODE_BARRIER:
- emit_buf(ctx, "barrier();\n");
+ emit_buf(&ctx->glsl_strbufs, "barrier();\n");
break;
case TGSI_OPCODE_MEMBAR: {
struct immed *imd = &ctx->imm[(inst->Src[0].Register.Index)];
@@ -5259,23 +5301,23 @@ iter_instruction(struct tgsi_iterate_context *iter,
TGSI_MEMBAR_SHARED);
if (val & TGSI_MEMBAR_THREAD_GROUP) {
- emit_buf(ctx, "groupMemoryBarrier();\n");
+ emit_buf(&ctx->glsl_strbufs, "groupMemoryBarrier();\n");
} else {
if ((val & all_val) == all_val) {
- emit_buf(ctx, "memoryBarrier();\n");
+ emit_buf(&ctx->glsl_strbufs, "memoryBarrier();\n");
ctx->shader_req_bits |= SHADER_REQ_IMAGE_LOAD_STORE;
} else {
if (val & TGSI_MEMBAR_SHADER_BUFFER) {
- emit_buf(ctx, "memoryBarrierBuffer();\n");
+ emit_buf(&ctx->glsl_strbufs, "memoryBarrierBuffer();\n");
}
if (val & TGSI_MEMBAR_ATOMIC_BUFFER) {
- emit_buf(ctx, "memoryBarrierAtomic();\n");
+ emit_buf(&ctx->glsl_strbufs, "memoryBarrierAtomic();\n");
}
if (val & TGSI_MEMBAR_SHADER_IMAGE) {
- emit_buf(ctx, "memoryBarrierImage();\n");
+ emit_buf(&ctx->glsl_strbufs, "memoryBarrierImage();\n");
}
if (val & TGSI_MEMBAR_SHARED) {
- emit_buf(ctx, "memoryBarrierShared();\n");
+ emit_buf(&ctx->glsl_strbufs, "memoryBarrierShared();\n");
}
}
}
@@ -5287,7 +5329,8 @@ iter_instruction(struct tgsi_iterate_context *iter,
return false;
srcs[1] = ctx->src_bufs[1].buf;
}
- translate_store(ctx, inst, &sinfo, srcs, dsts[0]);
+ translate_store(ctx, &ctx->glsl_strbufs, ctx->ssbo_memory_qualifier,
+ inst, &sinfo, srcs, dsts[0]);
break;
case TGSI_OPCODE_LOAD:
if (ctx->cfg->use_gles) {
@@ -5295,7 +5338,8 @@ iter_instruction(struct tgsi_iterate_context *iter,
return false;
srcs[1] = ctx->src_bufs[1].buf;
}
- translate_load(ctx, inst, &sinfo, &dinfo, srcs, dsts[0], writemask);
+ translate_load(ctx, &ctx->glsl_strbufs, ctx->ssbo_memory_qualifier, ctx->images,
+ inst, &sinfo, &dinfo, srcs, dsts[0], writemask);
break;
case TGSI_OPCODE_ATOMUADD:
case TGSI_OPCODE_ATOMXCHG:
@@ -5319,7 +5363,7 @@ iter_instruction(struct tgsi_iterate_context *iter,
break;
case TGSI_OPCODE_CLOCK:
ctx->shader_req_bits |= SHADER_REQ_SHADER_CLOCK;
- emit_buff(ctx, "%s = uintBitsToFloat(clock2x32ARB());\n", dsts[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = uintBitsToFloat(clock2x32ARB());\n", dsts[0]);
break;
default:
vrend_printf("failed to convert opcode %d\n", inst->Instruction.Opcode);
@@ -5329,14 +5373,14 @@ iter_instruction(struct tgsi_iterate_context *iter,
for (uint32_t i = 0; i < 1; i++) {
enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
if (dtype == TGSI_TYPE_DOUBLE) {
- emit_buff(ctx, "%s = uintBitsToFloat(unpackDouble2x32(%s));\n", fp64_dsts[0], dsts[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = uintBitsToFloat(unpackDouble2x32(%s));\n", fp64_dsts[0], dsts[0]);
}
}
if (inst->Instruction.Saturate) {
- emit_buff(ctx, "%s = clamp(%s, 0.0, 1.0);\n", dsts[0], dsts[0]);
+ emit_buff(&ctx->glsl_strbufs, "%s = clamp(%s, 0.0, 1.0);\n", dsts[0], dsts[0]);
}
- if (strbuf_get_error(&ctx->glsl_main))
+ if (strbuf_get_error(&ctx->glsl_strbufs.glsl_main))
return false;
return true;
}
@@ -5351,143 +5395,143 @@ prolog(struct tgsi_iterate_context *iter)
if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX &&
ctx->key->gs_present)
- require_glsl_ver(ctx, 150);
+ ctx->glsl_ver_required = require_glsl_ver(ctx, 150);
return true;
}
-static void emit_ext(struct dump_ctx *ctx, const char *name,
+static void emit_ext(struct vrend_glsl_strbufs *glsl_strbufs, const char *name,
const char *verb)
{
- emit_ver_extf(ctx, "#extension GL_%s : %s\n", name, verb);
+ emit_ver_extf(glsl_strbufs, "#extension GL_%s : %s\n", name, verb);
}
-static void emit_header(struct dump_ctx *ctx)
+static void emit_header(const struct dump_ctx *ctx, struct vrend_glsl_strbufs *glsl_strbufs)
{
if (ctx->cfg->use_gles) {
- emit_ver_extf(ctx, "#version %d es\n", ctx->cfg->glsl_version);
+ emit_ver_extf(glsl_strbufs, "#version %d es\n", ctx->cfg->glsl_version);
if ((ctx->shader_req_bits & SHADER_REQ_CLIP_DISTANCE)||
(ctx->num_clip_dist == 0 && ctx->key->clip_plane_enable)) {
- emit_ext(ctx, "EXT_clip_cull_distance", "require");
+ emit_ext(glsl_strbufs, "EXT_clip_cull_distance", "require");
}
if (ctx->shader_req_bits & SHADER_REQ_SAMPLER_MS)
- emit_ext(ctx, "OES_texture_storage_multisample_2d_array", "require");
+ emit_ext(glsl_strbufs, "OES_texture_storage_multisample_2d_array", "require");
if (ctx->shader_req_bits & SHADER_REQ_CONSERVATIVE_DEPTH)
- emit_ext(ctx, "EXT_conservative_depth", "require");
+ emit_ext(glsl_strbufs, "EXT_conservative_depth", "require");
if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT) {
if (ctx->shader_req_bits & SHADER_REQ_FBFETCH)
- emit_ext(ctx, "EXT_shader_framebuffer_fetch", "require");
+ emit_ext(glsl_strbufs, "EXT_shader_framebuffer_fetch", "require");
if (ctx->shader_req_bits & SHADER_REQ_BLEND_EQUATION_ADVANCED)
- emit_ext(ctx, "KHR_blend_equation_advanced", "require");
+ emit_ext(glsl_strbufs, "KHR_blend_equation_advanced", "require");
}
if (ctx->shader_req_bits & SHADER_REQ_VIEWPORT_IDX)
- emit_ext(ctx, "OES_viewport_array", "require");
+ emit_ext(glsl_strbufs, "OES_viewport_array", "require");
if (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY) {
- emit_ext(ctx, "EXT_geometry_shader", "require");
+ emit_ext(glsl_strbufs, "EXT_geometry_shader", "require");
if (ctx->shader_req_bits & SHADER_REQ_PSIZE)
- emit_ext(ctx, "OES_geometry_point_size", "enable");
+ emit_ext(glsl_strbufs, "OES_geometry_point_size", "enable");
}
if (ctx->shader_req_bits & SHADER_REQ_NV_IMAGE_FORMATS)
- emit_ext(ctx, "NV_image_formats", "require");
+ emit_ext(glsl_strbufs, "NV_image_formats", "require");
if ((ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ||
ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)) {
if (ctx->cfg->glsl_version < 320)
- emit_ext(ctx, "OES_tessellation_shader", "require");
- emit_ext(ctx, "OES_tessellation_point_size", "enable");
+ emit_ext(glsl_strbufs, "OES_tessellation_shader", "require");
+ emit_ext(glsl_strbufs, "OES_tessellation_point_size", "enable");
}
if (ctx->cfg->glsl_version < 320) {
if (ctx->shader_req_bits & SHADER_REQ_SAMPLER_BUF)
- emit_ext(ctx, "EXT_texture_buffer", "require");
+ emit_ext(glsl_strbufs, "EXT_texture_buffer", "require");
if (prefer_generic_io_block(ctx, io_in) || prefer_generic_io_block(ctx, io_out)) {
- emit_ext(ctx, "OES_shader_io_blocks", "require");
+ emit_ext(glsl_strbufs, "OES_shader_io_blocks", "require");
}
if (ctx->shader_req_bits & SHADER_REQ_SAMPLE_SHADING)
- emit_ext(ctx, "OES_sample_variables", "require");
+ emit_ext(glsl_strbufs, "OES_sample_variables", "require");
if (ctx->shader_req_bits & SHADER_REQ_GPU_SHADER5) {
- emit_ext(ctx, "OES_gpu_shader5", "require");
- emit_ext(ctx, "OES_shader_multisample_interpolation",
+ emit_ext(glsl_strbufs, "OES_gpu_shader5", "require");
+ emit_ext(glsl_strbufs, "OES_shader_multisample_interpolation",
"require");
}
if (ctx->shader_req_bits & SHADER_REQ_CUBE_ARRAY)
- emit_ext(ctx, "OES_texture_cube_map_array", "require");
+ emit_ext(glsl_strbufs, "OES_texture_cube_map_array", "require");
if (ctx->shader_req_bits & SHADER_REQ_LAYER)
- emit_ext(ctx, "EXT_geometry_shader", "require");
+ emit_ext(glsl_strbufs, "EXT_geometry_shader", "require");
if (ctx->shader_req_bits & SHADER_REQ_IMAGE_ATOMIC)
- emit_ext(ctx, "OES_shader_image_atomic", "require");
+ emit_ext(glsl_strbufs, "OES_shader_image_atomic", "require");
if (ctx->shader_req_bits & SHADER_REQ_GEOMETRY_SHADER)
- emit_ext(ctx, "EXT_geometry_shader", "require");
+ emit_ext(glsl_strbufs, "EXT_geometry_shader", "require");
}
if (logiop_require_inout(ctx->key)) {
if (ctx->key->fs_logicop_emulate_coherent)
- emit_ext(ctx, "EXT_shader_framebuffer_fetch", "require");
+ emit_ext(glsl_strbufs, "EXT_shader_framebuffer_fetch", "require");
else
- emit_ext(ctx, "EXT_shader_framebuffer_fetch_non_coherent", "require");
+ emit_ext(glsl_strbufs, "EXT_shader_framebuffer_fetch_non_coherent", "require");
}
if (ctx->shader_req_bits & SHADER_REQ_LODQ)
- emit_ext(ctx, "EXT_texture_query_lod", "require");
+ emit_ext(glsl_strbufs, "EXT_texture_query_lod", "require");
- emit_hdr(ctx, "precision highp float;\n");
- emit_hdr(ctx, "precision highp int;\n");
+ emit_hdr(glsl_strbufs, "precision highp float;\n");
+ emit_hdr(glsl_strbufs, "precision highp int;\n");
} else {
if (ctx->prog_type == TGSI_PROCESSOR_COMPUTE) {
- emit_ver_ext(ctx, "#version 330\n");
- emit_ext(ctx, "ARB_compute_shader", "require");
+ emit_ver_ext(glsl_strbufs, "#version 330\n");
+ emit_ext(glsl_strbufs, "ARB_compute_shader", "require");
} else {
if (ctx->glsl_ver_required > 150)
- emit_ver_extf(ctx, "#version %d\n", ctx->glsl_ver_required);
+ emit_ver_extf(glsl_strbufs, "#version %d\n", ctx->glsl_ver_required);
else if (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY ||
ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL ||
ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ||
ctx->glsl_ver_required == 150)
- emit_ver_ext(ctx, "#version 150\n");
+ emit_ver_ext(glsl_strbufs, "#version 150\n");
else if (ctx->glsl_ver_required == 140)
- emit_ver_ext(ctx, "#version 140\n");
+ emit_ver_ext(glsl_strbufs, "#version 140\n");
else
- emit_ver_ext(ctx, "#version 130\n");
+ emit_ver_ext(glsl_strbufs, "#version 130\n");
}
if (ctx->shader_req_bits & SHADER_REQ_ENHANCED_LAYOUTS)
- emit_ext(ctx, "ARB_enhanced_layouts", "require");
+ emit_ext(glsl_strbufs, "ARB_enhanced_layouts", "require");
if (ctx->shader_req_bits & SHADER_REQ_SEPERATE_SHADER_OBJECTS)
- emit_ext(ctx, "ARB_separate_shader_objects", "require");
+ emit_ext(glsl_strbufs, "ARB_separate_shader_objects", "require");
if (ctx->shader_req_bits & SHADER_REQ_ARRAYS_OF_ARRAYS)
- emit_ext(ctx, "ARB_arrays_of_arrays", "require");
+ emit_ext(glsl_strbufs, "ARB_arrays_of_arrays", "require");
if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ||
ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)
- emit_ext(ctx, "ARB_tessellation_shader", "require");
+ emit_ext(glsl_strbufs, "ARB_tessellation_shader", "require");
if (ctx->prog_type == TGSI_PROCESSOR_VERTEX && ctx->cfg->use_explicit_locations)
- emit_ext(ctx, "ARB_explicit_attrib_location", "require");
+ emit_ext(glsl_strbufs, "ARB_explicit_attrib_location", "require");
if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT && fs_emit_layout(ctx))
- emit_ext(ctx, "ARB_fragment_coord_conventions", "require");
+ emit_ext(glsl_strbufs, "ARB_fragment_coord_conventions", "require");
if (ctx->ubo_used_mask)
- emit_ext(ctx, "ARB_uniform_buffer_object", "require");
+ emit_ext(glsl_strbufs, "ARB_uniform_buffer_object", "require");
if (ctx->num_cull_dist_prop || ctx->key->prev_stage_num_cull_out)
- emit_ext(ctx, "ARB_cull_distance", "require");
+ emit_ext(glsl_strbufs, "ARB_cull_distance", "require");
if (ctx->ssbo_used_mask)
- emit_ext(ctx, "ARB_shader_storage_buffer_object", "require");
+ emit_ext(glsl_strbufs, "ARB_shader_storage_buffer_object", "require");
if (ctx->num_abo) {
- emit_ext(ctx, "ARB_shader_atomic_counters", "require");
- emit_ext(ctx, "ARB_shader_atomic_counter_ops", "require");
+ emit_ext(glsl_strbufs, "ARB_shader_atomic_counters", "require");
+ emit_ext(glsl_strbufs, "ARB_shader_atomic_counter_ops", "require");
}
for (uint32_t i = 0; i < ARRAY_SIZE(shader_req_table); i++) {
@@ -5495,7 +5539,7 @@ static void emit_header(struct dump_ctx *ctx)
continue;
if (ctx->shader_req_bits & shader_req_table[i].key) {
- emit_ext(ctx, shader_req_table[i].string, "require");
+ emit_ext(glsl_strbufs, shader_req_table[i].string, "require");
}
}
}
@@ -5587,7 +5631,9 @@ static const char *get_aux_string(unsigned location)
}
}
-static void emit_sampler_decl(struct dump_ctx *ctx,
+static void emit_sampler_decl(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ uint32_t *shadow_samp_mask,
uint32_t i, uint32_t range,
const struct vrend_shader_sampler *sampler)
{
@@ -5604,14 +5650,14 @@ static void emit_sampler_decl(struct dump_ctx *ctx,
is_shad = samplertype_is_shadow(sampler->tgsi_sampler_type);
if (range)
- emit_hdrf(ctx, "uniform %s %csampler%s %ssamp%d[%d];\n", precision, ptc, stc, sname, i, range);
+ emit_hdrf(glsl_strbufs, "uniform %s %csampler%s %ssamp%d[%d];\n", precision, ptc, stc, sname, i, range);
else
- emit_hdrf(ctx, "uniform %s %csampler%s %ssamp%d;\n", precision, ptc, stc, sname, i);
+ emit_hdrf(glsl_strbufs, "uniform %s %csampler%s %ssamp%d;\n", precision, ptc, stc, sname, i);
if (is_shad) {
- emit_hdrf(ctx, "uniform %s vec4 %sshadmask%d;\n", precision, sname, i);
- emit_hdrf(ctx, "uniform %s vec4 %sshadadd%d;\n", precision, sname, i);
- ctx->shadow_samp_mask |= (1 << i);
+ emit_hdrf(glsl_strbufs, "uniform %s vec4 %sshadmask%d;\n", precision, sname, i);
+ emit_hdrf(glsl_strbufs, "uniform %s vec4 %sshadadd%d;\n", precision, sname, i);
+ *shadow_samp_mask |= (1 << i);
}
}
@@ -5745,7 +5791,8 @@ const char *get_internalformat_string(int virgl_format, enum tgsi_return_type *s
}
}
-static void emit_image_decl(struct dump_ctx *ctx,
+static void emit_image_decl(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
uint32_t i, uint32_t range,
const struct vrend_shader_image *image)
{
@@ -5770,69 +5817,72 @@ static void emit_image_decl(struct dump_ctx *ctx,
access = "writeonly ";
if (ctx->cfg->use_gles) { /* TODO: enable on OpenGL 4.2 and up also */
- emit_hdrf(ctx, "layout(binding=%d%s%s) ",
+ emit_hdrf(glsl_strbufs, "layout(binding=%d%s%s) ",
i, formatstr[0] != '\0' ? ", " : ", rgba32f", formatstr);
} else if (formatstr[0] != '\0') {
- emit_hdrf(ctx, "layout(%s) ", formatstr);
+ emit_hdrf(glsl_strbufs, "layout(%s) ", formatstr);
}
if (range)
- emit_hdrf(ctx, "%s%suniform %s%cimage%s %simg%d[%d];\n",
+ emit_hdrf(glsl_strbufs, "%s%suniform %s%cimage%s %simg%d[%d];\n",
access, volatile_str, precision, ptc, stc, sname, i, range);
else
- emit_hdrf(ctx, "%s%suniform %s%cimage%s %simg%d;\n",
+ emit_hdrf(glsl_strbufs, "%s%suniform %s%cimage%s %simg%d;\n",
access, volatile_str, precision, ptc, stc, sname, i);
}
-static void emit_ios_common(struct dump_ctx *ctx)
+static int emit_ios_common(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ uint32_t *shadow_samp_mask)
{
uint i;
const char *sname = tgsi_proc_to_prefix(ctx->prog_type);
+ int glsl_ver_required = ctx->glsl_ver_required;
for (i = 0; i < ctx->num_temp_ranges; i++) {
- emit_hdrf(ctx, "vec4 temp%d[%d];\n", ctx->temp_ranges[i].first, ctx->temp_ranges[i].last - ctx->temp_ranges[i].first + 1);
+ emit_hdrf(glsl_strbufs, "vec4 temp%d[%d];\n", ctx->temp_ranges[i].first, ctx->temp_ranges[i].last - ctx->temp_ranges[i].first + 1);
}
if (ctx->write_mul_utemp) {
- emit_hdr(ctx, "uvec4 mul_utemp;\n");
- emit_hdr(ctx, "uvec4 umul_temp;\n");
+ emit_hdr(glsl_strbufs, "uvec4 mul_utemp;\n");
+ emit_hdr(glsl_strbufs, "uvec4 umul_temp;\n");
}
if (ctx->write_mul_itemp) {
- emit_hdr(ctx, "ivec4 mul_itemp;\n");
- emit_hdr(ctx, "ivec4 imul_temp;\n");
+ emit_hdr(glsl_strbufs, "ivec4 mul_itemp;\n");
+ emit_hdr(glsl_strbufs, "ivec4 imul_temp;\n");
}
if (ctx->ssbo_used_mask || ctx->has_file_memory) {
- emit_hdr(ctx, "uint ssbo_addr_temp;\n");
+ emit_hdr(glsl_strbufs, "uint ssbo_addr_temp;\n");
}
if (ctx->shader_req_bits & SHADER_REQ_FP64) {
- emit_hdr(ctx, "dvec2 fp64_dst[3];\n");
- emit_hdr(ctx, "dvec2 fp64_src[4];\n");
+ emit_hdr(glsl_strbufs, "dvec2 fp64_dst[3];\n");
+ emit_hdr(glsl_strbufs, "dvec2 fp64_src[4];\n");
}
for (i = 0; i < ctx->num_address; i++) {
- emit_hdrf(ctx, "int addr%d;\n", i);
+ emit_hdrf(glsl_strbufs, "int addr%d;\n", i);
}
if (ctx->num_consts) {
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
- emit_hdrf(ctx, "uniform uvec4 %sconst0[%d];\n", cname, ctx->num_consts);
+ emit_hdrf(glsl_strbufs, "uniform uvec4 %sconst0[%d];\n", cname, ctx->num_consts);
}
if (ctx->ubo_used_mask) {
const char *cname = tgsi_proc_to_prefix(ctx->prog_type);
if (ctx->info.dimension_indirect_files & (1 << TGSI_FILE_CONSTANT)) {
- require_glsl_ver(ctx, 150);
+ glsl_ver_required = require_glsl_ver(ctx, 150);
int first = ffs(ctx->ubo_used_mask) - 1;
unsigned num_ubo = util_bitcount(ctx->ubo_used_mask);
- emit_hdrf(ctx, "uniform %subo { vec4 ubocontents[%d]; } %suboarr[%d];\n", cname, ctx->ubo_sizes[first], cname, num_ubo);
+ emit_hdrf(glsl_strbufs, "uniform %subo { vec4 ubocontents[%d]; } %suboarr[%d];\n", cname, ctx->ubo_sizes[first], cname, num_ubo);
} else {
unsigned mask = ctx->ubo_used_mask;
while (mask) {
uint32_t i = u_bit_scan(&mask);
- emit_hdrf(ctx, "uniform %subo%d { vec4 %subo%dcontents[%d]; };\n", cname, i, cname, i, ctx->ubo_sizes[i]);
+ emit_hdrf(glsl_strbufs, "uniform %subo%d { vec4 %subo%dcontents[%d]; };\n", cname, i, cname, i, ctx->ubo_sizes[i]);
}
}
}
@@ -5841,7 +5891,7 @@ static void emit_ios_common(struct dump_ctx *ctx)
for (i = 0; i < ctx->num_sampler_arrays; i++) {
uint32_t first = ctx->sampler_arrays[i].first;
uint32_t range = ctx->sampler_arrays[i].array_size;
- emit_sampler_decl(ctx, first, range, ctx->samplers + first);
+ emit_sampler_decl(ctx, glsl_strbufs, shadow_samp_mask, first, range, ctx->samplers + first);
}
} else {
uint nsamp = util_last_bit(ctx->samplers_used);
@@ -5850,7 +5900,7 @@ static void emit_ios_common(struct dump_ctx *ctx)
if ((ctx->samplers_used & (1 << i)) == 0)
continue;
- emit_sampler_decl(ctx, i, 0, ctx->samplers + i);
+ emit_sampler_decl(ctx, glsl_strbufs, shadow_samp_mask, i, 0, ctx->samplers + i);
}
}
@@ -5858,21 +5908,21 @@ static void emit_ios_common(struct dump_ctx *ctx)
for (i = 0; i < ctx->num_image_arrays; i++) {
uint32_t first = ctx->image_arrays[i].first;
uint32_t range = ctx->image_arrays[i].array_size;
- emit_image_decl(ctx, first, range, ctx->images + first);
+ emit_image_decl(ctx, glsl_strbufs, first, range, ctx->images + first);
}
} else {
uint32_t mask = ctx->images_used_mask;
while (mask) {
i = u_bit_scan(&mask);
- emit_image_decl(ctx, i, 0, ctx->images + i);
+ emit_image_decl(ctx, glsl_strbufs, i, 0, ctx->images + i);
}
}
for (i = 0; i < ctx->num_abo; i++){
if (ctx->abo_sizes[i] > 1)
- emit_hdrf(ctx, "layout (binding = %d, offset = %d) uniform atomic_uint ac%d[%d];\n", ctx->abo_idx[i], ctx->abo_offsets[i] * 4, i, ctx->abo_sizes[i]);
+ emit_hdrf(glsl_strbufs, "layout (binding = %d, offset = %d) uniform atomic_uint ac%d[%d];\n", ctx->abo_idx[i], ctx->abo_offsets[i] * 4, i, ctx->abo_sizes[i]);
else
- emit_hdrf(ctx, "layout (binding = %d, offset = %d) uniform atomic_uint ac%d;\n", ctx->abo_idx[i], ctx->abo_offsets[i] * 4, i);
+ emit_hdrf(glsl_strbufs, "layout (binding = %d, offset = %d) uniform atomic_uint ac%d;\n", ctx->abo_idx[i], ctx->abo_offsets[i] * 4, i);
}
if (ctx->info.indirect_files & (1 << TGSI_FILE_BUFFER)) {
@@ -5881,7 +5931,7 @@ static void emit_ios_common(struct dump_ctx *ctx)
int start, count;
u_bit_scan_consecutive_range(&mask, &start, &count);
const char *atomic = (ctx->ssbo_atomic_mask & (1 << start)) ? "atomic" : "";
- emit_hdrf(ctx, "layout (binding = %d, std430) buffer %sssbo%d { uint %sssbocontents%d[]; } %sssboarr%s[%d];\n", start, sname, start, sname, start, sname, atomic, count);
+ emit_hdrf(glsl_strbufs, "layout (binding = %d, std430) buffer %sssbo%d { uint %sssbocontents%d[]; } %sssboarr%s[%d];\n", start, sname, start, sname, start, sname, atomic, count);
}
} else {
uint32_t mask = ctx->ssbo_used_mask;
@@ -5889,14 +5939,16 @@ static void emit_ios_common(struct dump_ctx *ctx)
uint32_t id = u_bit_scan(&mask);
enum vrend_type_qualifier type = (ctx->ssbo_integer_mask & (1 << id)) ? INT : UINT;
char *coherent = ctx->ssbo_memory_qualifier[id] == TGSI_MEMORY_COHERENT ? "coherent" : "";
- emit_hdrf(ctx, "layout (binding = %d, std430) %s buffer %sssbo%d { %s %sssbocontents%d[]; };\n", id, coherent, sname, id,
+ emit_hdrf(glsl_strbufs, "layout (binding = %d, std430) %s buffer %sssbo%d { %s %sssbocontents%d[]; };\n", id, coherent, sname, id,
get_string(type), sname, id);
}
}
+ return glsl_ver_required;
}
-static void emit_ios_streamout(struct dump_ctx *ctx)
+static void emit_ios_streamout(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
if (ctx->so) {
char outtype[6] = "";
@@ -5909,7 +5961,7 @@ static void emit_ios_streamout(struct dump_ctx *ctx)
snprintf(outtype, 6, "vec%d", ctx->so->output[i].num_components);
if (ctx->so->output[i].stream && ctx->prog_type == TGSI_PROCESSOR_GEOMETRY)
- emit_hdrf(ctx, "layout (stream=%d) out %s tfout%d;\n", ctx->so->output[i].stream, outtype, i);
+ emit_hdrf(glsl_strbufs, "layout (stream=%d) out %s tfout%d;\n", ctx->so->output[i].stream, outtype, i);
else {
const struct vrend_shader_io *output = get_io_slot(&ctx->outputs[0], ctx->num_outputs,
ctx->so->output[i].register_index);
@@ -5917,46 +5969,50 @@ static void emit_ios_streamout(struct dump_ctx *ctx)
output->glsl_predefined_no_emit) {
if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL)
- emit_hdrf(ctx, "out %s tfout%d[];\n", outtype, i);
+ emit_hdrf(glsl_strbufs, "out %s tfout%d[];\n", outtype, i);
else
- emit_hdrf(ctx, "out %s tfout%d;\n", outtype, i);
+ emit_hdrf(glsl_strbufs, "out %s tfout%d;\n", outtype, i);
}
}
}
}
}
-static inline void emit_winsys_correction(struct dump_ctx *ctx)
+static inline void emit_winsys_correction(struct vrend_glsl_strbufs *glsl_strbufs)
{
- emit_hdr(ctx, "uniform float winsys_adjust_y;\n");
+ emit_hdr(glsl_strbufs, "uniform float winsys_adjust_y;\n");
}
-static void emit_ios_indirect_generics_output(struct dump_ctx *ctx, const char *postfix)
+static void emit_ios_indirect_generics_output(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ const char *postfix)
{
- if (ctx->generic_output_range.used) {
- int size = ctx->generic_output_range.io.last - ctx->generic_output_range.io.sid + 1;
+ if (ctx->generic_ios.output_range.used) {
+ int size = ctx->generic_ios.output_range.io.last - ctx->generic_ios.output_range.io.sid + 1;
if (prefer_generic_io_block(ctx, io_out)) {
char blockname[64];
const char *stage_prefix = get_stage_output_name_prefix(ctx->prog_type);
- get_blockname(blockname, stage_prefix, &ctx->generic_output_range.io);
+ get_blockname(blockname, stage_prefix, &ctx->generic_ios.output_range.io);
char blockvarame[64];
- get_blockvarname(blockvarame, stage_prefix, &ctx->generic_output_range.io, postfix);
+ get_blockvarname(blockvarame, stage_prefix, &ctx->generic_ios.output_range.io, postfix);
- emit_hdrf(ctx, "out %s {\n vec4 %s[%d]; \n} %s;\n", blockname,
- ctx->generic_output_range.io.glsl_name, size, blockvarame);
+ emit_hdrf(glsl_strbufs, "out %s {\n vec4 %s[%d]; \n} %s;\n", blockname,
+ ctx->generic_ios.output_range.io.glsl_name, size, blockvarame);
} else
- emit_hdrf(ctx, "out vec4 %s%s[%d];\n",
- ctx->generic_output_range.io.glsl_name,
+ emit_hdrf(glsl_strbufs, "out vec4 %s%s[%d];\n",
+ ctx->generic_ios.output_range.io.glsl_name,
postfix,
size);
}
}
-static void emit_ios_indirect_generics_input(struct dump_ctx *ctx, const char *postfix)
+static void emit_ios_indirect_generics_input(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ const char *postfix)
{
- if (ctx->generic_input_range.used) {
- int size = ctx->generic_input_range.io.last - ctx->generic_input_range.io.sid + 1;
+ if (ctx->generic_ios.input_range.used) {
+ int size = ctx->generic_ios.input_range.io.last - ctx->generic_ios.input_range.io.sid + 1;
assert(size < 256 && size >= 0);
if (size < ctx->key->num_indirect_generic_inputs) {
VREND_DEBUG(dbg_shader, NULL, "WARNING: shader key indicates less indirect inputs"
@@ -5970,23 +6026,26 @@ static void emit_ios_indirect_generics_input(struct dump_ctx *ctx, const char *p
char blockvarame[64];
const char *stage_prefix = get_stage_input_name_prefix(ctx, ctx->prog_type);
- get_blockname(blockname, stage_prefix, &ctx->generic_input_range.io);
- get_blockvarname(blockvarame, stage_prefix, &ctx->generic_input_range.io,
+ get_blockname(blockname, stage_prefix, &ctx->generic_ios.input_range.io);
+ get_blockvarname(blockvarame, stage_prefix, &ctx->generic_ios.input_range.io,
postfix);
- emit_hdrf(ctx, "in %s {\n vec4 %s[%d]; \n} %s;\n",
- blockname, ctx->generic_input_range.io.glsl_name,
+ emit_hdrf(glsl_strbufs, "in %s {\n vec4 %s[%d]; \n} %s;\n",
+ blockname, ctx->generic_ios.input_range.io.glsl_name,
size, blockvarame);
} else
- emit_hdrf(ctx, "in vec4 %s%s[%d];\n",
- ctx->generic_input_range.io.glsl_name,
+ emit_hdrf(glsl_strbufs, "in vec4 %s%s[%d];\n",
+ ctx->generic_ios.input_range.io.glsl_name,
postfix,
size);
}
}
static void
-emit_ios_generic(struct dump_ctx *ctx, enum io_type iot, const char *prefix,
+emit_ios_generic(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ enum io_type iot, const char *prefix,
const struct vrend_shader_io *io, const char *inout,
const char *postfix)
{
@@ -6013,9 +6072,9 @@ emit_ios_generic(struct dump_ctx *ctx, enum io_type iot, const char *prefix,
t = type[io->num_components - 1];
if (io->first == io->last) {
- emit_hdr(ctx, layout);
+ emit_hdr(glsl_strbufs, layout);
/* ugly leave spaces to patch interp in later */
- emit_hdrf(ctx, "%s%s\n%s %s %s %s%s;\n",
+ emit_hdrf(glsl_strbufs, "%s%s\n%s %s %s %s%s;\n",
io->precise ? "precise" : "",
io->invariant ? "invariant" : "",
prefix,
@@ -6026,9 +6085,9 @@ emit_ios_generic(struct dump_ctx *ctx, enum io_type iot, const char *prefix,
if (io->name == TGSI_SEMANTIC_GENERIC) {
if (iot == io_in)
- ctx->generic_inputs_emitted_mask |= 1 << io->sid;
+ generic_ios->inputs_emitted_mask |= 1 << io->sid;
else
- ctx->generic_outputs_emitted_mask |= 1 << io->sid;
+ generic_ios->outputs_emitted_mask |= 1 << io->sid;
}
} else {
@@ -6042,9 +6101,9 @@ emit_ios_generic(struct dump_ctx *ctx, enum io_type iot, const char *prefix,
char blockvarame[64];
get_blockvarname(blockvarame, stage_prefix, io, postfix);
- emit_hdrf(ctx, "%s %s {\n", inout, blockname);
- emit_hdr(ctx, layout);
- emit_hdrf(ctx, "%s%s\n%s %s %s[%d]; \n} %s;\n",
+ emit_hdrf(glsl_strbufs, "%s %s {\n", inout, blockname);
+ emit_hdr(glsl_strbufs, layout);
+ emit_hdrf(glsl_strbufs, "%s%s\n%s %s %s[%d]; \n} %s;\n",
io->precise ? "precise" : "",
io->invariant ? "invariant" : "",
prefix,
@@ -6053,8 +6112,8 @@ emit_ios_generic(struct dump_ctx *ctx, enum io_type iot, const char *prefix,
io->last - io->first +1,
blockvarame);
} else {
- emit_hdr(ctx, layout);
- emit_hdrf(ctx, "%s%s\n%s %s %s %s%s[%d];\n",
+ emit_hdr(glsl_strbufs, layout);
+ emit_hdrf(glsl_strbufs, "%s%s\n%s %s %s %s%s[%d];\n",
io->precise ? "precise" : "",
io->invariant ? "invariant" : "",
prefix,
@@ -6071,7 +6130,12 @@ emit_ios_generic(struct dump_ctx *ctx, enum io_type iot, const char *prefix,
typedef bool (*can_emit_generic_callback)(const struct vrend_shader_io *io);
static void
-emit_ios_generic_outputs(struct dump_ctx *ctx,
+emit_ios_generic_outputs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ uint8_t front_back_color_emitted_flags[],
+ bool *force_color_two_side,
+ uint32_t *num_interps,
const can_emit_generic_callback can_emit_generic)
{
uint32_t i;
@@ -6089,25 +6153,26 @@ emit_ios_generic_outputs(struct dump_ctx *ctx,
if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC ||
ctx->outputs[i].name == TGSI_SEMANTIC_COLOR ||
ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR) {
- ctx->num_interps++;
+ (*num_interps)++;
/* ugly leave spaces to patch interp in later */
prefix = INTERP_PREFIX;
}
if (ctx->outputs[i].name == TGSI_SEMANTIC_COLOR) {
- ctx->front_back_color_emitted_flags[ctx->outputs[i].sid] |= FRONT_COLOR_EMITTED;
+ front_back_color_emitted_flags[ctx->outputs[i].sid] |= FRONT_COLOR_EMITTED;
fc_emitted |= 1ull << ctx->outputs[i].sid;
}
if (ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR) {
- ctx->front_back_color_emitted_flags[ctx->outputs[i].sid] |= BACK_COLOR_EMITTED;
+ front_back_color_emitted_flags[ctx->outputs[i].sid] |= BACK_COLOR_EMITTED;
bc_emitted |= 1ull << ctx->outputs[i].sid;
}
- emit_ios_generic(ctx, io_out, prefix, &ctx->outputs[i],
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios,
+ io_out, prefix, &ctx->outputs[i],
ctx->outputs[i].fbfetch_used ? "inout" : "out", "");
} else if (ctx->outputs[i].invariant || ctx->outputs[i].precise) {
- emit_hdrf(ctx, "%s%s;\n",
+ emit_hdrf(glsl_strbufs, "%s%s;\n",
ctx->outputs[i].precise ? "precise " :
(ctx->outputs[i].invariant ? "invariant " : ""),
ctx->outputs[i].glsl_name);
@@ -6118,11 +6183,12 @@ emit_ios_generic_outputs(struct dump_ctx *ctx,
* we have to force two side coloring, because the FS shader might expect
* a front color too. */
if (bc_emitted & ~fc_emitted)
- ctx->force_color_two_side = 1;
+ *force_color_two_side = 1;
}
static void
-emit_ios_patch(struct dump_ctx *ctx, const char *prefix, const struct vrend_shader_io *io,
+emit_ios_patch(struct vrend_glsl_strbufs *glsl_strbufs,
+ const char *prefix, const struct vrend_shader_io *io,
const char *inout, int size)
{
const char type[4][6] = {"float", " vec2", " vec3", " vec4"};
@@ -6131,19 +6197,19 @@ emit_ios_patch(struct dump_ctx *ctx, const char *prefix, const struct vrend_shad
if (io->layout_location > 0) {
/* we need to define a layout here because interleaved arrays might be emited */
if (io->swizzle_offset)
- emit_hdrf(ctx, "layout(location = %d, component = %d)\n",
+ emit_hdrf(glsl_strbufs, "layout(location = %d, component = %d)\n",
io->layout_location - 1, io->swizzle_offset);
else
- emit_hdrf(ctx, "layout(location = %d)\n", io->layout_location - 1);
+ emit_hdrf(glsl_strbufs, "layout(location = %d)\n", io->layout_location - 1);
}
if (io->usage_mask != 0xf)
t = type[io->num_components - 1];
if (io->last == io->first)
- emit_hdrf(ctx, "%s %s %s %s;\n", prefix, inout, t, io->glsl_name);
+ emit_hdrf(glsl_strbufs, "%s %s %s %s;\n", prefix, inout, t, io->glsl_name);
else
- emit_hdrf(ctx, "%s %s %s %s[%d];\n", prefix, inout, t,
+ emit_hdrf(glsl_strbufs, "%s %s %s %s[%d];\n", prefix, inout, t,
io->glsl_name, size);
}
@@ -6153,7 +6219,12 @@ can_emit_generic_default(UNUSED const struct vrend_shader_io *io)
return true;
}
-static void emit_ios_vs(struct dump_ctx *ctx)
+static void emit_ios_vs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ uint32_t *num_interps,
+ uint8_t front_back_color_emitted_flags[],
+ bool *force_color_two_side)
{
uint32_t i;
@@ -6161,19 +6232,20 @@ static void emit_ios_vs(struct dump_ctx *ctx)
char postfix[32] = "";
if (!ctx->inputs[i].glsl_predefined_no_emit) {
if (ctx->cfg->use_explicit_locations) {
- emit_hdrf(ctx, "layout(location=%d) ", ctx->inputs[i].first);
+ emit_hdrf(glsl_strbufs, "layout(location=%d) ", ctx->inputs[i].first);
}
if (ctx->inputs[i].first != ctx->inputs[i].last)
snprintf(postfix, sizeof(postfix), "[%d]", ctx->inputs[i].last - ctx->inputs[i].first + 1);
const char *vtype[3] = {"vec4", "ivec4", "uvec4"};
- emit_hdrf(ctx, "in %s %s%s;\n",
+ emit_hdrf(glsl_strbufs, "in %s %s%s;\n",
vtype[ctx->inputs[i].type], ctx->inputs[i].glsl_name, postfix);
}
}
- emit_ios_indirect_generics_output(ctx, "");
+ emit_ios_indirect_generics_output(ctx, glsl_strbufs, "");
- emit_ios_generic_outputs(ctx, can_emit_generic_default);
+ emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags,
+ force_color_two_side, num_interps, can_emit_generic_default);
if (ctx->key->color_two_side || ctx->force_color_two_side) {
bool fcolor_emitted, bcolor_emitted;
@@ -6184,24 +6256,24 @@ static void emit_ios_vs(struct dump_ctx *ctx)
fcolor_emitted = bcolor_emitted = false;
- fcolor_emitted = ctx->front_back_color_emitted_flags[ctx->outputs[i].sid] & FRONT_COLOR_EMITTED;
- bcolor_emitted = ctx->front_back_color_emitted_flags[ctx->outputs[i].sid] & BACK_COLOR_EMITTED;
+ fcolor_emitted = front_back_color_emitted_flags[ctx->outputs[i].sid] & FRONT_COLOR_EMITTED;
+ bcolor_emitted = front_back_color_emitted_flags[ctx->outputs[i].sid] & BACK_COLOR_EMITTED;
if (fcolor_emitted && !bcolor_emitted) {
- emit_hdrf(ctx, "%sout vec4 ex_bc%d;\n", INTERP_PREFIX, ctx->outputs[i].sid);
- ctx->front_back_color_emitted_flags[ctx->outputs[i].sid] |= BACK_COLOR_EMITTED;
+ emit_hdrf(glsl_strbufs, "%sout vec4 ex_bc%d;\n", INTERP_PREFIX, ctx->outputs[i].sid);
+ front_back_color_emitted_flags[ctx->outputs[i].sid] |= BACK_COLOR_EMITTED;
}
if (bcolor_emitted && !fcolor_emitted) {
- emit_hdrf(ctx, "%sout vec4 ex_c%d;\n", INTERP_PREFIX, ctx->outputs[i].sid);
- ctx->front_back_color_emitted_flags[ctx->outputs[i].sid] |= FRONT_COLOR_EMITTED;
+ emit_hdrf(glsl_strbufs, "%sout vec4 ex_c%d;\n", INTERP_PREFIX, ctx->outputs[i].sid);
+ front_back_color_emitted_flags[ctx->outputs[i].sid] |= FRONT_COLOR_EMITTED;
}
}
}
- emit_winsys_correction(ctx);
+ emit_winsys_correction(glsl_strbufs);
if (ctx->has_clipvertex) {
- emit_hdrf(ctx, "%svec4 clipv_tmp;\n", ctx->has_clipvertex_so ? "out " : "");
+ emit_hdrf(glsl_strbufs, "%svec4 clipv_tmp;\n", ctx->has_clipvertex_so ? "out " : "");
}
if (ctx->num_clip_dist || ctx->key->clip_plane_enable) {
bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
@@ -6219,14 +6291,14 @@ static void emit_ios_vs(struct dump_ctx *ctx)
} else
snprintf(clip_buf, 64, "out float gl_ClipDistance[%d];\n", num_clip_dists);
if (ctx->key->clip_plane_enable) {
- emit_hdr(ctx, "uniform vec4 clipp[8];\n");
+ emit_hdr(glsl_strbufs, "uniform vec4 clipp[8];\n");
}
if ((ctx->key->gs_present || ctx->key->tes_present) && ctx->key->next_stage_pervertex_in) {
- emit_hdrf(ctx, "out gl_PerVertex {\n vec4 gl_Position;\n %s%s};\n", clip_buf, cull_buf);
+ emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position;\n %s%s};\n", clip_buf, cull_buf);
} else {
- emit_hdrf(ctx, "%s%s", clip_buf, cull_buf);
+ emit_hdrf(glsl_strbufs, "%s%s", clip_buf, cull_buf);
}
- emit_hdr(ctx, "vec4 clip_dist_temp[2];\n");
+ emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
}
}
@@ -6244,7 +6316,12 @@ static const char *get_depth_layout(int depth_layout)
return dl[depth_layout -1];
}
-static void emit_ios_fs(struct dump_ctx *ctx)
+static void emit_ios_fs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ uint32_t *num_interps,
+ bool *winsys_adjust_y_emitted
+ )
{
uint32_t i;
@@ -6253,16 +6330,16 @@ static void emit_ios_fs(struct dump_ctx *ctx)
char comma = (upper_left && ctx->fs_pixel_center) ? ',' : ' ';
if (!ctx->cfg->use_gles)
- emit_hdrf(ctx, "layout(%s%c%s) in vec4 gl_FragCoord;\n",
+ emit_hdrf(glsl_strbufs, "layout(%s%c%s) in vec4 gl_FragCoord;\n",
upper_left ? "origin_upper_left" : "",
comma,
ctx->fs_pixel_center ? "pixel_center_integer" : "");
}
if (ctx->early_depth_stencil) {
- emit_hdr(ctx, "layout(early_fragment_tests) in;\n");
+ emit_hdr(glsl_strbufs, "layout(early_fragment_tests) in;\n");
}
- emit_ios_indirect_generics_input(ctx, "");
+ emit_ios_indirect_generics_input(ctx, glsl_strbufs, "");
for (i = 0; i < ctx->num_inputs; i++) {
if (!ctx->inputs[i].glsl_predefined_no_emit) {
@@ -6276,36 +6353,36 @@ static void emit_ios_fs(struct dump_ctx *ctx)
if (!prefix)
prefix = "";
auxprefix = get_aux_string(ctx->inputs[i].location);
- ctx->num_interps++;
+ (*num_interps)++;
}
char prefixes[64];
snprintf(prefixes, sizeof(prefixes), "%s %s", prefix, auxprefix);
- emit_ios_generic(ctx, io_in, prefixes, &ctx->inputs[i], "in", "");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_in, prefixes, &ctx->inputs[i], "in", "");
}
if (ctx->cfg->use_gles && !ctx->winsys_adjust_y_emitted &&
(ctx->key->coord_replace & (1 << ctx->inputs[i].sid))) {
- ctx->winsys_adjust_y_emitted = true;
- emit_hdr(ctx, "uniform float winsys_adjust_y;\n");
+ *winsys_adjust_y_emitted = true;
+ emit_hdr(glsl_strbufs, "uniform float winsys_adjust_y;\n");
}
}
if (vrend_shader_needs_alpha_func(ctx->key)) {
- emit_hdr(ctx, "uniform float alpha_ref_val;\n");
+ emit_hdr(glsl_strbufs, "uniform float alpha_ref_val;\n");
}
if (ctx->key->color_two_side) {
if (ctx->color_in_mask & 1)
- emit_hdr(ctx, "vec4 realcolor0;\n");
+ emit_hdr(glsl_strbufs, "vec4 realcolor0;\n");
if (ctx->color_in_mask & 2)
- emit_hdr(ctx, "vec4 realcolor1;\n");
+ emit_hdr(glsl_strbufs, "vec4 realcolor1;\n");
}
unsigned choices = ctx->fs_blend_equation_advanced;
while (choices) {
enum gl_advanced_blend_mode choice = (enum gl_advanced_blend_mode)u_bit_scan(&choices);
- emit_hdrf(ctx, "layout(blend_support_%s) out;\n", blend_to_name(choice));
+ emit_hdrf(glsl_strbufs, "layout(blend_support_%s) out;\n", blend_to_name(choice));
}
if (ctx->write_all_cbufs) {
@@ -6318,16 +6395,16 @@ static void emit_ios_fs(struct dump_ctx *ctx)
for (i = 0; i < (uint32_t)ctx->cfg->max_draw_buffers; i++) {
if (ctx->cfg->use_gles) {
if (ctx->key->fs_logicop_enabled)
- emit_hdrf(ctx, "%s fsout_tmp_c%d;\n", type, i);
+ emit_hdrf(glsl_strbufs, "%s fsout_tmp_c%d;\n", type, i);
if (logiop_require_inout(ctx->key)) {
const char *noncoherent = ctx->key->fs_logicop_emulate_coherent ? "" : ", noncoherent";
- emit_hdrf(ctx, "layout (location=%d%s) inout highp %s fsout_c%d;\n", i, noncoherent, type, i);
+ emit_hdrf(glsl_strbufs, "layout (location=%d%s) inout highp %s fsout_c%d;\n", i, noncoherent, type, i);
} else
- emit_hdrf(ctx, "layout (location=%d) out %s fsout_c%d;\n", i,
+ emit_hdrf(glsl_strbufs, "layout (location=%d) out %s fsout_c%d;\n", i,
type, i);
} else
- emit_hdrf(ctx, "out %s fsout_c%d;\n", type, i);
+ emit_hdrf(glsl_strbufs, "out %s fsout_c%d;\n", type, i);
}
} else {
for (i = 0; i < ctx->num_outputs; i++) {
@@ -6340,11 +6417,11 @@ static void emit_ios_fs(struct dump_ctx *ctx)
!ctx->cfg->has_dual_src_blend)
sprintf(prefix, "layout(location = %d)", ctx->outputs[i].sid);
- emit_ios_generic(ctx, io_out, prefix, &ctx->outputs[i],
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_out, prefix, &ctx->outputs[i],
ctx->outputs[i].fbfetch_used ? "inout" : "out", "");
} else if (ctx->outputs[i].invariant || ctx->outputs[i].precise) {
- emit_hdrf(ctx, "%s%s;\n",
+ emit_hdrf(glsl_strbufs, "%s%s;\n",
ctx->outputs[i].precise ? "precise " :
(ctx->outputs[i].invariant ? "invariant " : ""),
ctx->outputs[i].glsl_name);
@@ -6355,21 +6432,21 @@ static void emit_ios_fs(struct dump_ctx *ctx)
if (ctx->fs_depth_layout) {
const char *depth_layout = get_depth_layout(ctx->fs_depth_layout);
if (depth_layout)
- emit_hdrf(ctx, "layout (%s) out float gl_FragDepth;\n", depth_layout);
+ emit_hdrf(glsl_strbufs, "layout (%s) out float gl_FragDepth;\n", depth_layout);
}
if (ctx->num_in_clip_dist) {
if (ctx->key->prev_stage_num_clip_out) {
- emit_hdrf(ctx, "in float gl_ClipDistance[%d];\n", ctx->key->prev_stage_num_clip_out);
+ emit_hdrf(glsl_strbufs, "in float gl_ClipDistance[%d];\n", ctx->key->prev_stage_num_clip_out);
} else if (ctx->num_in_clip_dist > 4 && !ctx->key->prev_stage_num_cull_out) {
- emit_hdrf(ctx, "in float gl_ClipDistance[%d];\n", ctx->num_in_clip_dist);
+ emit_hdrf(glsl_strbufs, "in float gl_ClipDistance[%d];\n", ctx->num_in_clip_dist);
}
if (ctx->key->prev_stage_num_cull_out) {
- emit_hdrf(ctx, "in float gl_CullDistance[%d];\n", ctx->key->prev_stage_num_cull_out);
+ emit_hdrf(glsl_strbufs, "in float gl_CullDistance[%d];\n", ctx->key->prev_stage_num_cull_out);
}
if(ctx->fs_uses_clipdist_input)
- emit_hdr(ctx, "vec4 clip_dist_temp[2];\n");
+ emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
}
}
@@ -6379,7 +6456,13 @@ can_emit_generic_geom(const struct vrend_shader_io *io)
return io->stream == 0;
}
-static void emit_ios_geom(struct dump_ctx *ctx)
+static void emit_ios_geom(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ uint8_t front_back_color_emitted_flags[],
+ uint32_t *num_interps,
+ bool *has_pervertex,
+ bool *force_color_two_side)
{
uint32_t i;
char invocbuf[25];
@@ -6387,16 +6470,17 @@ static void emit_ios_geom(struct dump_ctx *ctx)
if (ctx->gs_num_invocations)
snprintf(invocbuf, 25, ", invocations = %d", ctx->gs_num_invocations);
- emit_hdrf(ctx, "layout(%s%s) in;\n", prim_to_name(ctx->gs_in_prim),
+ emit_hdrf(glsl_strbufs, "layout(%s%s) in;\n", prim_to_name(ctx->gs_in_prim),
ctx->gs_num_invocations > 1 ? invocbuf : "");
- emit_hdrf(ctx, "layout(%s, max_vertices = %d) out;\n", prim_to_name(ctx->gs_out_prim), ctx->gs_max_out_verts);
+ emit_hdrf(glsl_strbufs, "layout(%s, max_vertices = %d) out;\n", prim_to_name(ctx->gs_out_prim), ctx->gs_max_out_verts);
for (i = 0; i < ctx->num_inputs; i++) {
if (!ctx->inputs[i].glsl_predefined_no_emit) {
char postfix[64];
snprintf(postfix, sizeof(postfix), "[%d]", gs_input_prim_to_size(ctx->gs_in_prim));
- emit_ios_generic(ctx, io_in, "", &ctx->inputs[i], "in", postfix);
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios,
+ io_in, "", &ctx->inputs[i], "in", postfix);
}
}
@@ -6409,21 +6493,22 @@ static void emit_ios_geom(struct dump_ctx *ctx)
if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC ||
ctx->outputs[i].name == TGSI_SEMANTIC_COLOR ||
ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR) {
- ctx->num_interps++;
+ (*num_interps)++;
/* ugly leave spaces to patch interp in later */
prefix = INTERP_PREFIX;
}
- emit_hdrf(ctx, "layout (stream = %d) %s%s%sout vec4 %s;\n", ctx->outputs[i].stream, prefix,
+ emit_hdrf(glsl_strbufs, "layout (stream = %d) %s%s%sout vec4 %s;\n", ctx->outputs[i].stream, prefix,
ctx->outputs[i].precise ? "precise " : "",
ctx->outputs[i].invariant ? "invariant " : "",
ctx->outputs[i].glsl_name);
}
}
- emit_ios_generic_outputs(ctx, can_emit_generic_geom);
+ emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags,
+ force_color_two_side, num_interps, can_emit_generic_geom);
- emit_winsys_correction(ctx);
+ emit_winsys_correction(glsl_strbufs);
if (ctx->num_in_clip_dist || ctx->key->clip_plane_enable) {
int clip_dist, cull_dist;
@@ -6438,8 +6523,8 @@ static void emit_ios_geom(struct dump_ctx *ctx)
if (cull_dist)
snprintf(cull_var, 64, "float gl_CullDistance[%d];\n", cull_dist);
- ctx->has_pervertex = true;
- emit_hdrf(ctx, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s\n} gl_in[];\n", clip_var, cull_var);
+ (*has_pervertex) = true;
+ emit_hdrf(glsl_strbufs, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s\n} gl_in[];\n", clip_var, cull_var);
}
if (ctx->num_clip_dist) {
bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
@@ -6456,44 +6541,47 @@ static void emit_ios_geom(struct dump_ctx *ctx)
snprintf(cull_buf, 64, "out float gl_CullDistance[%d];\n", num_cull_dists);
} else
snprintf(clip_buf, 64, "out float gl_ClipDistance[%d];\n", num_clip_dists);
- emit_hdrf(ctx, "%s%s\n", clip_buf, cull_buf);
- emit_hdrf(ctx, "vec4 clip_dist_temp[2];\n");
+ emit_hdrf(glsl_strbufs, "%s%s\n", clip_buf, cull_buf);
+ emit_hdrf(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
}
}
-static void emit_ios_tcs(struct dump_ctx *ctx)
+static void emit_ios_tcs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ bool *has_pervertex)
{
uint32_t i;
- emit_ios_indirect_generics_input(ctx, "[]");
+ emit_ios_indirect_generics_input(ctx, glsl_strbufs, "[]");
for (i = 0; i < ctx->num_inputs; i++) {
if (!ctx->inputs[i].glsl_predefined_no_emit) {
if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH)
- emit_ios_patch(ctx, "", &ctx->inputs[i], "in", ctx->inputs[i].last - ctx->inputs[i].first + 1);
+ emit_ios_patch(glsl_strbufs, "", &ctx->inputs[i], "in", ctx->inputs[i].last - ctx->inputs[i].first + 1);
else
- emit_ios_generic(ctx, io_in, "", &ctx->inputs[i], "in", "[]");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_in, "", &ctx->inputs[i], "in", "[]");
}
}
- emit_hdrf(ctx, "layout(vertices = %d) out;\n", ctx->tcs_vertices_out);
+ emit_hdrf(glsl_strbufs, "layout(vertices = %d) out;\n", ctx->tcs_vertices_out);
- emit_ios_indirect_generics_output(ctx, "[]");
+ emit_ios_indirect_generics_output(ctx, glsl_strbufs, "[]");
- if (ctx->patch_output_range.used)
- emit_ios_patch(ctx, "patch", &ctx->patch_output_range.io, "out",
- ctx->patch_output_range.io.last - ctx->patch_output_range.io.sid + 1);
+ if (ctx->patch_ios.output_range.used)
+ emit_ios_patch(glsl_strbufs, "patch", &ctx->patch_ios.output_range.io, "out",
+ ctx->patch_ios.output_range.io.last - ctx->patch_ios.output_range.io.sid + 1);
for (i = 0; i < ctx->num_outputs; i++) {
if (!ctx->outputs[i].glsl_predefined_no_emit) {
if (ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) {
- emit_ios_patch(ctx, "patch", &ctx->outputs[i], "out",
+ emit_ios_patch(glsl_strbufs, "patch", &ctx->outputs[i], "out",
ctx->outputs[i].last - ctx->outputs[i].first + 1);
} else
- emit_ios_generic(ctx, io_out, "", &ctx->outputs[i], "out", "[]");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_out, "", &ctx->outputs[i], "out", "[]");
} else if (ctx->outputs[i].invariant || ctx->outputs[i].precise) {
- emit_hdrf(ctx, "%s%s;\n",
+ emit_hdrf(glsl_strbufs, "%s%s;\n",
ctx->outputs[i].precise ? "precise " :
(ctx->outputs[i].invariant ? "invariant " : ""),
ctx->outputs[i].glsl_name);
@@ -6512,45 +6600,52 @@ static void emit_ios_tcs(struct dump_ctx *ctx)
if (cull_dist)
snprintf(cull_var, 64, "float gl_CullDistance[%d];\n", cull_dist);
- ctx->has_pervertex = true;
- emit_hdrf(ctx, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s} gl_in[];\n", clip_var, cull_var);
+ *has_pervertex = true;
+ emit_hdrf(glsl_strbufs, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s} gl_in[];\n", clip_var, cull_var);
}
if (ctx->num_clip_dist && ctx->key->next_stage_pervertex_in) {
- emit_hdrf(ctx, "out gl_PerVertex {\n vec4 gl_Position;\n float gl_ClipDistance[%d];\n} gl_out[];\n", ctx->num_clip_dist);
- emit_hdr(ctx, "vec4 clip_dist_temp[2];\n");
+ emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position;\n float gl_ClipDistance[%d];\n} gl_out[];\n", ctx->num_clip_dist);
+ emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
}
}
-static void emit_ios_tes(struct dump_ctx *ctx)
+static void emit_ios_tes(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ uint8_t front_back_color_emitted_flags[],
+ uint32_t *num_interps,
+ bool *has_pervertex,
+ bool *force_color_two_side)
{
uint32_t i;
- if (ctx->patch_input_range.used)
- emit_ios_patch(ctx, "patch", &ctx->patch_input_range.io, "in",
- ctx->patch_input_range.io.last - ctx->patch_input_range.io.sid + 1);
+ if (ctx->patch_ios.input_range.used)
+ emit_ios_patch(glsl_strbufs, "patch", &ctx->patch_ios.input_range.io, "in",
+ ctx->patch_ios.input_range.io.last - ctx->patch_ios.input_range.io.sid + 1);
- if (ctx->generic_input_range.used)
- emit_ios_indirect_generics_input(ctx, "[]");
+ if (generic_ios->input_range.used)
+ emit_ios_indirect_generics_input(ctx, glsl_strbufs, "[]");
for (i = 0; i < ctx->num_inputs; i++) {
if (!ctx->inputs[i].glsl_predefined_no_emit) {
if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH)
- emit_ios_patch(ctx, "patch", &ctx->inputs[i], "in",
+ emit_ios_patch(glsl_strbufs, "patch", &ctx->inputs[i], "in",
ctx->inputs[i].last - ctx->inputs[i].first + 1);
else
- emit_ios_generic(ctx, io_in, "", &ctx->inputs[i], "in", "[]");
+ emit_ios_generic(ctx, glsl_strbufs, generic_ios, io_in, "", &ctx->inputs[i], "in", "[]");
}
}
- emit_hdrf(ctx, "layout(%s, %s, %s%s) in;\n",
+ emit_hdrf(glsl_strbufs, "layout(%s, %s, %s%s) in;\n",
prim_to_tes_name(ctx->tes_prim_mode),
get_spacing_string(ctx->tes_spacing),
ctx->tes_vertex_order ? "cw" : "ccw",
ctx->tes_point_mode ? ", point_mode" : "");
- emit_ios_generic_outputs(ctx, can_emit_generic_default);
+ emit_ios_generic_outputs(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags,
+ force_color_two_side, num_interps, can_emit_generic_default);
- emit_winsys_correction(ctx);
+ emit_winsys_correction(glsl_strbufs);
if (ctx->num_in_clip_dist) {
int clip_dist, cull_dist;
@@ -6564,84 +6659,96 @@ static void emit_ios_tes(struct dump_ctx *ctx)
if (cull_dist)
snprintf(cull_var, 64, "float gl_CullDistance[%d];\n", cull_dist);
- ctx->has_pervertex = true;
- emit_hdrf(ctx, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s} gl_in[];\n", clip_var, cull_var);
+ *has_pervertex = true;
+ emit_hdrf(glsl_strbufs, "in gl_PerVertex {\n vec4 gl_Position; \n %s%s} gl_in[];\n", clip_var, cull_var);
}
if (ctx->num_clip_dist && ctx->key->next_stage_pervertex_in) {
- emit_hdrf(ctx, "out gl_PerVertex {\n vec4 gl_Position;\n float gl_ClipDistance[%d];\n} gl_out[];\n", ctx->num_clip_dist);
- emit_hdr(ctx, "vec4 clip_dist_temp[2];\n");
+ emit_hdrf(glsl_strbufs, "out gl_PerVertex {\n vec4 gl_Position;\n float gl_ClipDistance[%d];\n} gl_out[];\n", ctx->num_clip_dist);
+ emit_hdr(glsl_strbufs, "vec4 clip_dist_temp[2];\n");
}
}
-static void emit_ios_cs(struct dump_ctx *ctx)
+static void emit_ios_cs(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs)
{
- emit_hdrf(ctx, "layout (local_size_x = %d, local_size_y = %d, local_size_z = %d) in;\n",
+ emit_hdrf(glsl_strbufs, "layout (local_size_x = %d, local_size_y = %d, local_size_z = %d) in;\n",
ctx->local_cs_block_size[0], ctx->local_cs_block_size[1], ctx->local_cs_block_size[2]);
if (ctx->req_local_mem) {
enum vrend_type_qualifier type = ctx->integer_memory ? INT : UINT;
- emit_hdrf(ctx, "shared %s values[%d];\n", get_string(type), ctx->req_local_mem / 4);
+ emit_hdrf(glsl_strbufs, "shared %s values[%d];\n", get_string(type), ctx->req_local_mem / 4);
}
}
-static void emit_ios(struct dump_ctx *ctx)
+static int emit_ios(const struct dump_ctx *ctx,
+ struct vrend_glsl_strbufs *glsl_strbufs,
+ struct vrend_generic_ios *generic_ios,
+ uint8_t front_back_color_emitted_flags[],
+ uint32_t *num_interps,
+ bool *has_pervertex,
+ bool *force_color_two_side,
+ bool *winsys_adjust_y_emitted,
+ uint32_t *shadow_samp_mask)
{
- ctx->num_interps = 0;
+ *num_interps = 0;
+ int glsl_ver_required = ctx->glsl_ver_required;
if (ctx->so && ctx->so->num_outputs >= PIPE_MAX_SO_OUTPUTS) {
vrend_printf( "Num outputs exceeded, max is %u\n", PIPE_MAX_SO_OUTPUTS);
- set_hdr_error(ctx);
- return;
+ set_hdr_error(glsl_strbufs);
+ return glsl_ver_required;
}
switch (ctx->prog_type) {
case TGSI_PROCESSOR_VERTEX:
- emit_ios_vs(ctx);
+ emit_ios_vs(ctx, glsl_strbufs, generic_ios, num_interps, front_back_color_emitted_flags, force_color_two_side);
break;
case TGSI_PROCESSOR_FRAGMENT:
- emit_ios_fs(ctx);
+ emit_ios_fs(ctx, glsl_strbufs, generic_ios, num_interps, winsys_adjust_y_emitted);
break;
case TGSI_PROCESSOR_GEOMETRY:
- emit_ios_geom(ctx);
+ emit_ios_geom(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags, num_interps, has_pervertex, force_color_two_side);
break;
case TGSI_PROCESSOR_TESS_CTRL:
- emit_ios_tcs(ctx);
+ emit_ios_tcs(ctx, glsl_strbufs, generic_ios, has_pervertex);
break;
case TGSI_PROCESSOR_TESS_EVAL:
- emit_ios_tes(ctx);
+ emit_ios_tes(ctx, glsl_strbufs, generic_ios, front_back_color_emitted_flags, num_interps, has_pervertex, force_color_two_side);
break;
case TGSI_PROCESSOR_COMPUTE:
- emit_ios_cs(ctx);
+ emit_ios_cs(ctx, glsl_strbufs);
break;
default:
fprintf(stderr, "Unknown shader processor %d\n", ctx->prog_type);
- set_hdr_error(ctx);
- return;
+ set_hdr_error(glsl_strbufs);
+ return glsl_ver_required;
}
- if (ctx->generic_outputs_expected_mask &&
- (ctx->generic_outputs_expected_mask != ctx->generic_outputs_emitted_mask)) {
+ if (generic_ios->outputs_expected_mask &&
+ (generic_ios->outputs_expected_mask != generic_ios->outputs_emitted_mask)) {
for (int i = 0; i < 31; ++i) {
uint32_t mask = 1 << i;
- bool expecting = ctx->generic_outputs_expected_mask & mask;
- if (expecting & !(ctx->generic_outputs_emitted_mask & mask))
- emit_hdrf(ctx, " out vec4 %s_g%dA0_f%s;\n",
+ bool expecting = generic_ios->outputs_expected_mask & mask;
+ if (expecting & !(generic_ios->outputs_emitted_mask & mask))
+ emit_hdrf(glsl_strbufs, " out vec4 %s_g%dA0_f%s;\n",
get_stage_output_name_prefix(ctx->prog_type), i,
ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL ? "[]" : "");
}
}
- emit_ios_streamout(ctx);
- emit_ios_common(ctx);
+ emit_ios_streamout(ctx, glsl_strbufs);
+ glsl_ver_required = emit_ios_common(ctx, glsl_strbufs, shadow_samp_mask);
if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT &&
ctx->key->pstipple_tex == true) {
- emit_hdr(ctx, "uniform sampler2D pstipple_sampler;\nfloat stip_temp;\n");
+ emit_hdr(glsl_strbufs, "uniform sampler2D pstipple_sampler;\nfloat stip_temp;\n");
}
+
+ return glsl_ver_required;
}
-static boolean fill_fragment_interpolants(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
+static boolean fill_fragment_interpolants(const struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
{
uint32_t i, index = 0;
@@ -6666,7 +6773,7 @@ static boolean fill_fragment_interpolants(struct dump_ctx *ctx, struct vrend_sha
return true;
}
-static boolean fill_interpolants(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
+static boolean fill_interpolants(const struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
{
boolean ret;
@@ -6722,7 +6829,7 @@ static boolean analyze_instruction(struct tgsi_iterate_context *iter,
return true;
}
-static void fill_sinfo(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
+static void fill_sinfo(const struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
{
sinfo->num_ucp = ctx->key->clip_plane_enable ? 8 : 0;
sinfo->has_pervertex_in = ctx->has_pervertex;
@@ -6739,15 +6846,15 @@ static void fill_sinfo(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
sinfo->ubo_indirect = ctx->info.dimension_indirect_files & (1 << TGSI_FILE_CONSTANT);
- if (ctx->generic_input_range.used)
- sinfo->num_indirect_generic_inputs = ctx->generic_input_range.io.last - ctx->generic_input_range.io.sid + 1;
- if (ctx->patch_input_range.used)
- sinfo->num_indirect_patch_inputs = ctx->patch_input_range.io.last - ctx->patch_input_range.io.sid + 1;
+ if (ctx->generic_ios.input_range.used)
+ sinfo->num_indirect_generic_inputs = ctx->generic_ios.input_range.io.last - ctx->generic_ios.input_range.io.sid + 1;
+ if (ctx->patch_ios.input_range.used)
+ sinfo->num_indirect_patch_inputs = ctx->patch_ios.input_range.io.last - ctx->patch_ios.input_range.io.sid + 1;
- if (ctx->generic_output_range.used)
- sinfo->num_indirect_generic_outputs = ctx->generic_output_range.io.last - ctx->generic_output_range.io.sid + 1;
- if (ctx->patch_output_range.used)
- sinfo->num_indirect_patch_outputs = ctx->patch_output_range.io.last - ctx->patch_output_range.io.sid + 1;
+ if (ctx->generic_ios.output_range.used)
+ sinfo->num_indirect_generic_outputs = ctx->generic_ios.output_range.io.last - ctx->generic_ios.output_range.io.sid + 1;
+ if (ctx->patch_ios.output_range.used)
+ sinfo->num_indirect_patch_outputs = ctx->patch_ios.output_range.io.last - ctx->patch_ios.output_range.io.sid + 1;
sinfo->num_inputs = ctx->num_inputs;
sinfo->num_interps = ctx->num_interps;
@@ -6794,7 +6901,7 @@ static void fill_sinfo(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
free(sinfo->image_arrays);
sinfo->image_arrays = ctx->image_arrays;
sinfo->num_image_arrays = ctx->num_image_arrays;
- sinfo->generic_inputs_emitted_mask = ctx->generic_inputs_emitted_mask;
+ sinfo->generic_inputs_emitted_mask = ctx->generic_ios.inputs_emitted_mask;
for (unsigned i = 0; i < ctx->num_outputs; ++i) {
if (ctx->outputs[i].invariant)
@@ -6802,29 +6909,29 @@ static void fill_sinfo(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
}
}
-static bool allocate_strbuffers(struct dump_ctx* ctx)
+static bool allocate_strbuffers(struct vrend_glsl_strbufs* glsl_strbufs)
{
- if (!strbuf_alloc(&ctx->glsl_main, 4096))
+ if (!strbuf_alloc(&glsl_strbufs->glsl_main, 4096))
return false;
- if (strbuf_get_error(&ctx->glsl_main))
+ if (strbuf_get_error(&glsl_strbufs->glsl_main))
return false;
- if (!strbuf_alloc(&ctx->glsl_hdr, 1024))
+ if (!strbuf_alloc(&glsl_strbufs->glsl_hdr, 1024))
return false;
- if (!strbuf_alloc(&ctx->glsl_ver_ext, 1024))
+ if (!strbuf_alloc(&glsl_strbufs->glsl_ver_ext, 1024))
return false;
return true;
}
-static void set_strbuffers(MAYBE_UNUSED const struct vrend_context *rctx, struct dump_ctx* ctx,
+static void set_strbuffers(MAYBE_UNUSED const struct vrend_context *rctx, const struct vrend_glsl_strbufs* glsl_strbufs,
struct vrend_strarray *shader)
{
- strarray_addstrbuf(shader, &ctx->glsl_ver_ext);
- strarray_addstrbuf(shader, &ctx->glsl_hdr);
- strarray_addstrbuf(shader, &ctx->glsl_main);
+ strarray_addstrbuf(shader, &glsl_strbufs->glsl_ver_ext);
+ strarray_addstrbuf(shader, &glsl_strbufs->glsl_hdr);
+ strarray_addstrbuf(shader, &glsl_strbufs->glsl_main);
VREND_DEBUG(dbg_shader_glsl, rctx, "GLSL:");
VREND_DEBUG_EXT(dbg_shader_glsl, rctx, strarray_dump(shader));
VREND_DEBUG(dbg_shader_glsl, rctx, "\n");
@@ -6879,12 +6986,12 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
ctx.has_sample_input = false;
ctx.req_local_mem = req_local_mem;
ctx.guest_sent_io_arrays = key->guest_sent_io_arrays;
- ctx.generic_outputs_expected_mask = key->generic_outputs_expected_mask;
+ ctx.generic_ios.outputs_expected_mask = key->generic_outputs_expected_mask;
tgsi_scan_shader(tokens, &ctx.info);
/* if we are in core profile mode we should use GLSL 1.40 */
if (cfg->use_core_profile && cfg->glsl_version >= 140)
- require_glsl_ver(&ctx, 140);
+ ctx.glsl_ver_required = require_glsl_ver(&ctx, 140);
if (sinfo->so_info.num_outputs) {
ctx.so = &sinfo->so_info;
@@ -6895,17 +7002,17 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
ctx.so_names = NULL;
if (ctx.info.dimension_indirect_files & (1 << TGSI_FILE_CONSTANT))
- require_glsl_ver(&ctx, 150);
+ ctx.glsl_ver_required = require_glsl_ver(&ctx, 150);
if (ctx.info.indirect_files & (1 << TGSI_FILE_BUFFER) ||
ctx.info.indirect_files & (1 << TGSI_FILE_IMAGE)) {
- require_glsl_ver(&ctx, 150);
+ ctx.glsl_ver_required = require_glsl_ver(&ctx, 150);
ctx.shader_req_bits |= SHADER_REQ_GPU_SHADER5;
}
if (ctx.info.indirect_files & (1 << TGSI_FILE_SAMPLER))
ctx.shader_req_bits |= SHADER_REQ_GPU_SHADER5;
- if (!allocate_strbuffers(&ctx))
+ if (!allocate_strbuffers(&ctx.glsl_strbufs))
goto fail;
bret = tgsi_iterate_shader(tokens, &ctx.iter);
@@ -6915,10 +7022,15 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
for (size_t i = 0; i < ARRAY_SIZE(ctx.src_bufs); ++i)
strbuf_free(ctx.src_bufs + i);
- emit_header(&ctx);
- emit_ios(&ctx);
+ emit_header(&ctx, &ctx.glsl_strbufs);
+ ctx.glsl_ver_required = emit_ios(&ctx, &ctx.glsl_strbufs, &ctx.generic_ios,
+ ctx.front_back_color_emitted_flags,
+ &ctx.num_interps, &ctx.has_pervertex,
+ &ctx.force_color_two_side,
+ &ctx.winsys_adjust_y_emitted,
+ &ctx.shadow_samp_mask);
- if (strbuf_get_error(&ctx.glsl_hdr))
+ if (strbuf_get_error(&ctx.glsl_strbufs.glsl_hdr))
goto fail;
bret = fill_interpolants(&ctx, sinfo);
@@ -6928,7 +7040,7 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
free(ctx.temp_ranges);
fill_sinfo(&ctx, sinfo);
- set_strbuffers(rctx, &ctx, shader);
+ set_strbuffers(rctx, &ctx.glsl_strbufs, shader);
if (ctx.prog_type == TGSI_PROCESSOR_GEOMETRY) {
vrend_patch_vertex_shader_interpolants(rctx,
@@ -6957,9 +7069,9 @@ bool vrend_convert_shader(const struct vrend_context *rctx,
return true;
fail:
- strbuf_free(&ctx.glsl_main);
- strbuf_free(&ctx.glsl_hdr);
- strbuf_free(&ctx.glsl_ver_ext);
+ strbuf_free(&ctx.glsl_strbufs.glsl_main);
+ strbuf_free(&ctx.glsl_strbufs.glsl_hdr);
+ strbuf_free(&ctx.glsl_strbufs.glsl_ver_ext);
free(ctx.so_names);
free(ctx.temp_ranges);
return false;
@@ -7212,7 +7324,7 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *rctx,
ctx.ssbo_atomic_array_base = 0xffffffff;
ctx.has_sample_input = false;
- if (!allocate_strbuffers(&ctx))
+ if (!allocate_strbuffers(&ctx.glsl_strbufs))
goto fail;
tgsi_iterate_shader(vs_tokens, &ctx.iter);
@@ -7224,10 +7336,15 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *rctx,
handle_io_arrays(&ctx);
- emit_header(&ctx);
- emit_ios(&ctx);
+ emit_header(&ctx, &ctx.glsl_strbufs);
+ ctx.glsl_ver_required = emit_ios(&ctx, &ctx.glsl_strbufs, &ctx.generic_ios,
+ ctx.front_back_color_emitted_flags,
+ &ctx.num_interps, &ctx.has_pervertex,
+ &ctx.force_color_two_side,
+ &ctx.winsys_adjust_y_emitted,
+ &ctx.shadow_samp_mask);
- emit_buf(&ctx, "void main() {\n");
+ emit_buf(&ctx.glsl_strbufs, "void main() {\n");
for (unsigned int i = 0; i < ctx.num_inputs; ++i) {
const char *out_prefix = "";
@@ -7243,13 +7360,13 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *rctx,
}
if (ctx.inputs[i].first == ctx.inputs[i].last) {
- emit_buff(&ctx, "%s%s%s = %s%s%s;\n",
+ emit_buff(&ctx.glsl_strbufs, "%s%s%s = %s%s%s;\n",
out_prefix, ctx.outputs[i].glsl_name, postfix,
in_prefix, ctx.inputs[i].glsl_name, postfix);
} else {
unsigned size = ctx.inputs[i].last == ctx.inputs[i].first + 1;
for (unsigned int k = 0; k < size; ++k) {
- emit_buff(&ctx, "%s%s%s[%d] = %s%s%s[%d];\n",
+ emit_buff(&ctx.glsl_strbufs, "%s%s%s[%d] = %s%s%s[%d];\n",
out_prefix, ctx.outputs[i].glsl_name, postfix, k,
in_prefix, ctx.inputs[i].glsl_name, postfix, k);
}
@@ -7257,20 +7374,20 @@ bool vrend_shader_create_passthrough_tcs(const struct vrend_context *rctx,
}
for (int i = 0; i < 4; ++i)
- emit_buff(&ctx, "gl_TessLevelOuter[%d] = %f;\n", i, tess_factors[i]);
+ emit_buff(&ctx.glsl_strbufs, "gl_TessLevelOuter[%d] = %f;\n", i, tess_factors[i]);
for (int i = 0; i < 2; ++i)
- emit_buff(&ctx, "gl_TessLevelInner[%d] = %f;\n", i, tess_factors[i + 4]);
+ emit_buff(&ctx.glsl_strbufs, "gl_TessLevelInner[%d] = %f;\n", i, tess_factors[i + 4]);
- emit_buf(&ctx, "}\n");
+ emit_buf(&ctx.glsl_strbufs, "}\n");
fill_sinfo(&ctx, sinfo);
- set_strbuffers(rctx, &ctx, shader);
+ set_strbuffers(rctx, &ctx.glsl_strbufs, shader);
return true;
fail:
- strbuf_free(&ctx.glsl_main);
- strbuf_free(&ctx.glsl_hdr);
- strbuf_free(&ctx.glsl_ver_ext);
+ strbuf_free(&ctx.glsl_strbufs.glsl_main);
+ strbuf_free(&ctx.glsl_strbufs.glsl_hdr);
+ strbuf_free(&ctx.glsl_strbufs.glsl_ver_ext);
free(ctx.so_names);
free(ctx.temp_ranges);
return false;
diff --git a/src/vrend_shader.h b/src/vrend_shader.h
index 9f199c46..fd2356b7 100644
--- a/src/vrend_shader.h
+++ b/src/vrend_shader.h
@@ -118,6 +118,7 @@ struct vrend_shader_info {
};
struct vrend_shader_key {
+ bool fs_prim_is_points;
uint32_t coord_replace;
bool invert_fs_origin;
bool pstipple_tex;
diff --git a/src/vrend_strbuf.h b/src/vrend_strbuf.h
index 51c82f89..26296fae 100644
--- a/src/vrend_strbuf.h
+++ b/src/vrend_strbuf.h
@@ -175,7 +175,7 @@ static inline bool strarray_alloc(struct vrend_strarray *sa, int init_alloc)
return true;
}
-static inline bool strarray_addstrbuf(struct vrend_strarray *sa, struct vrend_strbuf *sb)
+static inline bool strarray_addstrbuf(struct vrend_strarray *sa, const struct vrend_strbuf *sb)
{
assert(sa->num_strings < sa->num_alloced_strings);
if (sa->num_strings >= sa->num_alloced_strings)
diff --git a/src/vrend_winsys.c b/src/vrend_winsys.c
new file mode 100644
index 00000000..4e4bbc10
--- /dev/null
+++ b/src/vrend_winsys.c
@@ -0,0 +1,200 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vrend_winsys.h"
+
+#ifdef HAVE_EPOXY_GLX_H
+#include "vrend_winsys_glx.h"
+#endif
+
+#include <stddef.h>
+
+enum {
+ CONTEXT_NONE,
+ CONTEXT_EGL,
+ CONTEXT_GLX
+};
+
+static int use_context = CONTEXT_NONE;
+
+#ifdef HAVE_EPOXY_EGL_H
+struct virgl_egl *egl = NULL;
+struct virgl_gbm *gbm = NULL;
+#endif
+
+#ifdef HAVE_EPOXY_GLX_H
+static struct virgl_glx *glx_info = NULL;
+#endif
+
+int vrend_winsys_init(uint32_t flags, int preferred_fd)
+{
+ if (flags & VIRGL_RENDERER_USE_EGL) {
+#ifdef HAVE_EPOXY_EGL_H
+ /*
+ * If the user specifies a preferred DRM fd and we can't use it, fail. If the user doesn't
+ * specify an fd, it's possible to initialize EGL without one.
+ */
+ gbm = virgl_gbm_init(preferred_fd);
+ if (preferred_fd > 0 && !gbm)
+ return -1;
+
+ egl = virgl_egl_init(gbm, flags & VIRGL_RENDERER_USE_SURFACELESS,
+ flags & VIRGL_RENDERER_USE_GLES);
+ if (!egl) {
+ if (gbm) {
+ virgl_gbm_fini(gbm);
+ gbm = NULL;
+ }
+
+ return -1;
+ }
+
+ use_context = CONTEXT_EGL;
+#else
+ vrend_printf( "EGL is not supported on this platform\n");
+ return -1;
+#endif
+ } else if (flags & VIRGL_RENDERER_USE_GLX) {
+#ifdef HAVE_EPOXY_GLX_H
+ glx_info = virgl_glx_init();
+ if (!glx_info)
+ return -1;
+ use_context = CONTEXT_GLX;
+#else
+ vrend_printf( "GLX is not supported on this platform\n");
+ return -1;
+#endif
+ }
+
+ return 0;
+}
+
+void vrend_winsys_cleanup(void)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (use_context == CONTEXT_EGL) {
+ virgl_egl_destroy(egl);
+ egl = NULL;
+ use_context = CONTEXT_NONE;
+ if (gbm) {
+ virgl_gbm_fini(gbm);
+ gbm = NULL;
+ }
+ }
+#endif
+#ifdef HAVE_EPOXY_GLX_H
+ if (use_context == CONTEXT_GLX) {
+ virgl_glx_destroy(glx_info);
+ glx_info = NULL;
+ use_context = CONTEXT_NONE;
+ }
+#endif
+}
+
+virgl_renderer_gl_context vrend_winsys_create_context(struct virgl_gl_ctx_param *param)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (use_context == CONTEXT_EGL)
+ return virgl_egl_create_context(egl, param);
+#endif
+#ifdef HAVE_EPOXY_GLX_H
+ if (use_context == CONTEXT_GLX)
+ return virgl_glx_create_context(glx_info, param);
+#endif
+ return NULL;
+}
+
+void vrend_winsys_destroy_context(virgl_renderer_gl_context ctx)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (use_context == CONTEXT_EGL) {
+ virgl_egl_destroy_context(egl, ctx);
+ return;
+ }
+#endif
+#ifdef HAVE_EPOXY_GLX_H
+ if (use_context == CONTEXT_GLX) {
+ virgl_glx_destroy_context(glx_info, ctx);
+ return;
+ }
+#endif
+}
+
+int vrend_winsys_make_context_current(virgl_renderer_gl_context ctx)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (use_context == CONTEXT_EGL)
+ return virgl_egl_make_context_current(egl, ctx);
+#endif
+#ifdef HAVE_EPOXY_GLX_H
+ if (use_context == CONTEXT_GLX)
+ return virgl_glx_make_context_current(glx_info, ctx);
+#endif
+ return -1;
+}
+
+int vrend_winsys_has_gl_colorspace(void)
+{
+ bool egl_colorspace = false;
+#ifdef HAVE_EPOXY_EGL_H
+ if (egl)
+ egl_colorspace = virgl_has_egl_khr_gl_colorspace(egl);
+#endif
+ return use_context == CONTEXT_NONE ||
+ use_context == CONTEXT_GLX ||
+ (use_context == CONTEXT_EGL && egl_colorspace);
+}
+
+int vrend_winsys_get_fourcc_for_texture(uint32_t tex_id, uint32_t format, int *fourcc)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (use_context == CONTEXT_EGL)
+ return virgl_egl_get_fourcc_for_texture(egl, tex_id, format, fourcc);
+#endif
+ return 0;
+}
+
+int vrend_winsys_get_fd_for_texture(uint32_t tex_id, int *fd)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (!egl)
+ return -1;
+
+ return virgl_egl_get_fd_for_texture(egl, tex_id, fd);
+#else
+ return -1;
+#endif
+}
+
+int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset)
+{
+#ifdef HAVE_EPOXY_EGL_H
+ if (!egl)
+ return -1;
+
+ return virgl_egl_get_fd_for_texture2(egl, tex_id, fd, stride, offset);
+#else
+ return -1;
+#endif
+}
diff --git a/src/vrend_winsys.h b/src/vrend_winsys.h
new file mode 100644
index 00000000..24b1e5bb
--- /dev/null
+++ b/src/vrend_winsys.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VREND_WINSYS_H
+#define VREND_WINSYS_H
+
+#include "config.h"
+
+#ifdef HAVE_EPOXY_EGL_H
+#include "vrend_winsys_gbm.h"
+#include "vrend_winsys_egl.h"
+#endif
+
+#include "virglrenderer.h"
+
+struct virgl_gl_ctx_param;
+
+#ifdef HAVE_EPOXY_EGL_H
+extern struct virgl_egl *egl;
+extern struct virgl_gbm *gbm;
+#endif
+
+int vrend_winsys_init(uint32_t flags, int preferred_fd);
+void vrend_winsys_cleanup(void);
+
+virgl_renderer_gl_context vrend_winsys_create_context(struct virgl_gl_ctx_param *param);
+void vrend_winsys_destroy_context(virgl_renderer_gl_context ctx);
+int vrend_winsys_make_context_current(virgl_renderer_gl_context ctx);
+
+int vrend_winsys_has_gl_colorspace(void);
+
+int vrend_winsys_get_fourcc_for_texture(uint32_t tex_id, uint32_t format, int *fourcc);
+int vrend_winsys_get_fd_for_texture(uint32_t tex_id, int *fd);
+int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset);
+
+#endif /* VREND_WINSYS_H */
diff --git a/src/virgl_egl_context.c b/src/vrend_winsys_egl.c
index 3949c224..6a6e7f77 100644
--- a/src/virgl_egl_context.c
+++ b/src/vrend_winsys_egl.c
@@ -34,15 +34,14 @@
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
-#include <epoxy/egl.h>
#include <xf86drm.h>
#include "util/u_memory.h"
#include "virglrenderer.h"
-#include "virgl_egl.h"
+#include "vrend_winsys_egl.h"
#include "virgl_hw.h"
-#include "virgl_gbm.h"
+#include "vrend_winsys_gbm.h"
#include "virgl_util.h"
#define EGL_KHR_SURFACELESS_CONTEXT BIT(0)
@@ -53,7 +52,7 @@
#define EGL_KHR_GL_COLORSPACE BIT(5)
#define EGL_EXT_IMAGE_DMA_BUF_IMPORT BIT(6)
#define EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS BIT(7)
-#define EGL_KHR_FENCE_SYNC BIT(8)
+#define EGL_KHR_FENCE_SYNC_ANDROID BIT(8)
static const struct {
uint32_t bit;
@@ -66,7 +65,7 @@ static const struct {
{ EGL_KHR_GL_COLORSPACE, "EGL_KHR_gl_colorspace" },
{ EGL_EXT_IMAGE_DMA_BUF_IMPORT, "EGL_EXT_image_dma_buf_import" },
{ EGL_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS, "EGL_EXT_image_dma_buf_import_modifiers" },
- { EGL_KHR_FENCE_SYNC, "EGL_KHR_fence_sync"}
+ { EGL_KHR_FENCE_SYNC_ANDROID, "EGL_ANDROID_native_fence_sync"}
};
struct virgl_egl {
@@ -75,7 +74,7 @@ struct virgl_egl {
EGLConfig egl_conf;
EGLContext egl_ctx;
uint32_t extension_bits;
- bool need_fence_and_wait_external;
+ EGLSyncKHR signaled_fence;
};
static bool virgl_egl_has_extension_in_string(const char *haystack, const char *needle)
@@ -218,10 +217,6 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
if (virgl_egl_init_extensions(egl, extensions))
goto fail;
- // ARM Mali platforms need explicit synchronization prior to mapping.
- if (!strcmp(eglQueryString(egl->egl_display, EGL_VENDOR), "ARM"))
- egl->need_fence_and_wait_external = true;
-
if (gles)
api = EGL_OPENGL_ES_API;
else
@@ -242,6 +237,16 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
eglMakeCurrent(egl->egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
egl->egl_ctx);
+
+ if (gles && virgl_egl_supports_fences(egl)) {
+ egl->signaled_fence = eglCreateSyncKHR(egl->egl_display,
+ EGL_SYNC_NATIVE_FENCE_ANDROID, NULL);
+ if (!egl->signaled_fence) {
+ vrend_printf("Failed to create signaled fence");
+ goto fail;
+ }
+ }
+
return egl;
fail:
@@ -251,6 +256,9 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
void virgl_egl_destroy(struct virgl_egl *egl)
{
+ if (egl->signaled_fence) {
+ eglDestroySyncKHR(egl->egl_display, egl->signaled_fence);
+ }
eglMakeCurrent(egl->egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
EGL_NO_CONTEXT);
eglDestroyContext(egl->egl_display, egl->egl_ctx);
@@ -513,30 +521,38 @@ void virgl_egl_image_destroy(struct virgl_egl *egl, void *image)
}
#endif
-bool virgl_egl_need_fence_and_wait_external(struct virgl_egl *egl)
+bool virgl_egl_supports_fences(struct virgl_egl *egl)
{
- return (egl && egl->need_fence_and_wait_external);
+ return (egl && has_bit(egl->extension_bits, EGL_KHR_FENCE_SYNC_ANDROID));
}
-void *virgl_egl_fence(struct virgl_egl *egl)
+EGLSyncKHR virgl_egl_fence_create(struct virgl_egl *egl)
{
- const EGLint attrib_list[] = {EGL_SYNC_CONDITION_KHR,
- EGL_SYNC_PRIOR_COMMANDS_COMPLETE_KHR,
- EGL_NONE};
- EGLSyncKHR fence = EGL_NO_SYNC_KHR;
-
- if (!egl || !has_bit(egl->extension_bits, EGL_KHR_FENCE_SYNC)) {
- return (void *)fence;
+ if (!egl || !has_bit(egl->extension_bits, EGL_KHR_FENCE_SYNC_ANDROID)) {
+ return EGL_NO_SYNC_KHR;
}
- return (void *)eglCreateSyncKHR(egl->egl_display, EGL_SYNC_FENCE_KHR, attrib_list);
+ return eglCreateSyncKHR(egl->egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, NULL);
}
-void virgl_egl_wait_fence(struct virgl_egl *egl, void* sync)
-{
- EGLSyncKHR fence = (EGLSyncKHR) sync;
- if (fence == EGL_NO_SYNC_KHR)
- return;
- eglWaitSyncKHR(egl->egl_display, fence, 0);
+void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence) {
eglDestroySyncKHR(egl->egl_display, fence);
}
+
+bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, uint64_t timeout)
+{
+ EGLint ret = eglClientWaitSyncKHR(egl->egl_display, fence, 0, timeout);
+ if (ret == EGL_FALSE) {
+ vrend_printf("wait sync failed\n");
+ }
+ return ret != EGL_TIMEOUT_EXPIRED_KHR;
+}
+
+bool virgl_egl_export_signaled_fence(struct virgl_egl *egl, int *out_fd) {
+ return virgl_egl_export_fence(egl, egl->signaled_fence, out_fd);
+}
+
+bool virgl_egl_export_fence(struct virgl_egl *egl, EGLSyncKHR fence, int *out_fd) {
+ *out_fd = eglDupNativeFenceFDANDROID(egl->egl_display, fence);
+ return *out_fd != EGL_NO_NATIVE_FENCE_FD_ANDROID;
+}
diff --git a/src/virgl_egl.h b/src/vrend_winsys_egl.h
index 90a813b5..1fb0ccbc 100644
--- a/src/virgl_egl.h
+++ b/src/vrend_winsys_egl.h
@@ -26,6 +26,9 @@
#include "virglrenderer.h"
#include "vrend_renderer.h"
+
+#include <epoxy/egl.h>
+
struct virgl_egl;
struct virgl_gbm;
struct gbm_bo;
@@ -57,7 +60,10 @@ void *virgl_egl_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo);
void *virgl_egl_aux_plane_image_from_dmabuf(struct virgl_egl *egl, struct gbm_bo *bo, int plane);
void virgl_egl_image_destroy(struct virgl_egl *egl, void *image);
-bool virgl_egl_need_fence_and_wait_external(struct virgl_egl *egl);
-void *virgl_egl_fence(struct virgl_egl *egl);
-void virgl_egl_wait_fence(struct virgl_egl *egl, void* fence);
+bool virgl_egl_supports_fences(struct virgl_egl *egl);
+EGLSyncKHR virgl_egl_fence_create(struct virgl_egl *egl);
+void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence);
+bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, uint64_t timeout);
+bool virgl_egl_export_signaled_fence(struct virgl_egl *egl, int *out_fd);
+bool virgl_egl_export_fence(struct virgl_egl *egl, EGLSyncKHR fence, int *out_fd);
#endif
diff --git a/src/virgl_gbm.c b/src/vrend_winsys_gbm.c
index 7a0499ed..578086d4 100644
--- a/src/virgl_gbm.c
+++ b/src/vrend_winsys_gbm.c
@@ -38,7 +38,7 @@
#include "util/u_memory.h"
#include "pipe/p_state.h"
-#include "virgl_gbm.h"
+#include "vrend_winsys_gbm.h"
#include "virgl_hw.h"
#include "vrend_debug.h"
@@ -75,6 +75,13 @@ static const struct planar_layout packed_4bpp_layout = {
.bytes_per_pixel = { 4 }
};
+static const struct planar_layout packed_8bpp_layout = {
+ .num_planes = 1,
+ .horizontal_subsampling = { 1 },
+ .vertical_subsampling = { 1 },
+ .bytes_per_pixel = { 8 }
+};
+
static const struct planar_layout biplanar_yuv_420_layout = {
.num_planes = 2,
.horizontal_subsampling = { 1, 2 },
@@ -93,6 +100,7 @@ static const struct format_conversion conversions[] = {
{ GBM_FORMAT_RGB565, VIRGL_FORMAT_B5G6R5_UNORM },
{ GBM_FORMAT_ARGB8888, VIRGL_FORMAT_B8G8R8A8_UNORM },
{ GBM_FORMAT_XRGB8888, VIRGL_FORMAT_B8G8R8X8_UNORM },
+ { GBM_FORMAT_ABGR16161616F, VIRGL_FORMAT_R16G16B16A16_FLOAT },
{ GBM_FORMAT_NV12, VIRGL_FORMAT_NV12 },
{ GBM_FORMAT_ABGR8888, VIRGL_FORMAT_R8G8B8A8_UNORM},
{ GBM_FORMAT_XBGR8888, VIRGL_FORMAT_R8G8B8X8_UNORM},
@@ -178,6 +186,8 @@ static const struct planar_layout *layout_from_format(uint32_t format)
case GBM_FORMAT_ABGR8888:
case GBM_FORMAT_XBGR8888:
return &packed_4bpp_layout;
+ case GBM_FORMAT_ABGR16161616F:
+ return &packed_8bpp_layout;
default:
return NULL;
}
@@ -326,7 +336,13 @@ int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, const struct iovec
host_map_stride0 = 0;
uint32_t map_flags = (direction == VIRGL_TRANSFER_TO_HOST) ? GBM_BO_TRANSFER_WRITE :
GBM_BO_TRANSFER_READ;
- void *addr = gbm_bo_map2(bo, 0, 0, width, height, map_flags, &host_map_stride0, &map_data, 0);
+ /* XXX remove this and map just the region when single plane and GBM honors the region */
+ if (direction == VIRGL_TRANSFER_TO_HOST &&
+ !(info->box->x == 0 && info->box->y == 0 &&
+ info->box->width == width && info->box->height == height))
+ map_flags |= GBM_BO_TRANSFER_READ;
+
+ void *addr = gbm_bo_map(bo, 0, 0, width, height, map_flags, &host_map_stride0, &map_data);
if (!addr)
return -1;
diff --git a/src/virgl_gbm.h b/src/vrend_winsys_gbm.h
index 53b7d0b5..dd5ce365 100644
--- a/src/virgl_gbm.h
+++ b/src/vrend_winsys_gbm.h
@@ -35,6 +35,27 @@
#define VIRGL_GBM_MAX_PLANES 4
#endif
+/* GBM_FORMAT_ABGR16161616F was added since mesa 20.0 */
+#ifndef GBM_FORMAT_ABGR16161616F
+#define GBM_FORMAT_ABGR16161616F __gbm_fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+#endif
+
+#ifndef MINIGBM
+
+#define GBM_BO_USE_TEXTURING (1 << 5)
+#define GBM_BO_USE_CAMERA_WRITE (1 << 6)
+#define GBM_BO_USE_CAMERA_READ (1 << 7)
+#define GBM_BO_USE_PROTECTED (1 << 8)
+#define GBM_BO_USE_SW_READ_OFTEN (1 << 9)
+#define GBM_BO_USE_SW_READ_RARELY (1 << 10)
+#define GBM_BO_USE_SW_WRITE_OFTEN (1 << 11)
+#define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
+#define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
+#define GBM_BO_USE_HW_VIDEO_ENCODER (1 << 14)
+#define GBM_TEST_ALLOC (1 << 15)
+
+#endif
+
/*
* If fd >= 0, virglrenderer owns the fd since it was opened via a rendernode
* query. If fd < 0, the gbm device was opened with the fd provided by the
diff --git a/src/virgl_glx_context.c b/src/vrend_winsys_glx.c
index 20630caf..23bb9834 100644
--- a/src/virgl_glx_context.c
+++ b/src/vrend_winsys_glx.c
@@ -29,7 +29,7 @@
#include <epoxy/glx.h>
#include "virglrenderer.h"
-#include "virgl_glx.h"
+#include "vrend_winsys_glx.h"
struct virgl_glx {
Display *display;
diff --git a/src/virgl_glx.h b/src/vrend_winsys_glx.h
index e5cecbac..e5cecbac 100644
--- a/src/virgl_glx.h
+++ b/src/vrend_winsys_glx.h
diff --git a/tests/test_fuzzer_formats.c b/tests/test_fuzzer_formats.c
index 9ad59b9d..154a2e58 100644
--- a/tests/test_fuzzer_formats.c
+++ b/tests/test_fuzzer_formats.c
@@ -36,7 +36,7 @@
#include <string.h>
#include "virgl_hw.h"
-#include "virgl_egl.h"
+#include "vrend_winsys_egl.h"
#include "virglrenderer.h"
#include "virgl_protocol.h"
#include "os/os_misc.h"
diff --git a/tests/test_virgl_init.c b/tests/test_virgl_init.c
index a6ff8fdc..e61502c1 100644
--- a/tests/test_virgl_init.c
+++ b/tests/test_virgl_init.c
@@ -70,6 +70,18 @@ START_TEST(virgl_init_cbs_wrong_ver)
}
END_TEST
+START_TEST(virgl_init_cleanup_without_init)
+{
+ virgl_renderer_cleanup(&mystruct);
+}
+END_TEST
+
+START_TEST(virgl_init_reset_without_init)
+{
+ virgl_renderer_reset();
+}
+END_TEST
+
START_TEST(virgl_init_egl)
{
int ret;
@@ -81,6 +93,43 @@ START_TEST(virgl_init_egl)
END_TEST
+START_TEST(virgl_init_egl_double_init)
+{
+ int ret;
+ test_cbs.version = 1;
+ ret = virgl_renderer_init(&mystruct, context_flags, &test_cbs);
+ ck_assert_int_eq(ret, 0);
+ ret = virgl_renderer_init(&mystruct, context_flags, &test_cbs);
+ ck_assert_int_eq(ret, 0);
+ virgl_renderer_cleanup(&mystruct);
+}
+END_TEST
+
+START_TEST(virgl_init_egl_double_init_conflict_args)
+{
+ struct myinfo_struct local_struct;
+ struct virgl_renderer_callbacks local_cbs;
+ int ret;
+
+ test_cbs.version = 1;
+ ret = virgl_renderer_init(&mystruct, context_flags, &test_cbs);
+ ck_assert_int_eq(ret, 0);
+
+ ret = virgl_renderer_init(&local_struct, context_flags, &test_cbs);
+ ck_assert_int_eq(ret, -EBUSY);
+
+ ret = virgl_renderer_init(&mystruct, 0, &test_cbs);
+ ck_assert_int_eq(ret, -EBUSY);
+
+ memset(&local_cbs, 0, sizeof(local_cbs));
+ local_cbs.version = 1;
+ ret = virgl_renderer_init(&mystruct, context_flags, &local_cbs);
+ ck_assert_int_eq(ret, -EBUSY);
+
+ virgl_renderer_cleanup(&mystruct);
+}
+END_TEST
+
START_TEST(virgl_init_egl_create_ctx)
{
int ret;
@@ -456,7 +505,11 @@ static Suite *virgl_init_suite(void)
tcase_add_test(tc_core, virgl_init_no_cbs);
tcase_add_test(tc_core, virgl_init_no_cookie);
tcase_add_test(tc_core, virgl_init_cbs_wrong_ver);
+ tcase_add_test(tc_core, virgl_init_cleanup_without_init);
+ tcase_add_test(tc_core, virgl_init_reset_without_init);
tcase_add_test(tc_core, virgl_init_egl);
+ tcase_add_test(tc_core, virgl_init_egl_double_init);
+ tcase_add_test(tc_core, virgl_init_egl_double_init_conflict_args);
tcase_add_test(tc_core, virgl_init_egl_create_ctx);
tcase_add_test(tc_core, virgl_init_egl_create_ctx_0);
tcase_add_test(tc_core, virgl_init_egl_destroy_ctx_illegal);