about summary refs log tree commit diff
diff options
context:
space:
mode:
authorChia-I Wu <olvaffe@gmail.com>2021-08-25 17:02:10 -0700
committerChia-I Wu <olvaffe@gmail.com>2021-09-03 09:28:54 -0700
commit6691ebec9f1d325289aba54eaedc44e219f6dc84 (patch)
tree09fe1d9a75aec3ec5d50e180f80216f906096142
parent94a9cbe4efa29b2eec442725e326e1bb55271353 (diff)
downloadvirglrenderer-6691ebec9f1d325289aba54eaedc44e219f6dc84.tar.gz
vkr: add iov support to vkr_ring_buffer
vkr_ring_read_buffer now supports reading from a ring buffer that is on top of iov. It is overly complex though. For further optimization and simplification, we should consider requiring a logically contiguous virgl_resource. Possible options are requiring a physically contiguous guest memory (this can have other use cases) or requiring a host VkDeviceMemory (already doable, but meh). We also use the chance to replace size_t by uint32_t in vkr_ring_read_buffer. No functional difference. Signed-off-by: Chia-I Wu <olvaffe@gmail.com> Reviewed-by: Ryan Neph <ryanneph@google.com> Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
-rw-r--r--src/venus/vkr_ring.c69
-rw-r--r--src/venus/vkr_ring.h9
2 files changed, 67 insertions, 11 deletions
diff --git a/src/venus/vkr_ring.c b/src/venus/vkr_ring.c
index bb3a64e6..c6aaeeb2 100644
--- a/src/venus/vkr_ring.c
+++ b/src/venus/vkr_ring.c
@@ -61,14 +61,18 @@ vkr_ring_init_buffer(struct vkr_ring *ring, const struct vkr_ring_layout *layout
{
struct vkr_ring_buffer *buf = &ring->buffer;
+ const struct iovec *base_iov =
+ seek_resource(layout->resource, 0, layout->buffer.begin, &buf->base_iov_index,
+ &buf->base_iov_offset);
+
buf->size = vkr_region_size(&layout->buffer);
assert(buf->size && util_is_power_of_two(buf->size));
buf->mask = buf->size - 1;
buf->cur = 0;
-
- /* TODO iov support */
- buf->data = get_resource_pointer(layout->resource, 0, layout->buffer.begin);
+ buf->cur_iov = base_iov;
+ buf->cur_iov_index = buf->base_iov_index;
+ buf->cur_iov_offset = buf->base_iov_offset;
}
static bool
@@ -111,22 +115,67 @@ vkr_ring_store_status(struct vkr_ring *ring, uint32_t status)
atomic_store_explicit(ring->control.status, status, memory_order_seq_cst);
}
+/* TODO consider requiring virgl_resource to be logically contiguous */
static void
-vkr_ring_read_buffer(struct vkr_ring *ring, void *data, size_t size)
+vkr_ring_read_buffer(struct vkr_ring *ring, void *data, uint32_t size)
{
struct vkr_ring_buffer *buf = &ring->buffer;
+ const struct virgl_resource *res = ring->resource;
- const size_t offset = buf->cur & buf->mask;
assert(size <= buf->size);
- if (offset + size <= buf->size) {
- memcpy(data, buf->data + offset, size);
+ const uint32_t buf_offset = buf->cur & buf->mask;
+ const uint32_t buf_avail = buf->size - buf_offset;
+ const bool wrap = size >= buf_avail;
+
+ uint32_t read_size;
+ uint32_t wrap_size;
+ if (!wrap) {
+ read_size = size;
+ wrap_size = 0;
+ } else {
+ read_size = buf_avail;
+ /* When size == buf_avail, wrap is true but wrap_size is 0. We want to
+ * wrap because it seems slightly faster on the next call. Besides,
+ * seek_resource does not support seeking to end-of-resource which could
+ * happen if we don't wrap and the buffer region end coincides with the
+ * resource end.
+ */
+ wrap_size = size - buf_avail;
+ }
+
+ /* do the reads */
+ if (read_size <= buf->cur_iov->iov_len - buf->cur_iov_offset) {
+ const void *src = (const uint8_t *)buf->cur_iov->iov_base + buf->cur_iov_offset;
+ memcpy(data, src, read_size);
+
+ /* fast path */
+ if (!wrap) {
+ assert(!wrap_size);
+ buf->cur += read_size;
+ buf->cur_iov_offset += read_size;
+ return;
+ }
} else {
- const size_t s = buf->size - offset;
- memcpy(data, buf->data + offset, s);
- memcpy((uint8_t *)data + s, buf->data, size - s);
+ vrend_read_from_iovec(buf->cur_iov, res->iov_count - buf->cur_iov_index,
+ buf->cur_iov_offset, data, read_size);
+ }
+
+ if (wrap_size) {
+ vrend_read_from_iovec(res->iov + buf->base_iov_index,
+ res->iov_count - buf->base_iov_index, buf->base_iov_offset,
+ (char *)data + read_size, wrap_size);
}
+ /* advance cur */
buf->cur += size;
+ if (!wrap) {
+ buf->cur_iov = seek_resource(res, buf->cur_iov_index, buf->cur_iov_offset + size,
+ &buf->cur_iov_index, &buf->cur_iov_offset);
+ } else {
+ buf->cur_iov =
+ seek_resource(res, buf->base_iov_index, buf->base_iov_offset + wrap_size,
+ &buf->cur_iov_index, &buf->cur_iov_offset);
+ }
}
struct vkr_ring *
diff --git a/src/venus/vkr_ring.h b/src/venus/vkr_ring.h
index d964340a..ee87d2d0 100644
--- a/src/venus/vkr_ring.h
+++ b/src/venus/vkr_ring.h
@@ -42,6 +42,10 @@ struct vkr_ring_control {
/* the buffer region of a ring */
struct vkr_ring_buffer {
+ /* the base of the region in the resource */
+ int base_iov_index;
+ size_t base_iov_offset;
+
uint32_t size;
uint32_t mask;
@@ -50,7 +54,10 @@ struct vkr_ring_buffer {
*/
uint32_t cur;
- const uint8_t *data;
+ /* The current iov and iov offset in the resource. */
+ const struct iovec *cur_iov;
+ int cur_iov_index;
+ size_t cur_iov_offset;
};
/* the extra region of a ring */