path: root/src/venus/vkr_ring.h
/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VKR_RING_H
#define VKR_RING_H

#include "vkr_common.h"

/* We read from the ring buffer to a temporary buffer for
 * virgl_context::submit_cmd.  Until that is changed, we want to put a limit
 * on the size of the temporary buffer.  It also makes no sense to have huge
 * rings.
 *
 * This must not exceed UINT32_MAX because the ring head and tail are 32-bit.
 */
#define VKR_RING_BUFFER_MAX_SIZE (16u * 1024 * 1024)
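
/* An illustrative size check (a sketch only, not the validation that
 * vkr_ring_create actually performs; buffer_size stands for the size of the
 * ring's buffer region).  The mask field of struct vkr_ring_buffer below
 * implies the size is also expected to be a power of two:
 *
 *   if (!buffer_size || buffer_size > VKR_RING_BUFFER_MAX_SIZE ||
 *       (buffer_size & (buffer_size - 1)) != 0)
 *      return false;
 */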

/* The layout of a ring in a virgl_resource.  This is parsed and discarded by
 * vkr_ring_create.
 */
struct vkr_ring_layout {
   struct virgl_resource *resource;

   struct vkr_region head;
   struct vkr_region tail;
   struct vkr_region status;
   struct vkr_region buffer;
   struct vkr_region extra;
};
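
/* A minimal example of filling the layout (a sketch only: the offsets are
 * made up, res and ctx are assumed to be a valid virgl_resource and
 * virgl_context, and struct vkr_region is assumed to hold begin/end byte
 * offsets as defined in vkr_common.h):
 *
 *   const struct vkr_ring_layout layout = {
 *      .resource = res,
 *      .head   = { .begin = 0, .end = 4 },
 *      .tail   = { .begin = 4, .end = 8 },
 *      .status = { .begin = 8, .end = 12 },
 *      .buffer = { .begin = 64, .end = 64 + 4096 },
 *      .extra  = { .begin = 64 + 4096, .end = 64 + 4096 + 256 },
 *   };
 *   struct vkr_ring *ring = vkr_ring_create(&layout, ctx, idle_timeout);
 */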

static_assert(ATOMIC_INT_LOCK_FREE == 2 && sizeof(atomic_uint) == 4,
              "vkr_ring_control requires lock-free 32-bit atomic_uint");

/* the control region of a ring */
struct vkr_ring_control {
   /* Pointers to ring head, tail, and status.
    *
    * Clients increment the tail after commands are added.  We increment the
    * head after commands are executed.  The status is updated when the
    * ring thread's status changes.
    */
   volatile atomic_uint *head;
   const volatile atomic_uint *tail;
   volatile atomic_uint *status;
};
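
/* The comment above implies a single-producer/single-consumer protocol.  A
 * simplified consumer-side sketch (execute_one_command is a hypothetical
 * helper that returns the size of the command it consumed):
 *
 *   uint32_t head = atomic_load(ring->control.head);
 *   while (head != atomic_load(ring->control.tail)) {
 *      head += execute_one_command(ring);
 *      atomic_store(ring->control.head, head);
 *   }
 */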

/* the buffer region of a ring */
struct vkr_ring_buffer {
   /* the base of the region in the resource */
   int base_iov_index;
   size_t base_iov_offset;

   uint32_t size;
   uint32_t mask;

   /* The current offset in the buffer region.  It is free-running and must be
    * masked into the range [0, size).
    */
   uint32_t cur;

   /* The current iov and iov offset in the resource. */
   const struct iovec *cur_iov;
   int cur_iov_index;
   size_t cur_iov_offset;
};
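
/* Because cur, head, and tail are free-running 32-bit counters, unsigned
 * wraparound keeps the arithmetic correct.  A sketch of deriving the pending
 * byte count and the masked offset, with buf being &ring->buffer and
 * mask == size - 1 for a power-of-two size:
 *
 *   const uint32_t tail    = atomic_load(ring->control.tail);
 *   const uint32_t pending = tail - buf->cur;        // valid across wraparound
 *   const uint32_t offset  = buf->cur & buf->mask;   // maps cur into [0, size)
 */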

/* the extra region of a ring */
struct vkr_ring_extra {
   /* the base of the region in the resource */
   int base_iov_index;
   size_t base_iov_offset;

   /* used for offset validation */
   struct vkr_region region;

   /* cache the latest offset->pointer result */
   size_t cached_offset;
   volatile atomic_uint *cached_data;
};
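
/* A sketch of how the cached translation could be used by
 * vkr_ring_write_extra (illustrative only; the real lookup and validation
 * live in vkr_ring.c):
 *
 *   if (extra->cached_data && offset == extra->cached_offset) {
 *      atomic_store(extra->cached_data, val);   // cache hit, no re-translation
 *      return true;
 *   }
 */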

struct vkr_ring {
   /* used by the caller */
   vkr_object_id id;
   struct list_head head;

   /* ring regions */
   struct virgl_resource *resource;
   struct vkr_ring_control control;
   struct vkr_ring_buffer buffer;
   struct vkr_ring_extra extra;

   /* ring thread */
   struct virgl_context *context;
   uint64_t idle_timeout;
   void *cmd;

   mtx_t mutex;
   cnd_t cond;
   thrd_t thread;
   atomic_bool started;
   atomic_bool pending_notify;
};
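
/* The mutex/cond/pending_notify trio suggests the usual wakeup handshake
 * between vkr_ring_notify and the ring thread.  A plausible sketch of the
 * notifying side (not necessarily the exact implementation):
 *
 *   mtx_lock(&ring->mutex);
 *   atomic_store(&ring->pending_notify, true);
 *   cnd_signal(&ring->cond);
 *   mtx_unlock(&ring->mutex);
 */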

struct vkr_ring *
vkr_ring_create(const struct vkr_ring_layout *layout,
                struct virgl_context *ctx,
                uint64_t idle_timeout);

void
vkr_ring_destroy(struct vkr_ring *ring);

void
vkr_ring_start(struct vkr_ring *ring);

bool
vkr_ring_stop(struct vkr_ring *ring);

void
vkr_ring_notify(struct vkr_ring *ring);

bool
vkr_ring_write_extra(struct vkr_ring *ring, size_t offset, uint32_t val);
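
/* Typical lifecycle, pieced together from the declarations above (a sketch;
 * error handling is omitted and the layout, ctx, and idle_timeout are
 * assumed to have been set up by the caller):
 *
 *   struct vkr_ring *ring = vkr_ring_create(&layout, ctx, idle_timeout);
 *   vkr_ring_start(ring);
 *
 *   // whenever the client signals that new commands were written
 *   vkr_ring_notify(ring);
 *
 *   // optionally report back through the extra region
 *   vkr_ring_write_extra(ring, some_offset, some_value);
 *
 *   vkr_ring_stop(ring);      // returns a bool; its meaning is defined in vkr_ring.c
 *   vkr_ring_destroy(ring);
 */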

#endif /* VKR_RING_H */