path: root/common/include/uapi/gpu/arm/midgard/csf
author	Sidath Senanayake <sidaths@google.com>	2021-07-12 20:18:46 +0100
committer	Sidath Senanayake <sidaths@google.com>	2021-07-12 20:18:46 +0100
commit	f573fd96c0d6b6dfb2a91605a211dd15cb3153d4 (patch)
tree	ab706afb1df5131b258a6ff01ed25cdd7bd16825 /common/include/uapi/gpu/arm/midgard/csf
parent	d31cf595e359c7bde5c272f5b729c86277148e52 (diff)
parent	fca8613cfcf585bf9113dca96a05daea9fd89794 (diff)
Merge r31p0 from upstream into android-gs-pixel-5.10
This commit updates the Mali KMD to version r31p0 from commit
fca8613cfcf585bf9113dca96a05daea9fd89794

Bug: 185900681
Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: I3c814b6145b10beee3d0fecedb74a6225a09a858
Diffstat (limited to 'common/include/uapi/gpu/arm/midgard/csf')
-rw-r--r--	common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h	765
-rw-r--r--	common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_control_registers.h	32
-rw-r--r--	common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_registers.h	1414
-rw-r--r--	common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h	390
4 files changed, 2601 insertions, 0 deletions
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h b/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h
new file mode 100644
index 0000000..7fa874b
--- /dev/null
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h
@@ -0,0 +1,765 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_BASE_CSF_KERNEL_H_
+#define _UAPI_BASE_CSF_KERNEL_H_
+
+#include <linux/types.h>
+
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with the BASE_MEM_PROT_GPU_EX or TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
+/* Userspace is not allowed to free this memory.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
+
+#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* IN/OUT */
+/* Should be cached on the CPU, returned if actually cached
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the alloc
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Protected memory
+ */
+#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16)
+
+/* Physical memory backing is not needed and may be reclaimed
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/* CSF event memory
+ *
+ * If Outer shareable coherence is not specified or not available, then on
+ * allocation kbase will automatically use the uncached GPU mapping.
+ * There is no need for the client to specify BASE_MEM_UNCACHED_GPU
+ * themselves when allocating memory with the BASE_MEM_CSF_EVENT flag.
+ *
+ * This memory requires a permanent mapping
+ *
+ * See also kbase_reg_needs_kernel_mapping()
+ */
+#define BASE_MEM_CSF_EVENT ((base_mem_alloc_flags)1 << 19)
+
+#define BASE_MEM_RESERVED_BIT_20 ((base_mem_alloc_flags)1 << 20)
+
+/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu
+ * mode. Some components within the GPU might only be able to access memory
+ * that is GPU cacheable. Refer to the specific GPU implementation for more
+ * details. The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
+/*
+ * Bits [22:25] for group_id (0~15).
+ *
+ * base_mem_group_id_set() should be used to pack a memory group ID into a
+ * base_mem_alloc_flags value instead of accessing the bits directly.
+ * base_mem_group_id_get() should be used to extract the memory group ID from
+ * a base_mem_alloc_flags value.
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK \
+ ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
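/* A minimal sketch of what the accessors named above could look like,
 * assuming base_mem_alloc_flags is the __u64 flags type defined in the
 * base kernel header; the real helpers live outside this UAPI file.
 */
static inline __u64 base_mem_group_id_set(int id)
{
	/* Pack a group ID (0-15) into flag bits [25:22]. */
	return ((__u64)id << BASEP_MEM_GROUP_ID_SHIFT) & BASE_MEM_GROUP_ID_MASK;
}

static inline int base_mem_group_id_get(__u64 flags)
{
	/* Extract the group ID from a flags value. */
	return (int)((flags & BASE_MEM_GROUP_ID_MASK) >> BASEP_MEM_GROUP_ID_SHIFT);
}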
+
+/* Must do CPU cache maintenance when imported memory is mapped/unmapped
+ * on GPU. Currently applicable to dma-buf type only.
+ */
+#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26)
+
+/* OUT */
+/* Kernel side cache sync ops required */
+#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 29
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED \
+	(BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_20)
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* reserved handles up to 47<<PAGE_SHIFT for future special handles */
+#define BASEP_MEM_CSF_USER_REG_PAGE_HANDLE (47ul << 12)
+#define BASEP_MEM_CSF_USER_IO_PAGES_HANDLE (48ul << 12)
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+#define KBASE_CSF_NUM_USER_IO_PAGES_HANDLE \
+ ((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> \
+ LOCAL_PAGE_SHIFT)
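/* Worked example of the handle arithmetic above, assuming 4 KiB pages
 * (LOCAL_PAGE_SHIFT == 12) and 64-bit longs (BITS_PER_LONG == 64):
 *
 *   BASE_MEM_COOKIE_BASE               = 64 << 12 = 0x40000
 *   BASEP_MEM_CSF_USER_IO_PAGES_HANDLE = 48 << 12 = 0x30000
 *   KBASE_CSF_NUM_USER_IO_PAGES_HANDLE = (0x40000 - 0x30000) >> 12 = 16
 *   BASE_MEM_FIRST_FREE_ADDRESS        = (64 << 12) + 0x40000 = 0x80000
 */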
+
+/**
+ * Valid set of just-in-time memory allocation flags
+ */
+#define BASE_JIT_ALLOC_VALID_FLAGS ((__u8)0)
+
+/* Flags to pass to ::base_context_init.
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef __u32 base_context_create_flags;
+
+/* No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/* Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/* Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+ ((base_context_create_flags)1 << 1)
+
+/* Base context creates a CSF event notification thread.
+ *
+ * The creation of a CSF event notification thread is optional, but
+ * it is mandatory for the handling of CSF events.
+ */
+#define BASE_CONTEXT_CSF_EVENT_THREAD ((base_context_create_flags)1 << 2)
+
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
+
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+ ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+ (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \
+ BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (BASE_CONTEXT_CCTX_EMBEDDED | \
+ BASE_CONTEXT_CSF_EVENT_THREAD | \
+ BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
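/* A minimal sketch of encoding an MMU group ID into the create flags,
 * mirroring the comment on BASEP_CONTEXT_MMU_GROUP_ID_SHIFT above. The
 * helper name is illustrative; callers must keep group_id in 0-15.
 */
static inline base_context_create_flags
base_context_mmu_group_id_set(int group_id)
{
	return ((base_context_create_flags)group_id
		<< BASEP_CONTEXT_MMU_GROUP_ID_SHIFT) &
	       BASEP_CONTEXT_MMU_GROUP_ID_MASK;
}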
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST)
+ */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact.
+ */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+/* Enable KBase tracepoints for CSF builds */
+#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1 << 2)
+
+/* Enable additional CSF Firmware side tracepoints */
+#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1 << 3)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+ BASE_TLSTREAM_JOB_DUMPING_ENABLED | \
+ BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | \
+ BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS)
+
+/* Number of pages mapped into the process address space for a bound GPU
+ * command queue. A pair of input/output pages and a Hw doorbell page
+ * are mapped to enable direct submission of commands to Hw.
+ */
+#define BASEP_QUEUE_NR_MMAP_USER_PAGES ((size_t)3)
+
+#define BASE_QUEUE_MAX_PRIORITY (15U)
+
+/* CQS Sync object is an array of __u32 event_mem[2], error field index is 1 */
+#define BASEP_EVENT_VAL_INDEX (0U)
+#define BASEP_EVENT_ERR_INDEX (1U)
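/* A minimal sketch of reading a CQS sync object through the indices
 * above; the object is laid out as __u32 event_mem[2]. The helper
 * names are illustrative.
 */
static inline __u32 basep_cqs_read_val(const __u32 *event_mem)
{
	return event_mem[BASEP_EVENT_VAL_INDEX];	/* current value */
}

static inline __u32 basep_cqs_read_err(const __u32 *event_mem)
{
	return event_mem[BASEP_EVENT_ERR_INDEX];	/* error status */
}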
+
+/* The upper limit for number of objects that could be waited/set per command.
+ * This limit is now enforced as internally the error inherit inputs are
+ * converted to 32-bit flags in a __u32 variable occupying a previously padding
+ * field.
+ */
+#define BASEP_KCPU_CQS_MAX_NUM_OBJS ((size_t)32)
+
+#if MALI_UNIT_TEST
+/**
+ * enum base_kcpu_command_type - Kernel CPU queue command type.
+ * @BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL: fence_signal,
+ * @BASE_KCPU_COMMAND_TYPE_FENCE_WAIT: fence_wait,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT: cqs_wait,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_SET: cqs_set,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION: cqs_wait_operation,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION: cqs_set_operation,
+ * @BASE_KCPU_COMMAND_TYPE_MAP_IMPORT: map_import,
+ * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT: unmap_import,
+ * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: unmap_import_force,
+ * @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC: jit_alloc,
+ * @BASE_KCPU_COMMAND_TYPE_JIT_FREE: jit_free,
+ * @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: group_suspend,
+ * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier,
+ * @BASE_KCPU_COMMAND_TYPE_SAMPLE_TIME: sample_time,
+ */
+enum base_kcpu_command_type {
+ BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL,
+ BASE_KCPU_COMMAND_TYPE_FENCE_WAIT,
+ BASE_KCPU_COMMAND_TYPE_CQS_WAIT,
+ BASE_KCPU_COMMAND_TYPE_CQS_SET,
+ BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION,
+ BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION,
+ BASE_KCPU_COMMAND_TYPE_MAP_IMPORT,
+ BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT,
+ BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE,
+ BASE_KCPU_COMMAND_TYPE_JIT_ALLOC,
+ BASE_KCPU_COMMAND_TYPE_JIT_FREE,
+ BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND,
+ BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER,
+ BASE_KCPU_COMMAND_TYPE_SAMPLE_TIME,
+};
+#else
+/**
+ * enum base_kcpu_command_type - Kernel CPU queue command type.
+ * @BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL: fence_signal,
+ * @BASE_KCPU_COMMAND_TYPE_FENCE_WAIT: fence_wait,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT: cqs_wait,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_SET: cqs_set,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION: cqs_wait_operation,
+ * @BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION: cqs_set_operation,
+ * @BASE_KCPU_COMMAND_TYPE_MAP_IMPORT: map_import,
+ * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT: unmap_import,
+ * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: unmap_import_force,
+ * @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC: jit_alloc,
+ * @BASE_KCPU_COMMAND_TYPE_JIT_FREE: jit_free,
+ * @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: group_suspend,
+ * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier,
+ */
+enum base_kcpu_command_type {
+ BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL,
+ BASE_KCPU_COMMAND_TYPE_FENCE_WAIT,
+ BASE_KCPU_COMMAND_TYPE_CQS_WAIT,
+ BASE_KCPU_COMMAND_TYPE_CQS_SET,
+ BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION,
+ BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION,
+ BASE_KCPU_COMMAND_TYPE_MAP_IMPORT,
+ BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT,
+ BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE,
+ BASE_KCPU_COMMAND_TYPE_JIT_ALLOC,
+ BASE_KCPU_COMMAND_TYPE_JIT_FREE,
+ BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND,
+ BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER,
+};
+#endif /* MALI_UNIT_TEST */
+
+/**
+ * enum base_queue_group_priority - Priority of a GPU Command Queue Group.
+ * @BASE_QUEUE_GROUP_PRIORITY_HIGH: GPU Command Queue Group is of high
+ * priority.
+ * @BASE_QUEUE_GROUP_PRIORITY_MEDIUM: GPU Command Queue Group is of medium
+ * priority.
+ * @BASE_QUEUE_GROUP_PRIORITY_LOW: GPU Command Queue Group is of low
+ * priority.
+ * @BASE_QUEUE_GROUP_PRIORITY_REALTIME: GPU Command Queue Group is of real-time
+ * priority.
+ * @BASE_QUEUE_GROUP_PRIORITY_COUNT: Number of GPU Command Queue Group
+ * priority levels.
+ *
+ * The enum is currently ordered from highest to lowest priority, but new
+ * levels may be added out of order to preserve ABI compatibility with
+ * previous releases. If that happens, update the assignment to the
+ * 'priority' member in &kbase_queue_group so that it remains a linear
+ * ordering.
+ *
+ * There should be no gaps in the enum, otherwise use of
+ * BASE_QUEUE_GROUP_PRIORITY_COUNT in kbase must be updated.
+ */
+enum base_queue_group_priority {
+ BASE_QUEUE_GROUP_PRIORITY_HIGH = 0,
+ BASE_QUEUE_GROUP_PRIORITY_MEDIUM,
+ BASE_QUEUE_GROUP_PRIORITY_LOW,
+ BASE_QUEUE_GROUP_PRIORITY_REALTIME,
+ BASE_QUEUE_GROUP_PRIORITY_COUNT
+};
+
+struct base_kcpu_command_fence_info {
+ __u64 fence;
+};
+
+struct base_cqs_wait_info {
+ __u64 addr;
+ __u32 val;
+ __u32 padding;
+};
+
+struct base_kcpu_command_cqs_wait_info {
+ __u64 objs;
+ __u32 nr_objs;
+ __u32 inherit_err_flags;
+};
+
+struct base_cqs_set {
+ __u64 addr;
+};
+
+struct base_kcpu_command_cqs_set_info {
+ __u64 objs;
+ __u32 nr_objs;
+ __u32 padding;
+};
+
+/**
+ * basep_cqs_data_type - Enumeration of CQS Data Types
+ *
+ * @BASEP_CQS_DATA_TYPE_U32: The Data Type of a CQS Object's value
+ * is an unsigned 32-bit integer
+ * @BASEP_CQS_DATA_TYPE_U64: The Data Type of a CQS Object's value
+ * is an unsigned 64-bit integer
+ */
+typedef enum PACKED {
+ BASEP_CQS_DATA_TYPE_U32 = 0,
+ BASEP_CQS_DATA_TYPE_U64 = 1,
+} basep_cqs_data_type;
+
+/**
+ * basep_cqs_wait_operation_op - Enumeration of CQS Object Wait
+ * Operation conditions
+ *
+ * @BASEP_CQS_WAIT_OPERATION_LE: CQS Wait Operation indicating that a
+ * wait will be satisfied when a CQS Object's
+ * value is Less than or Equal to
+ * the Wait Operation value
+ * @BASEP_CQS_WAIT_OPERATION_GT: CQS Wait Operation indicating that a
+ * wait will be satisfied when a CQS Object's
+ * value is Greater than the Wait Operation value
+ */
+typedef enum {
+ BASEP_CQS_WAIT_OPERATION_LE = 0,
+ BASEP_CQS_WAIT_OPERATION_GT = 1,
+} basep_cqs_wait_operation_op;
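/* A minimal sketch of how the two wait conditions evaluate, per the
 * enum documentation above; the helper name is illustrative.
 */
static inline int basep_cqs_wait_is_satisfied(__u64 obj_val, __u64 wait_val,
					      basep_cqs_wait_operation_op op)
{
	if (op == BASEP_CQS_WAIT_OPERATION_LE)
		return obj_val <= wait_val;	/* satisfied when <= */
	return obj_val > wait_val;		/* BASEP_CQS_WAIT_OPERATION_GT */
}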
+
+struct base_cqs_wait_operation_info {
+ __u64 addr;
+ __u64 val;
+ __u8 operation;
+ __u8 data_type;
+ __u8 padding[6];
+};
+
+/**
+ * struct base_kcpu_command_cqs_wait_operation_info - structure which contains information
+ * about the Timeline CQS wait objects
+ *
+ * @objs: An array of Timeline CQS waits.
+ * @nr_objs: Number of Timeline CQS waits in the array.
+ * @inherit_err_flags: Bit-pattern selecting which CQSs in the array have
+ *                     their error field imported into the queue's
+ *                     error-state.
+ */
+struct base_kcpu_command_cqs_wait_operation_info {
+ __u64 objs;
+ __u32 nr_objs;
+ __u32 inherit_err_flags;
+};
+
+/**
+ * basep_cqs_set_operation_op - Enumeration of CQS Set Operations
+ *
+ * @BASEP_CQS_SET_OPERATION_ADD: CQS Set operation for adding a value
+ * to a synchronization object
+ * @BASEP_CQS_SET_OPERATION_SET: CQS Set operation for setting the value
+ * of a synchronization object
+ */
+typedef enum {
+ BASEP_CQS_SET_OPERATION_ADD = 0,
+ BASEP_CQS_SET_OPERATION_SET = 1,
+} basep_cqs_set_operation_op;
+
+struct base_cqs_set_operation_info {
+ __u64 addr;
+ __u64 val;
+ __u8 operation;
+ __u8 data_type;
+ __u8 padding[6];
+};
+
+/**
+ * struct base_kcpu_command_cqs_set_operation_info - structure which contains information
+ * about the Timeline CQS set objects
+ *
+ * @objs: An array of Timeline CQS sets.
+ * @nr_objs: Number of Timeline CQS sets in the array.
+ * @padding: Structure padding, unused bytes.
+ */
+struct base_kcpu_command_cqs_set_operation_info {
+ __u64 objs;
+ __u32 nr_objs;
+ __u32 padding;
+};
+
+/**
+ * struct base_kcpu_command_import_info - structure which contains information
+ * about the imported buffer.
+ *
+ * @handle: Address of imported user buffer.
+ */
+struct base_kcpu_command_import_info {
+ __u64 handle;
+};
+
+/**
+ * struct base_kcpu_command_jit_alloc_info - structure which contains
+ * information about jit memory allocation.
+ *
+ * @info: An array of elements of the
+ * struct base_jit_alloc_info type.
+ * @count: The number of elements in the info array.
+ * @padding: Padding to a multiple of 64 bits.
+ */
+struct base_kcpu_command_jit_alloc_info {
+ __u64 info;
+ __u8 count;
+ __u8 padding[7];
+};
+
+/**
+ * struct base_kcpu_command_jit_free_info - structure which contains
+ * information about jit memory which is to be freed.
+ *
+ * @ids: An array containing the JIT IDs to free.
+ * @count: The number of elements in the ids array.
+ * @padding: Padding to a multiple of 64 bits.
+ */
+struct base_kcpu_command_jit_free_info {
+ __u64 ids;
+ __u8 count;
+ __u8 padding[7];
+};
+
+/**
+ * struct base_kcpu_command_group_suspend_info - structure which contains
+ * suspend buffer data captured for a suspended queue group.
+ *
+ * @buffer: Pointer to an array of elements of the type char.
+ * @size: Number of elements in the @buffer array.
+ * @group_handle: Handle to the mapping of CSG.
+ * @padding: padding to a multiple of 64 bits.
+ */
+struct base_kcpu_command_group_suspend_info {
+ __u64 buffer;
+ __u32 size;
+ __u8 group_handle;
+ __u8 padding[3];
+};
+
+#if MALI_UNIT_TEST
+struct base_kcpu_command_sample_time_info {
+ __u64 time;
+};
+#endif /* MALI_UNIT_TEST */
+
+/**
+ * struct base_kcpu_command - kcpu command.
+ * @type: type of the kcpu command, one of enum base_kcpu_command_type
+ * @padding: padding to a multiple of 64 bits
+ * @info: structure which contains information about the kcpu command;
+ * actual type is determined by @p type
+ * @info.fence: Fence
+ * @info.cqs_wait: CQS wait
+ * @info.cqs_set: CQS set
+ * @info.import: import
+ * @info.jit_alloc: jit allocation
+ * @info.jit_free: jit deallocation
+ * @info.suspend_buf_copy: suspend buffer copy
+ * @info.sample_time: sample time
+ * @info.padding: padding
+ */
+struct base_kcpu_command {
+ __u8 type;
+ __u8 padding[sizeof(__u64) - sizeof(__u8)];
+ union {
+ struct base_kcpu_command_fence_info fence;
+ struct base_kcpu_command_cqs_wait_info cqs_wait;
+ struct base_kcpu_command_cqs_set_info cqs_set;
+ struct base_kcpu_command_cqs_wait_operation_info cqs_wait_operation;
+ struct base_kcpu_command_cqs_set_operation_info cqs_set_operation;
+ struct base_kcpu_command_import_info import;
+ struct base_kcpu_command_jit_alloc_info jit_alloc;
+ struct base_kcpu_command_jit_free_info jit_free;
+ struct base_kcpu_command_group_suspend_info suspend_buf_copy;
+#if MALI_UNIT_TEST
+ struct base_kcpu_command_sample_time_info sample_time;
+#endif /* MALI_UNIT_TEST */
+ __u64 padding[2]; /* No sub-struct should be larger */
+ } info;
+};
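/* A minimal sketch of filling in a CQS_SET command, showing how @type
 * selects the active member of the info union. The helper name is
 * illustrative; @objs_user_ptr carries a user pointer to an array of
 * struct base_cqs_set, passed as a __u64. Callers should also zero the
 * top-level padding bytes.
 */
static inline void base_kcpu_build_cqs_set(struct base_kcpu_command *cmd,
					   __u64 objs_user_ptr, __u32 nr_objs)
{
	cmd->type = BASE_KCPU_COMMAND_TYPE_CQS_SET;
	cmd->info.cqs_set.objs = objs_user_ptr;
	cmd->info.cqs_set.nr_objs = nr_objs;
	cmd->info.cqs_set.padding = 0;
}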
+
+/**
+ * struct basep_cs_stream_control - CSI capabilities.
+ *
+ * @features: Features of this stream
+ * @padding: Padding to a multiple of 64 bits.
+ */
+struct basep_cs_stream_control {
+ __u32 features;
+ __u32 padding;
+};
+
+/**
+ * struct basep_cs_group_control - CSG interface capabilities.
+ *
+ * @features: Features of this group
+ * @stream_num: Number of streams in this group
+ * @suspend_size: Size in bytes of the suspend buffer for this group
+ * @padding: Padding to a multiple of 64 bits.
+ */
+struct basep_cs_group_control {
+ __u32 features;
+ __u32 stream_num;
+ __u32 suspend_size;
+ __u32 padding;
+};
+
+/**
+ * struct base_gpu_queue_group_error_fatal_payload - Unrecoverable fault
+ * error information associated with GPU command queue group.
+ *
+ * @sideband: Additional information of the unrecoverable fault.
+ * @status: Unrecoverable fault information.
+ * This consists of exception type (least significant byte) and
+ * data (remaining bytes). One example of exception type is
+ * CS_INVALID_INSTRUCTION (0x49).
+ * @padding: Padding to make multiple of 64bits
+ */
+struct base_gpu_queue_group_error_fatal_payload {
+ __u64 sideband;
+ __u32 status;
+ __u32 padding;
+};
+
+/**
+ * struct base_gpu_queue_error_fatal_payload - Unrecoverable fault
+ * error information related to GPU command queue.
+ *
+ * @sideband: Additional information about this unrecoverable fault.
+ * @status: Unrecoverable fault information.
+ * This consists of exception type (least significant byte) and
+ * data (remaining bytes). One example of exception type is
+ * CS_INVALID_INSTRUCTION (0x49).
+ * @csi_index: Index of the CSF interface the queue is bound to.
+ * @padding: Padding to make multiple of 64bits
+ */
+struct base_gpu_queue_error_fatal_payload {
+ __u64 sideband;
+ __u32 status;
+ __u8 csi_index;
+ __u8 padding[3];
+};
+
+/**
+ * enum base_gpu_queue_group_error_type - GPU Fatal error type.
+ *
+ * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL: Fatal error associated with GPU
+ * command queue group.
+ * @BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL: Fatal error associated with GPU
+ * command queue.
+ * @BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT: Fatal error associated with
+ * progress timeout.
+ * @BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM: Fatal error due to running out
+ * of tiler heap memory.
+ * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT: The number of fatal error types
+ *
+ * This type is used for &struct base_gpu_queue_group_error.error_type.
+ */
+enum base_gpu_queue_group_error_type {
+ BASE_GPU_QUEUE_GROUP_ERROR_FATAL = 0,
+ BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL,
+ BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT,
+ BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM,
+ BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT
+};
+
+/**
+ * struct base_gpu_queue_group_error - Unrecoverable fault information
+ * @error_type: Error type of @base_gpu_queue_group_error_type
+ * indicating which field in union payload is filled
+ * @padding: Unused bytes for 64bit boundary
+ * @payload: Input Payload
+ * @payload.fatal_group: Unrecoverable fault error associated with
+ * GPU command queue group
+ * @payload.fatal_queue: Unrecoverable fault error associated with command queue
+ */
+struct base_gpu_queue_group_error {
+ __u8 error_type;
+ __u8 padding[7];
+ union {
+ struct base_gpu_queue_group_error_fatal_payload fatal_group;
+ struct base_gpu_queue_error_fatal_payload fatal_queue;
+ } payload;
+};
+
+/**
+ * enum base_csf_notification_type - Notification type
+ *
+ * @BASE_CSF_NOTIFICATION_EVENT: Notification with kernel event
+ * @BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR: Notification with GPU fatal
+ * error
+ * @BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP: Notification with dumping cpu
+ * queue
+ * @BASE_CSF_NOTIFICATION_COUNT: The number of notification type
+ *
+ * This type is used for &struct base_csf_notification.type.
+ */
+enum base_csf_notification_type {
+ BASE_CSF_NOTIFICATION_EVENT = 0,
+ BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR,
+ BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP,
+ BASE_CSF_NOTIFICATION_COUNT
+};
+
+/**
+ * struct base_csf_notification - Event or error notification
+ *
+ * @type: Notification type of @base_csf_notification_type
+ * @padding: Padding for 64bit boundary
+ * @payload: Input Payload
+ * @payload.align: To fit the struct into a 64-byte cache line
+ * @payload.csg_error: CSG error
+ * @payload.csg_error.handle: Handle of GPU command queue group associated with
+ * fatal error
+ * @payload.csg_error.padding: Padding
+ * @payload.csg_error.error: Unrecoverable fault error
+ *
+ */
+struct base_csf_notification {
+ __u8 type;
+ __u8 padding[7];
+ union {
+ struct {
+ __u8 handle;
+ __u8 padding[7];
+ struct base_gpu_queue_group_error error;
+ } csg_error;
+
+ __u8 align[56];
+ } payload;
+};
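/* A minimal sketch of decoding a notification; names other than the
 * UAPI types are illustrative. Only GPU_QUEUE_GROUP_ERROR carries the
 * csg_error payload.
 */
static inline void base_csf_decode(const struct base_csf_notification *n)
{
	const struct base_gpu_queue_group_error *e;

	if (n->type != BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR)
		return;

	e = &n->payload.csg_error.error;
	switch (e->error_type) {
	case BASE_GPU_QUEUE_GROUP_ERROR_FATAL:
		/* e->payload.fatal_group holds the fault details */
		break;
	case BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL:
		/* e->payload.fatal_queue holds the fault details */
		break;
	default:
		/* BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT / _TILER_HEAP_OOM */
		break;
	}
}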
+
+#endif /* _UAPI_BASE_CSF_KERNEL_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_control_registers.h b/common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_control_registers.h
new file mode 100644
index 0000000..570cba8
--- /dev/null
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_control_registers.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _UAPI_GPU_CSF_CONTROL_REGISTERS_H_
+#define _UAPI_GPU_CSF_CONTROL_REGISTERS_H_
+
+/* GPU_REGISTERS register offsets */
+#define GPU_CONTROL_MCU 0x3000 /* () MCU control registers */
+
+#endif /* _UAPI_GPU_CSF_CONTROL_REGISTERS_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_registers.h b/common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_registers.h
new file mode 100644
index 0000000..f233a0d
--- /dev/null
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_registers.h
@@ -0,0 +1,1414 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * (C) COPYRIGHT 2018-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _UAPI_GPU_CSF_REGISTERS_H_
+#define _UAPI_GPU_CSF_REGISTERS_H_
+
+/*
+ * Begin register sets
+ */
+
+/* DOORBELLS base address */
+#define DOORBELLS_BASE 0x0080000
+#define DOORBELLS_REG(r) (DOORBELLS_BASE + (r))
+
+/* CS_KERNEL_INPUT_BLOCK base address */
+#define CS_KERNEL_INPUT_BLOCK_BASE 0x0000
+#define CS_KERNEL_INPUT_BLOCK_REG(r) (CS_KERNEL_INPUT_BLOCK_BASE + (r))
+
+/* CS_KERNEL_OUTPUT_BLOCK base address */
+#define CS_KERNEL_OUTPUT_BLOCK_BASE 0x0000
+#define CS_KERNEL_OUTPUT_BLOCK_REG(r) (CS_KERNEL_OUTPUT_BLOCK_BASE + (r))
+
+/* CS_USER_INPUT_BLOCK base address */
+#define CS_USER_INPUT_BLOCK_BASE 0x0000
+#define CS_USER_INPUT_BLOCK_REG(r) (CS_USER_INPUT_BLOCK_BASE + (r))
+
+/* CS_USER_OUTPUT_BLOCK base address */
+#define CS_USER_OUTPUT_BLOCK_BASE 0x0000
+#define CS_USER_OUTPUT_BLOCK_REG(r) (CS_USER_OUTPUT_BLOCK_BASE + (r))
+
+/* CSG_INPUT_BLOCK base address */
+#define CSG_INPUT_BLOCK_BASE 0x0000
+#define CSG_INPUT_BLOCK_REG(r) (CSG_INPUT_BLOCK_BASE + (r))
+
+/* CSG_OUTPUT_BLOCK base address */
+#define CSG_OUTPUT_BLOCK_BASE 0x0000
+#define CSG_OUTPUT_BLOCK_REG(r) (CSG_OUTPUT_BLOCK_BASE + (r))
+
+/* GLB_CONTROL_BLOCK base address */
+#define GLB_CONTROL_BLOCK_BASE 0x04000000
+#define GLB_CONTROL_BLOCK_REG(r) (GLB_CONTROL_BLOCK_BASE + (r))
+
+/* GLB_INPUT_BLOCK base address */
+#define GLB_INPUT_BLOCK_BASE 0x0000
+#define GLB_INPUT_BLOCK_REG(r) (GLB_INPUT_BLOCK_BASE + (r))
+
+/* GLB_OUTPUT_BLOCK base address */
+#define GLB_OUTPUT_BLOCK_BASE 0x0000
+#define GLB_OUTPUT_BLOCK_REG(r) (GLB_OUTPUT_BLOCK_BASE + (r))
+
+/* USER base address */
+#define USER_BASE 0x0010000
+#define USER_REG(r) (USER_BASE + (r))
+
+/* End register sets */
+
+/*
+ * Begin register offsets
+ */
+
+/* DOORBELLS register offsets */
+#define DOORBELL_0 0x0000 /* () Doorbell 0 register */
+#define DOORBELL(n) (DOORBELL_0 + (n)*65536)
+#define DOORBELL_REG(n, r) (DOORBELL(n) + DOORBELL_BLOCK_REG(r))
+#define DOORBELL_COUNT 1024
+
+/* DOORBELL_BLOCK register offsets */
+#define DB_BLK_DOORBELL 0x0000 /* (WO) Doorbell request */
+
+/* CS_KERNEL_INPUT_BLOCK register offsets */
+#define CS_REQ 0x0000 /* () CS request flags */
+#define CS_CONFIG 0x0004 /* () CS configuration */
+#define CS_ACK_IRQ_MASK 0x000C /* () Command stream interrupt mask */
+#define CS_BASE_LO 0x0010 /* () Base pointer for the ring buffer, low word */
+#define CS_BASE_HI 0x0014 /* () Base pointer for the ring buffer, high word */
+#define CS_SIZE 0x0018 /* () Size of the ring buffer */
+#define CS_TILER_HEAP_START_LO 0x0020 /* () Pointer to heap start, low word */
+#define CS_TILER_HEAP_START_HI 0x0024 /* () Pointer to heap start, high word */
+#define CS_TILER_HEAP_END_LO 0x0028 /* () Tiler heap descriptor address, low word */
+#define CS_TILER_HEAP_END_HI 0x002C /* () Tiler heap descriptor address, high word */
+#define CS_USER_INPUT_LO 0x0030 /* () CS user mode input page address, low word */
+#define CS_USER_INPUT_HI 0x0034 /* () CS user mode input page address, high word */
+#define CS_USER_OUTPUT_LO 0x0038 /* () CS user mode output page address, low word */
+#define CS_USER_OUTPUT_HI 0x003C /* () CS user mode output page address, high word */
+
+/* CS_KERNEL_OUTPUT_BLOCK register offsets */
+#define CS_ACK 0x0000 /* () CS acknowledge flags */
+#define CS_STATUS_CMD_PTR_LO 0x0040 /* () Program pointer current value, low word */
+#define CS_STATUS_CMD_PTR_HI 0x0044 /* () Program pointer current value, high word */
+#define CS_STATUS_WAIT 0x0048 /* () Wait condition status register */
+#define CS_STATUS_REQ_RESOURCE 0x004C /* () Indicates the resources requested by the CS */
+#define CS_STATUS_WAIT_SYNC_POINTER_LO 0x0050 /* () Sync object pointer, low word */
+#define CS_STATUS_WAIT_SYNC_POINTER_HI 0x0054 /* () Sync object pointer, high word */
+#define CS_STATUS_WAIT_SYNC_VALUE 0x0058 /* () Sync object test value */
+#define CS_STATUS_SCOREBOARDS 0x005C /* () Scoreboard status */
+#define CS_STATUS_BLOCKED_REASON 0x0060 /* () Blocked reason */
+#define CS_FAULT 0x0080 /* () Recoverable fault information */
+#define CS_FATAL 0x0084 /* () Unrecoverable fault information */
+#define CS_FAULT_INFO_LO 0x0088 /* () Additional information about a recoverable fault, low word */
+#define CS_FAULT_INFO_HI 0x008C /* () Additional information about a recoverable fault, high word */
+#define CS_FATAL_INFO_LO 0x0090 /* () Additional information about a non-recoverable fault, low word */
+#define CS_FATAL_INFO_HI 0x0094 /* () Additional information about a non-recoverable fault, high word */
+#define CS_HEAP_VT_START 0x00C0 /* () Number of vertex/tiling operations started */
+#define CS_HEAP_VT_END 0x00C4 /* () Number of vertex/tiling operations completed */
+#define CS_HEAP_FRAG_END 0x00CC /* () Number of fragment operations completed */
+#define CS_HEAP_ADDRESS_LO 0x00D0 /* () Heap address, low word */
+#define CS_HEAP_ADDRESS_HI 0x00D4 /* () Heap address, high word */
+
+/* CS_USER_INPUT_BLOCK register offsets */
+#define CS_INSERT_LO 0x0000 /* () Current insert offset for ring buffer, low word */
+#define CS_INSERT_HI 0x0004 /* () Current insert offset for ring buffer, high word */
+#define CS_EXTRACT_INIT_LO 0x0008 /* () Initial extract offset for ring buffer, low word */
+#define CS_EXTRACT_INIT_HI 0x000C /* () Initial extract offset for ring buffer, high word */
+
+/* CS_USER_OUTPUT_BLOCK register offsets */
+#define CS_EXTRACT_LO 0x0000 /* () Current extract offset for ring buffer, low word */
+#define CS_EXTRACT_HI 0x0004 /* () Current extract offset for ring buffer, high word */
+#define CS_ACTIVE 0x0008 /* () Initial extract offset when the CS is started */
+
+/* CSG_INPUT_BLOCK register offsets */
+#define CSG_REQ 0x0000 /* () CSG request */
+#define CSG_ACK_IRQ_MASK 0x0004 /* () CSG acknowledge interrupt mask */
+#define CSG_DB_REQ 0x0008 /* () CSG doorbell request */
+#define CSG_IRQ_ACK 0x000C /* () CS IRQ acknowledge */
+#define CSG_ALLOW_COMPUTE_LO 0x0020 /* () Allowed compute endpoints, low word */
+#define CSG_ALLOW_COMPUTE_HI 0x0024 /* () Allowed compute endpoints, high word */
+#define CSG_ALLOW_FRAGMENT_LO 0x0028 /* () Allowed fragment endpoints, low word */
+#define CSG_ALLOW_FRAGMENT_HI 0x002C /* () Allowed fragment endpoints, high word */
+#define CSG_ALLOW_OTHER 0x0030 /* () Allowed other endpoints */
+#define CSG_EP_REQ 0x0034 /* () Maximum number of endpoints allowed */
+#define CSG_SUSPEND_BUF_LO 0x0040 /* () Normal mode suspend buffer, low word */
+#define CSG_SUSPEND_BUF_HI 0x0044 /* () Normal mode suspend buffer, high word */
+#define CSG_PROTM_SUSPEND_BUF_LO 0x0048 /* () Protected mode suspend buffer, low word */
+#define CSG_PROTM_SUSPEND_BUF_HI 0x004C /* () Protected mode suspend buffer, high word */
+#define CSG_CONFIG 0x0050 /* () CSG configuration options */
+#define CSG_ITER_TRACE_CONFIG 0x0054 /* () CSG trace configuration */
+
+/* CSG_OUTPUT_BLOCK register offsets */
+#define CSG_ACK 0x0000 /* () CSG acknowledge flags */
+#define CSG_DB_ACK 0x0008 /* () CS kernel doorbell acknowledge flags */
+#define CSG_IRQ_REQ 0x000C /* () CS interrupt request flags */
+#define CSG_STATUS_EP_CURRENT 0x0010 /* () Endpoint allocation status register */
+#define CSG_STATUS_EP_REQ 0x0014 /* () Endpoint request status register */
+#define CSG_RESOURCE_DEP 0x001C /* () Current resource dependencies */
+
+/* GLB_CONTROL_BLOCK register offsets */
+#define GLB_VERSION 0x0000 /* () Global interface version */
+#define GLB_FEATURES 0x0004 /* () Global interface features */
+#define GLB_INPUT_VA 0x0008 /* () Address of GLB_INPUT_BLOCK */
+#define GLB_OUTPUT_VA 0x000C /* () Address of GLB_OUTPUT_BLOCK */
+#define GLB_GROUP_NUM 0x0010 /* () Number of CSG interfaces */
+#define GLB_GROUP_STRIDE 0x0014 /* () Stride between CSG interfaces */
+#define GLB_PRFCNT_SIZE 0x0018 /* () Size of CSF performance counters */
+#define GLB_INSTR_FEATURES 0x001C /* () TRACE_POINT instrumentation features */
+#define GROUP_CONTROL_0 0x1000 /* () CSG control and capabilities */
+#define GROUP_CONTROL(n) (GROUP_CONTROL_0 + (n)*256)
+#define GROUP_CONTROL_REG(n, r) (GROUP_CONTROL(n) + GROUP_CONTROL_BLOCK_REG(r))
+#define GROUP_CONTROL_COUNT 16
+
+/* STREAM_CONTROL_BLOCK register offsets */
+#define STREAM_FEATURES 0x0000 /* () CSI features */
+#define STREAM_INPUT_VA 0x0004 /* () Address of CS_KERNEL_INPUT_BLOCK */
+#define STREAM_OUTPUT_VA 0x0008 /* () Address of CS_KERNEL_OUTPUT_BLOCK */
+
+/* GROUP_CONTROL_BLOCK register offsets */
+#define GROUP_FEATURES 0x0000 /* () CSG interface features */
+#define GROUP_INPUT_VA 0x0004 /* () Address of CSG_INPUT_BLOCK */
+#define GROUP_OUTPUT_VA 0x0008 /* () Address of CSG_OUTPUT_BLOCK */
+#define GROUP_SUSPEND_SIZE 0x000C /* () Size of CSG suspend buffer */
+#define GROUP_PROTM_SUSPEND_SIZE 0x0010 /* () Size of CSG protected-mode suspend buffer */
+#define GROUP_STREAM_NUM 0x0014 /* () Number of CS interfaces */
+#define GROUP_STREAM_STRIDE 0x0018 /* () Stride between CS interfaces */
+#define STREAM_CONTROL_0 0x0040 /* () CS control and capabilities */
+#define STREAM_CONTROL(n) (STREAM_CONTROL_0 + (n)*12)
+#define STREAM_CONTROL_REG(n, r) (STREAM_CONTROL(n) + STREAM_CONTROL_BLOCK_REG(r))
+#define STREAM_CONTROL_COUNT 16
+
+/* GLB_INPUT_BLOCK register offsets */
+#define GLB_REQ 0x0000 /* () Global request */
+#define GLB_ACK_IRQ_MASK 0x0004 /* () Global acknowledge interrupt mask */
+#define GLB_DB_REQ 0x0008 /* () Global doorbell request */
+#define GLB_PROGRESS_TIMER 0x0010 /* () Global progress timeout */
+#define GLB_PWROFF_TIMER 0x0014 /* () Global shader core power off timer */
+#define GLB_ALLOC_EN_LO 0x0018 /* () Global shader core allocation enable mask, low word */
+#define GLB_ALLOC_EN_HI 0x001C /* () Global shader core allocation enable mask, high word */
+#define GLB_PROTM_COHERENCY 0x0020 /* () Configure COHERENCY_ENABLE register value to use in protected mode execution */
+
+#define GLB_PRFCNT_JASID 0x0024 /* () Performance counter address space */
+#define GLB_PRFCNT_BASE_LO 0x0028 /* () Performance counter buffer address, low word */
+#define GLB_PRFCNT_BASE_HI 0x002C /* () Performance counter buffer address, high word */
+#define GLB_PRFCNT_EXTRACT 0x0030 /* () Performance counter buffer extract index */
+#define GLB_PRFCNT_CONFIG 0x0040 /* () Performance counter configuration */
+#define GLB_PRFCNT_CSG_SELECT 0x0044 /* () CSG performance counting enable */
+#define GLB_PRFCNT_FW_EN 0x0048 /* () Performance counter enable for firmware */
+#define GLB_PRFCNT_CSG_EN 0x004C /* () Performance counter enable for CSG */
+#define GLB_PRFCNT_CSF_EN 0x0050 /* () Performance counter enable for CSF */
+#define GLB_PRFCNT_SHADER_EN 0x0054 /* () Performance counter enable for shader cores */
+#define GLB_PRFCNT_TILER_EN 0x0058 /* () Performance counter enable for tiler */
+#define GLB_PRFCNT_MMU_L2_EN 0x005C /* () Performance counter enable for MMU/L2 cache */
+
+#define GLB_DEBUG_FWUTF_DESTROY 0x0FE0 /* () Test fixture destroy function address */
+#define GLB_DEBUG_FWUTF_TEST 0x0FE4 /* () Test index */
+#define GLB_DEBUG_FWUTF_FIXTURE 0x0FE8 /* () Test fixture index */
+#define GLB_DEBUG_FWUTF_CREATE 0x0FEC /* () Test fixture create function address */
+#define GLB_DEBUG_ACK_IRQ_MASK 0x0FF8 /* () Global debug acknowledge interrupt mask */
+#define GLB_DEBUG_REQ 0x0FFC /* () Global debug request */
+
+/* GLB_OUTPUT_BLOCK register offsets */
+#define GLB_ACK 0x0000 /* () Global acknowledge */
+#define GLB_DB_ACK 0x0008 /* () Global doorbell acknowledge */
+#define GLB_HALT_STATUS 0x0010 /* () Global halt status */
+#define GLB_PRFCNT_STATUS 0x0014 /* () Performance counter status */
+#define GLB_PRFCNT_INSERT 0x0018 /* () Performance counter buffer insert index */
+#define GLB_DEBUG_FWUTF_RESULT 0x0FE0 /* () Firmware debug test result */
+#define GLB_DEBUG_ACK 0x0FFC /* () Global debug acknowledge */
+
+/* USER register offsets */
+#define LATEST_FLUSH 0x0000 /* () Flush ID of latest clean-and-invalidate operation */
+
+/* End register offsets */
+
+/* CS_KERNEL_INPUT_BLOCK register set definitions */
+/* GLB_VERSION register */
+#define GLB_VERSION_PATCH_SHIFT (0)
+#define GLB_VERSION_MINOR_SHIFT (16)
+#define GLB_VERSION_MAJOR_SHIFT (24)
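/* A minimal sketch of decoding GLB_VERSION. The field widths are
 * implied by the shifts above: patch in bits [15:0], minor in
 * bits [23:16], major in bits [31:24].
 */
static inline void glb_version_decode(__u32 version, __u32 *major,
				      __u32 *minor, __u32 *patch)
{
	*major = version >> GLB_VERSION_MAJOR_SHIFT;
	*minor = (version >> GLB_VERSION_MINOR_SHIFT) & 0xFF;
	*patch = version & 0xFFFF;
}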
+
+/* CS_REQ register */
+#define CS_REQ_STATE_SHIFT 0
+#define CS_REQ_STATE_MASK (0x7 << CS_REQ_STATE_SHIFT)
+#define CS_REQ_STATE_GET(reg_val) (((reg_val)&CS_REQ_STATE_MASK) >> CS_REQ_STATE_SHIFT)
+#define CS_REQ_STATE_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_STATE_MASK) | (((value) << CS_REQ_STATE_SHIFT) & CS_REQ_STATE_MASK))
+/* CS_REQ_STATE values */
+#define CS_REQ_STATE_STOP 0x0
+#define CS_REQ_STATE_START 0x1
+/* End of CS_REQ_STATE values */
+#define CS_REQ_EXTRACT_EVENT_SHIFT 4
+#define CS_REQ_EXTRACT_EVENT_MASK (0x1 << CS_REQ_EXTRACT_EVENT_SHIFT)
+#define CS_REQ_EXTRACT_EVENT_GET(reg_val) (((reg_val)&CS_REQ_EXTRACT_EVENT_MASK) >> CS_REQ_EXTRACT_EVENT_SHIFT)
+#define CS_REQ_EXTRACT_EVENT_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_EXTRACT_EVENT_MASK) | (((value) << CS_REQ_EXTRACT_EVENT_SHIFT) & CS_REQ_EXTRACT_EVENT_MASK))
+
+#define CS_REQ_IDLE_SYNC_WAIT_SHIFT 8
+#define CS_REQ_IDLE_SYNC_WAIT_MASK (0x1 << CS_REQ_IDLE_SYNC_WAIT_SHIFT)
+#define CS_REQ_IDLE_SYNC_WAIT_GET(reg_val) (((reg_val)&CS_REQ_IDLE_SYNC_WAIT_MASK) >> CS_REQ_IDLE_SYNC_WAIT_SHIFT)
+#define CS_REQ_IDLE_SYNC_WAIT_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_IDLE_SYNC_WAIT_MASK) | \
+ (((value) << CS_REQ_IDLE_SYNC_WAIT_SHIFT) & CS_REQ_IDLE_SYNC_WAIT_MASK))
+#define CS_REQ_IDLE_PROTM_PEND_SHIFT 9
+#define CS_REQ_IDLE_PROTM_PEND_MASK (0x1 << CS_REQ_IDLE_PROTM_PEND_SHIFT)
+#define CS_REQ_IDLE_PROTM_PEND_GET(reg_val) (((reg_val)&CS_REQ_IDLE_PROTM_PEND_MASK) >> CS_REQ_IDLE_PROTM_PEND_SHIFT)
+#define CS_REQ_IDLE_PROTM_PEND_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_IDLE_PROTM_PEND_MASK) | \
+ (((value) << CS_REQ_IDLE_PROTM_PEND_SHIFT) & CS_REQ_IDLE_PROTM_PEND_MASK))
+#define CS_REQ_IDLE_EMPTY_SHIFT 10
+#define CS_REQ_IDLE_EMPTY_MASK (0x1 << CS_REQ_IDLE_EMPTY_SHIFT)
+#define CS_REQ_IDLE_EMPTY_GET(reg_val) (((reg_val)&CS_REQ_IDLE_EMPTY_MASK) >> CS_REQ_IDLE_EMPTY_SHIFT)
+#define CS_REQ_IDLE_EMPTY_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_IDLE_EMPTY_MASK) | (((value) << CS_REQ_IDLE_EMPTY_SHIFT) & CS_REQ_IDLE_EMPTY_MASK))
+#define CS_REQ_IDLE_RESOURCE_REQ_SHIFT 11
+#define CS_REQ_IDLE_RESOURCE_REQ_MASK (0x1 << CS_REQ_IDLE_RESOURCE_REQ_SHIFT)
+#define CS_REQ_IDLE_RESOURCE_REQ_GET(reg_val) \
+ (((reg_val)&CS_REQ_IDLE_RESOURCE_REQ_MASK) >> CS_REQ_IDLE_RESOURCE_REQ_SHIFT)
+#define CS_REQ_IDLE_RESOURCE_REQ_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_IDLE_RESOURCE_REQ_MASK) | \
+ (((value) << CS_REQ_IDLE_RESOURCE_REQ_SHIFT) & CS_REQ_IDLE_RESOURCE_REQ_MASK))
+#define CS_REQ_TILER_OOM_SHIFT 26
+#define CS_REQ_TILER_OOM_MASK (0x1 << CS_REQ_TILER_OOM_SHIFT)
+#define CS_REQ_TILER_OOM_GET(reg_val) (((reg_val)&CS_REQ_TILER_OOM_MASK) >> CS_REQ_TILER_OOM_SHIFT)
+#define CS_REQ_TILER_OOM_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_TILER_OOM_MASK) | (((value) << CS_REQ_TILER_OOM_SHIFT) & CS_REQ_TILER_OOM_MASK))
+#define CS_REQ_PROTM_PEND_SHIFT 27
+#define CS_REQ_PROTM_PEND_MASK (0x1 << CS_REQ_PROTM_PEND_SHIFT)
+#define CS_REQ_PROTM_PEND_GET(reg_val) (((reg_val)&CS_REQ_PROTM_PEND_MASK) >> CS_REQ_PROTM_PEND_SHIFT)
+#define CS_REQ_PROTM_PEND_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_PROTM_PEND_MASK) | (((value) << CS_REQ_PROTM_PEND_SHIFT) & CS_REQ_PROTM_PEND_MASK))
+#define CS_REQ_FATAL_SHIFT 30
+#define CS_REQ_FATAL_MASK (0x1 << CS_REQ_FATAL_SHIFT)
+#define CS_REQ_FATAL_GET(reg_val) (((reg_val)&CS_REQ_FATAL_MASK) >> CS_REQ_FATAL_SHIFT)
+#define CS_REQ_FATAL_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_FATAL_MASK) | (((value) << CS_REQ_FATAL_SHIFT) & CS_REQ_FATAL_MASK))
+#define CS_REQ_FAULT_SHIFT 31
+#define CS_REQ_FAULT_MASK (0x1 << CS_REQ_FAULT_SHIFT)
+#define CS_REQ_FAULT_GET(reg_val) (((reg_val)&CS_REQ_FAULT_MASK) >> CS_REQ_FAULT_SHIFT)
+#define CS_REQ_FAULT_SET(reg_val, value) \
+ (((reg_val) & ~CS_REQ_FAULT_MASK) | (((value) << CS_REQ_FAULT_SHIFT) & CS_REQ_FAULT_MASK))
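/* A minimal sketch of the read-modify-write pattern the _SET macros
 * are written for, here requesting a CS start. cs_req points at the
 * mapped CS_REQ word; doorbell and MMIO ordering details are omitted.
 */
static inline void cs_request_start(__u32 *cs_req)
{
	*cs_req = CS_REQ_STATE_SET(*cs_req, CS_REQ_STATE_START);
	/* Firmware observes the new request once the doorbell is rung. */
}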
+
+/* CS_CONFIG register */
+#define CS_CONFIG_PRIORITY_SHIFT 0
+#define CS_CONFIG_PRIORITY_MASK (0xF << CS_CONFIG_PRIORITY_SHIFT)
+#define CS_CONFIG_PRIORITY_GET(reg_val) (((reg_val)&CS_CONFIG_PRIORITY_MASK) >> CS_CONFIG_PRIORITY_SHIFT)
+#define CS_CONFIG_PRIORITY_SET(reg_val, value) \
+ (((reg_val) & ~CS_CONFIG_PRIORITY_MASK) | (((value) << CS_CONFIG_PRIORITY_SHIFT) & CS_CONFIG_PRIORITY_MASK))
+#define CS_CONFIG_USER_DOORBELL_SHIFT 8
+#define CS_CONFIG_USER_DOORBELL_MASK (0xFF << CS_CONFIG_USER_DOORBELL_SHIFT)
+#define CS_CONFIG_USER_DOORBELL_GET(reg_val) (((reg_val)&CS_CONFIG_USER_DOORBELL_MASK) >> CS_CONFIG_USER_DOORBELL_SHIFT)
+#define CS_CONFIG_USER_DOORBELL_SET(reg_val, value) \
+ (((reg_val) & ~CS_CONFIG_USER_DOORBELL_MASK) | \
+ (((value) << CS_CONFIG_USER_DOORBELL_SHIFT) & CS_CONFIG_USER_DOORBELL_MASK))
+
+/* CS_ACK_IRQ_MASK register */
+#define CS_ACK_IRQ_MASK_STATE_SHIFT 0
+#define CS_ACK_IRQ_MASK_STATE_MASK (0x7 << CS_ACK_IRQ_MASK_STATE_SHIFT)
+#define CS_ACK_IRQ_MASK_STATE_GET(reg_val) (((reg_val)&CS_ACK_IRQ_MASK_STATE_MASK) >> CS_ACK_IRQ_MASK_STATE_SHIFT)
+#define CS_ACK_IRQ_MASK_STATE_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_IRQ_MASK_STATE_MASK) | \
+ (((value) << CS_ACK_IRQ_MASK_STATE_SHIFT) & CS_ACK_IRQ_MASK_STATE_MASK))
+/* CS_ACK_IRQ_MASK_STATE values */
+#define CS_ACK_IRQ_MASK_STATE_DISABLED 0x0
+#define CS_ACK_IRQ_MASK_STATE_ENABLED 0x7
+/* End of CS_ACK_IRQ_MASK_STATE values */
+#define CS_ACK_IRQ_MASK_EXTRACT_EVENT_SHIFT 4
+#define CS_ACK_IRQ_MASK_EXTRACT_EVENT_MASK (0x1 << CS_ACK_IRQ_MASK_EXTRACT_EVENT_SHIFT)
+#define CS_ACK_IRQ_MASK_EXTRACT_EVENT_GET(reg_val) \
+ (((reg_val)&CS_ACK_IRQ_MASK_EXTRACT_EVENT_MASK) >> CS_ACK_IRQ_MASK_EXTRACT_EVENT_SHIFT)
+#define CS_ACK_IRQ_MASK_EXTRACT_EVENT_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_IRQ_MASK_EXTRACT_EVENT_MASK) | \
+ (((value) << CS_ACK_IRQ_MASK_EXTRACT_EVENT_SHIFT) & CS_ACK_IRQ_MASK_EXTRACT_EVENT_MASK))
+#define CS_ACK_IRQ_MASK_TILER_OOM_SHIFT 26
+#define CS_ACK_IRQ_MASK_TILER_OOM_MASK (0x1 << CS_ACK_IRQ_MASK_TILER_OOM_SHIFT)
+#define CS_ACK_IRQ_MASK_TILER_OOM_GET(reg_val) \
+ (((reg_val)&CS_ACK_IRQ_MASK_TILER_OOM_MASK) >> CS_ACK_IRQ_MASK_TILER_OOM_SHIFT)
+#define CS_ACK_IRQ_MASK_TILER_OOM_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_IRQ_MASK_TILER_OOM_MASK) | \
+ (((value) << CS_ACK_IRQ_MASK_TILER_OOM_SHIFT) & CS_ACK_IRQ_MASK_TILER_OOM_MASK))
+#define CS_ACK_IRQ_MASK_PROTM_PEND_SHIFT 27
+#define CS_ACK_IRQ_MASK_PROTM_PEND_MASK (0x1 << CS_ACK_IRQ_MASK_PROTM_PEND_SHIFT)
+#define CS_ACK_IRQ_MASK_PROTM_PEND_GET(reg_val) \
+ (((reg_val)&CS_ACK_IRQ_MASK_PROTM_PEND_MASK) >> CS_ACK_IRQ_MASK_PROTM_PEND_SHIFT)
+#define CS_ACK_IRQ_MASK_PROTM_PEND_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_IRQ_MASK_PROTM_PEND_MASK) | \
+ (((value) << CS_ACK_IRQ_MASK_PROTM_PEND_SHIFT) & CS_ACK_IRQ_MASK_PROTM_PEND_MASK))
+#define CS_ACK_IRQ_MASK_FATAL_SHIFT 30
+#define CS_ACK_IRQ_MASK_FATAL_MASK (0x1 << CS_ACK_IRQ_MASK_FATAL_SHIFT)
+#define CS_ACK_IRQ_MASK_FATAL_GET(reg_val) (((reg_val)&CS_ACK_IRQ_MASK_FATAL_MASK) >> CS_ACK_IRQ_MASK_FATAL_SHIFT)
+#define CS_ACK_IRQ_MASK_FATAL_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_IRQ_MASK_FATAL_MASK) | \
+ (((value) << CS_ACK_IRQ_MASK_FATAL_SHIFT) & CS_ACK_IRQ_MASK_FATAL_MASK))
+#define CS_ACK_IRQ_MASK_FAULT_SHIFT 31
+#define CS_ACK_IRQ_MASK_FAULT_MASK (0x1 << CS_ACK_IRQ_MASK_FAULT_SHIFT)
+#define CS_ACK_IRQ_MASK_FAULT_GET(reg_val) (((reg_val)&CS_ACK_IRQ_MASK_FAULT_MASK) >> CS_ACK_IRQ_MASK_FAULT_SHIFT)
+#define CS_ACK_IRQ_MASK_FAULT_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_IRQ_MASK_FAULT_MASK) | \
+ (((value) << CS_ACK_IRQ_MASK_FAULT_SHIFT) & CS_ACK_IRQ_MASK_FAULT_MASK))
+
+/* CS_BASE register */
+#define CS_BASE_POINTER_SHIFT 0
+#define CS_BASE_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_BASE_POINTER_SHIFT)
+#define CS_BASE_POINTER_GET(reg_val) (((reg_val)&CS_BASE_POINTER_MASK) >> CS_BASE_POINTER_SHIFT)
+#define CS_BASE_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_BASE_POINTER_MASK) | (((value) << CS_BASE_POINTER_SHIFT) & CS_BASE_POINTER_MASK))
+
+/* CS_SIZE register */
+#define CS_SIZE_SIZE_SHIFT 0
+#define CS_SIZE_SIZE_MASK (0xFFFFFFFF << CS_SIZE_SIZE_SHIFT)
+#define CS_SIZE_SIZE_GET(reg_val) (((reg_val)&CS_SIZE_SIZE_MASK) >> CS_SIZE_SIZE_SHIFT)
+#define CS_SIZE_SIZE_SET(reg_val, value) \
+ (((reg_val) & ~CS_SIZE_SIZE_MASK) | (((value) << CS_SIZE_SIZE_SHIFT) & CS_SIZE_SIZE_MASK))
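/* A minimal sketch of splitting a 64-bit value into the _LO/_HI word
 * pair used by registers such as CS_BASE_LO/CS_BASE_HI above.
 */
static inline void cs_reg_split64(__u64 value, __u32 *lo, __u32 *hi)
{
	*lo = (__u32)(value & 0xFFFFFFFFu);
	*hi = (__u32)(value >> 32);
}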
+
+/* CS_TILER_HEAP_START register */
+#define CS_TILER_HEAP_START_POINTER_SHIFT 0
+#define CS_TILER_HEAP_START_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_TILER_HEAP_START_POINTER_SHIFT)
+#define CS_TILER_HEAP_START_POINTER_GET(reg_val) \
+ (((reg_val)&CS_TILER_HEAP_START_POINTER_MASK) >> CS_TILER_HEAP_START_POINTER_SHIFT)
+#define CS_TILER_HEAP_START_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_TILER_HEAP_START_POINTER_MASK) | \
+ (((value) << CS_TILER_HEAP_START_POINTER_SHIFT) & CS_TILER_HEAP_START_POINTER_MASK))
+/* HeapChunkPointer nested in CS_TILER_HEAP_START_POINTER */
+/* End of HeapChunkPointer nested in CS_TILER_HEAP_START_POINTER */
+
+/* CS_TILER_HEAP_END register */
+#define CS_TILER_HEAP_END_POINTER_SHIFT 0
+#define CS_TILER_HEAP_END_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_TILER_HEAP_END_POINTER_SHIFT)
+#define CS_TILER_HEAP_END_POINTER_GET(reg_val) \
+ (((reg_val)&CS_TILER_HEAP_END_POINTER_MASK) >> CS_TILER_HEAP_END_POINTER_SHIFT)
+#define CS_TILER_HEAP_END_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_TILER_HEAP_END_POINTER_MASK) | \
+ (((value) << CS_TILER_HEAP_END_POINTER_SHIFT) & CS_TILER_HEAP_END_POINTER_MASK))
+/* HeapChunkPointer nested in CS_TILER_HEAP_END_POINTER */
+/* End of HeapChunkPointer nested in CS_TILER_HEAP_END_POINTER */
+
+/* CS_USER_INPUT register */
+#define CS_USER_INPUT_POINTER_SHIFT 0
+#define CS_USER_INPUT_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_USER_INPUT_POINTER_SHIFT)
+#define CS_USER_INPUT_POINTER_GET(reg_val) (((reg_val)&CS_USER_INPUT_POINTER_MASK) >> CS_USER_INPUT_POINTER_SHIFT)
+#define CS_USER_INPUT_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_USER_INPUT_POINTER_MASK) | \
+ (((value) << CS_USER_INPUT_POINTER_SHIFT) & CS_USER_INPUT_POINTER_MASK))
+
+/* CS_USER_OUTPUT register */
+#define CS_USER_OUTPUT_POINTER_SHIFT 0
+#define CS_USER_OUTPUT_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_USER_OUTPUT_POINTER_SHIFT)
+#define CS_USER_OUTPUT_POINTER_GET(reg_val) (((reg_val)&CS_USER_OUTPUT_POINTER_MASK) >> CS_USER_OUTPUT_POINTER_SHIFT)
+#define CS_USER_OUTPUT_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_USER_OUTPUT_POINTER_MASK) | \
+ (((value) << CS_USER_OUTPUT_POINTER_SHIFT) & CS_USER_OUTPUT_POINTER_MASK))
+/* End of CS_KERNEL_INPUT_BLOCK register set definitions */
+
+/* CS_KERNEL_OUTPUT_BLOCK register set definitions */
+
+/* CS_ACK register */
+#define CS_ACK_STATE_SHIFT 0
+#define CS_ACK_STATE_MASK (0x7 << CS_ACK_STATE_SHIFT)
+#define CS_ACK_STATE_GET(reg_val) (((reg_val)&CS_ACK_STATE_MASK) >> CS_ACK_STATE_SHIFT)
+#define CS_ACK_STATE_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_STATE_MASK) | (((value) << CS_ACK_STATE_SHIFT) & CS_ACK_STATE_MASK))
+/* CS_ACK_STATE values */
+#define CS_ACK_STATE_STOP 0x0
+#define CS_ACK_STATE_START 0x1
+/* End of CS_ACK_STATE values */
+#define CS_ACK_EXTRACT_EVENT_SHIFT 4
+#define CS_ACK_EXTRACT_EVENT_MASK (0x1 << CS_ACK_EXTRACT_EVENT_SHIFT)
+#define CS_ACK_EXTRACT_EVENT_GET(reg_val) (((reg_val)&CS_ACK_EXTRACT_EVENT_MASK) >> CS_ACK_EXTRACT_EVENT_SHIFT)
+#define CS_ACK_EXTRACT_EVENT_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_EXTRACT_EVENT_MASK) | (((value) << CS_ACK_EXTRACT_EVENT_SHIFT) & CS_ACK_EXTRACT_EVENT_MASK))
+#define CS_ACK_TILER_OOM_SHIFT 26
+#define CS_ACK_TILER_OOM_MASK (0x1 << CS_ACK_TILER_OOM_SHIFT)
+#define CS_ACK_TILER_OOM_GET(reg_val) (((reg_val)&CS_ACK_TILER_OOM_MASK) >> CS_ACK_TILER_OOM_SHIFT)
+#define CS_ACK_TILER_OOM_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_TILER_OOM_MASK) | (((value) << CS_ACK_TILER_OOM_SHIFT) & CS_ACK_TILER_OOM_MASK))
+#define CS_ACK_PROTM_PEND_SHIFT 27
+#define CS_ACK_PROTM_PEND_MASK (0x1 << CS_ACK_PROTM_PEND_SHIFT)
+#define CS_ACK_PROTM_PEND_GET(reg_val) (((reg_val)&CS_ACK_PROTM_PEND_MASK) >> CS_ACK_PROTM_PEND_SHIFT)
+#define CS_ACK_PROTM_PEND_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_PROTM_PEND_MASK) | (((value) << CS_ACK_PROTM_PEND_SHIFT) & CS_ACK_PROTM_PEND_MASK))
+#define CS_ACK_FATAL_SHIFT 30
+#define CS_ACK_FATAL_MASK (0x1 << CS_ACK_FATAL_SHIFT)
+#define CS_ACK_FATAL_GET(reg_val) (((reg_val)&CS_ACK_FATAL_MASK) >> CS_ACK_FATAL_SHIFT)
+#define CS_ACK_FATAL_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_FATAL_MASK) | (((value) << CS_ACK_FATAL_SHIFT) & CS_ACK_FATAL_MASK))
+#define CS_ACK_FAULT_SHIFT 31
+#define CS_ACK_FAULT_MASK (0x1 << CS_ACK_FAULT_SHIFT)
+#define CS_ACK_FAULT_GET(reg_val) (((reg_val)&CS_ACK_FAULT_MASK) >> CS_ACK_FAULT_SHIFT)
+#define CS_ACK_FAULT_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACK_FAULT_MASK) | (((value) << CS_ACK_FAULT_SHIFT) & CS_ACK_FAULT_MASK))
+
+/* CS_STATUS_CMD_PTR register */
+#define CS_STATUS_CMD_PTR_POINTER_SHIFT 0
+#define CS_STATUS_CMD_PTR_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_STATUS_CMD_PTR_POINTER_SHIFT)
+#define CS_STATUS_CMD_PTR_POINTER_GET(reg_val) \
+ (((reg_val)&CS_STATUS_CMD_PTR_POINTER_MASK) >> CS_STATUS_CMD_PTR_POINTER_SHIFT)
+#define CS_STATUS_CMD_PTR_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_CMD_PTR_POINTER_MASK) | \
+ (((value) << CS_STATUS_CMD_PTR_POINTER_SHIFT) & CS_STATUS_CMD_PTR_POINTER_MASK))
+
+/* CS_STATUS_WAIT register */
+#define CS_STATUS_WAIT_SB_MASK_SHIFT 0
+#define CS_STATUS_WAIT_SB_MASK_MASK (0xFFFF << CS_STATUS_WAIT_SB_MASK_SHIFT)
+#define CS_STATUS_WAIT_SB_MASK_GET(reg_val) (((reg_val)&CS_STATUS_WAIT_SB_MASK_MASK) >> CS_STATUS_WAIT_SB_MASK_SHIFT)
+#define CS_STATUS_WAIT_SB_MASK_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_SB_MASK_MASK) | \
+ (((value) << CS_STATUS_WAIT_SB_MASK_SHIFT) & CS_STATUS_WAIT_SB_MASK_MASK))
+#define CS_STATUS_WAIT_SYNC_WAIT_CONDITION_SHIFT 24
+#define CS_STATUS_WAIT_SYNC_WAIT_CONDITION_MASK (0xF << CS_STATUS_WAIT_SYNC_WAIT_CONDITION_SHIFT)
+#define CS_STATUS_WAIT_SYNC_WAIT_CONDITION_GET(reg_val) \
+ (((reg_val)&CS_STATUS_WAIT_SYNC_WAIT_CONDITION_MASK) >> CS_STATUS_WAIT_SYNC_WAIT_CONDITION_SHIFT)
+#define CS_STATUS_WAIT_SYNC_WAIT_CONDITION_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_SYNC_WAIT_CONDITION_MASK) | \
+ (((value) << CS_STATUS_WAIT_SYNC_WAIT_CONDITION_SHIFT) & CS_STATUS_WAIT_SYNC_WAIT_CONDITION_MASK))
+/* CS_STATUS_WAIT_SYNC_WAIT_CONDITION values */
+#define CS_STATUS_WAIT_SYNC_WAIT_CONDITION_LE 0x0
+#define CS_STATUS_WAIT_SYNC_WAIT_CONDITION_GT 0x1
+/* End of CS_STATUS_WAIT_SYNC_WAIT_CONDITION values */
+#define CS_STATUS_WAIT_PROGRESS_WAIT_SHIFT 28
+#define CS_STATUS_WAIT_PROGRESS_WAIT_MASK (0x1 << CS_STATUS_WAIT_PROGRESS_WAIT_SHIFT)
+#define CS_STATUS_WAIT_PROGRESS_WAIT_GET(reg_val) \
+ (((reg_val)&CS_STATUS_WAIT_PROGRESS_WAIT_MASK) >> CS_STATUS_WAIT_PROGRESS_WAIT_SHIFT)
+#define CS_STATUS_WAIT_PROGRESS_WAIT_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_PROGRESS_WAIT_MASK) | \
+ (((value) << CS_STATUS_WAIT_PROGRESS_WAIT_SHIFT) & CS_STATUS_WAIT_PROGRESS_WAIT_MASK))
+#define CS_STATUS_WAIT_PROTM_PEND_SHIFT 29
+#define CS_STATUS_WAIT_PROTM_PEND_MASK (0x1 << CS_STATUS_WAIT_PROTM_PEND_SHIFT)
+#define CS_STATUS_WAIT_PROTM_PEND_GET(reg_val) \
+ (((reg_val)&CS_STATUS_WAIT_PROTM_PEND_MASK) >> CS_STATUS_WAIT_PROTM_PEND_SHIFT)
+#define CS_STATUS_WAIT_PROTM_PEND_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_PROTM_PEND_MASK) | \
+ (((value) << CS_STATUS_WAIT_PROTM_PEND_SHIFT) & CS_STATUS_WAIT_PROTM_PEND_MASK))
+#define CS_STATUS_WAIT_SYNC_WAIT_SHIFT 31
+#define CS_STATUS_WAIT_SYNC_WAIT_MASK (0x1 << CS_STATUS_WAIT_SYNC_WAIT_SHIFT)
+#define CS_STATUS_WAIT_SYNC_WAIT_GET(reg_val) \
+ (((reg_val)&CS_STATUS_WAIT_SYNC_WAIT_MASK) >> CS_STATUS_WAIT_SYNC_WAIT_SHIFT)
+#define CS_STATUS_WAIT_SYNC_WAIT_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_SYNC_WAIT_MASK) | \
+ (((value) << CS_STATUS_WAIT_SYNC_WAIT_SHIFT) & CS_STATUS_WAIT_SYNC_WAIT_MASK))
+
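+/*
+ * Usage sketch (editorial illustration, not part of the upstream header):
+ * decoding a CS_STATUS_WAIT snapshot with the accessors above. The
+ * variable name 'wait_status' is hypothetical; the value would be read
+ * from the CS kernel output page.
+ *
+ *   if (CS_STATUS_WAIT_SYNC_WAIT_GET(wait_status)) {
+ *           __u32 cond = CS_STATUS_WAIT_SYNC_WAIT_CONDITION_GET(wait_status);
+ *           // cond is CS_STATUS_WAIT_SYNC_WAIT_CONDITION_LE or _GT
+ *   }
+ */
+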
+/* CS_STATUS_REQ_RESOURCE register */
+#define CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_SHIFT 0
+#define CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_MASK (0x1 << CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_GET(reg_val) \
+ (((reg_val)&CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_MASK) >> CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_MASK) | \
+ (((value) << CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_SHIFT) & CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_MASK))
+#define CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_SHIFT 1
+#define CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_MASK (0x1 << CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_GET(reg_val) \
+ (((reg_val)&CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_MASK) >> CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_MASK) | \
+ (((value) << CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_SHIFT) & CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_MASK))
+#define CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_SHIFT 2
+#define CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_MASK (0x1 << CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_GET(reg_val) \
+ (((reg_val)&CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_MASK) >> CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_MASK) | \
+ (((value) << CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_SHIFT) & CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_MASK))
+#define CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_SHIFT 3
+#define CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_MASK (0x1 << CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_GET(reg_val) \
+ (((reg_val)&CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_MASK) >> CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_SHIFT)
+#define CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_MASK) | \
+ (((value) << CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_SHIFT) & CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_MASK))
+
+/* CS_STATUS_WAIT_SYNC_POINTER register */
+#define CS_STATUS_WAIT_SYNC_POINTER_POINTER_SHIFT 0
+#define CS_STATUS_WAIT_SYNC_POINTER_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_STATUS_WAIT_SYNC_POINTER_POINTER_SHIFT)
+#define CS_STATUS_WAIT_SYNC_POINTER_POINTER_GET(reg_val) \
+ (((reg_val)&CS_STATUS_WAIT_SYNC_POINTER_POINTER_MASK) >> CS_STATUS_WAIT_SYNC_POINTER_POINTER_SHIFT)
+#define CS_STATUS_WAIT_SYNC_POINTER_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_SYNC_POINTER_POINTER_MASK) | \
+ (((value) << CS_STATUS_WAIT_SYNC_POINTER_POINTER_SHIFT) & CS_STATUS_WAIT_SYNC_POINTER_POINTER_MASK))
+
+/* CS_STATUS_WAIT_SYNC_VALUE register */
+#define CS_STATUS_WAIT_SYNC_VALUE_VALUE_SHIFT 0
+#define CS_STATUS_WAIT_SYNC_VALUE_VALUE_MASK (0xFFFFFFFF << CS_STATUS_WAIT_SYNC_VALUE_VALUE_SHIFT)
+#define CS_STATUS_WAIT_SYNC_VALUE_VALUE_GET(reg_val) \
+ (((reg_val)&CS_STATUS_WAIT_SYNC_VALUE_VALUE_MASK) >> CS_STATUS_WAIT_SYNC_VALUE_VALUE_SHIFT)
+#define CS_STATUS_WAIT_SYNC_VALUE_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_WAIT_SYNC_VALUE_VALUE_MASK) | \
+ (((value) << CS_STATUS_WAIT_SYNC_VALUE_VALUE_SHIFT) & CS_STATUS_WAIT_SYNC_VALUE_VALUE_MASK))
+
+/* CS_STATUS_SCOREBOARDS register */
+#define CS_STATUS_SCOREBOARDS_NONZERO_SHIFT (0)
+#define CS_STATUS_SCOREBOARDS_NONZERO_MASK \
+ ((0xFFFF) << CS_STATUS_SCOREBOARDS_NONZERO_SHIFT)
+#define CS_STATUS_SCOREBOARDS_NONZERO_GET(reg_val) \
+ (((reg_val)&CS_STATUS_SCOREBOARDS_NONZERO_MASK) >> \
+ CS_STATUS_SCOREBOARDS_NONZERO_SHIFT)
+#define CS_STATUS_SCOREBOARDS_NONZERO_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_SCOREBOARDS_NONZERO_MASK) | \
+ (((value) << CS_STATUS_SCOREBOARDS_NONZERO_SHIFT) & \
+ CS_STATUS_SCOREBOARDS_NONZERO_MASK))
+
+/* CS_STATUS_BLOCKED_REASON register */
+#define CS_STATUS_BLOCKED_REASON_REASON_SHIFT (0)
+#define CS_STATUS_BLOCKED_REASON_REASON_MASK \
+ ((0xF) << CS_STATUS_BLOCKED_REASON_REASON_SHIFT)
+#define CS_STATUS_BLOCKED_REASON_REASON_GET(reg_val) \
+ (((reg_val)&CS_STATUS_BLOCKED_REASON_REASON_MASK) >> \
+ CS_STATUS_BLOCKED_REASON_REASON_SHIFT)
+#define CS_STATUS_BLOCKED_REASON_REASON_SET(reg_val, value) \
+ (((reg_val) & ~CS_STATUS_BLOCKED_REASON_REASON_MASK) | \
+ (((value) << CS_STATUS_BLOCKED_REASON_REASON_SHIFT) & \
+ CS_STATUS_BLOCKED_REASON_REASON_MASK))
+/* CS_STATUS_BLOCKED_REASON_REASON values */
+#define CS_STATUS_BLOCKED_REASON_REASON_UNBLOCKED 0x0
+#define CS_STATUS_BLOCKED_REASON_REASON_WAIT 0x1
+#define CS_STATUS_BLOCKED_REASON_REASON_PROGRESS_WAIT 0x2
+#define CS_STATUS_BLOCKED_REASON_REASON_SYNC_WAIT 0x3
+#define CS_STATUS_BLOCKED_REASON_REASON_DEFERRED 0x4
+#define CS_STATUS_BLOCKED_REASON_REASON_RESOURCE 0x5
+#define CS_STATUS_BLOCKED_REASON_REASON_FLUSH 0x6
+/* End of CS_STATUS_BLOCKED_REASON_REASON values */
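+
+/*
+ * Usage sketch (editorial illustration): classifying a raw
+ * CS_STATUS_BLOCKED_REASON value against the reason codes above;
+ * 'blocked_reason' is a hypothetical register snapshot.
+ *
+ *   switch (CS_STATUS_BLOCKED_REASON_REASON_GET(blocked_reason)) {
+ *   case CS_STATUS_BLOCKED_REASON_REASON_SYNC_WAIT:
+ *           // blocked on a synchronization object; see CS_STATUS_WAIT
+ *           break;
+ *   default:
+ *           break;
+ *   }
+ */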
+
+/* CS_FAULT register */
+#define CS_FAULT_EXCEPTION_TYPE_SHIFT 0
+#define CS_FAULT_EXCEPTION_TYPE_MASK (0xFF << CS_FAULT_EXCEPTION_TYPE_SHIFT)
+#define CS_FAULT_EXCEPTION_TYPE_GET(reg_val) (((reg_val)&CS_FAULT_EXCEPTION_TYPE_MASK) >> CS_FAULT_EXCEPTION_TYPE_SHIFT)
+#define CS_FAULT_EXCEPTION_TYPE_SET(reg_val, value) \
+ (((reg_val) & ~CS_FAULT_EXCEPTION_TYPE_MASK) | \
+ (((value) << CS_FAULT_EXCEPTION_TYPE_SHIFT) & CS_FAULT_EXCEPTION_TYPE_MASK))
+/* CS_FAULT_EXCEPTION_TYPE values */
+#define CS_FAULT_EXCEPTION_TYPE_CS_RESOURCE_TERMINATED 0x0F
+#define CS_FAULT_EXCEPTION_TYPE_CS_INHERIT_FAULT 0x4B
+#define CS_FAULT_EXCEPTION_TYPE_INSTR_INVALID_PC 0x50
+#define CS_FAULT_EXCEPTION_TYPE_INSTR_INVALID_ENC 0x51
+#define CS_FAULT_EXCEPTION_TYPE_INSTR_BARRIER_FAULT 0x55
+#define CS_FAULT_EXCEPTION_TYPE_DATA_INVALID_FAULT 0x58
+#define CS_FAULT_EXCEPTION_TYPE_TILE_RANGE_FAULT 0x59
+#define CS_FAULT_EXCEPTION_TYPE_ADDR_RANGE_FAULT 0x5A
+#define CS_FAULT_EXCEPTION_TYPE_IMPRECISE_FAULT 0x5B
+#define CS_FAULT_EXCEPTION_TYPE_RESOURCE_EVICTION_TIMEOUT 0x69
+/* End of CS_FAULT_EXCEPTION_TYPE values */
+#define CS_FAULT_EXCEPTION_DATA_SHIFT 8
+#define CS_FAULT_EXCEPTION_DATA_MASK (0xFFFFFF << CS_FAULT_EXCEPTION_DATA_SHIFT)
+#define CS_FAULT_EXCEPTION_DATA_GET(reg_val) (((reg_val)&CS_FAULT_EXCEPTION_DATA_MASK) >> CS_FAULT_EXCEPTION_DATA_SHIFT)
+#define CS_FAULT_EXCEPTION_DATA_SET(reg_val, value) \
+ (((reg_val) & ~CS_FAULT_EXCEPTION_DATA_MASK) | \
+ (((value) << CS_FAULT_EXCEPTION_DATA_SHIFT) & CS_FAULT_EXCEPTION_DATA_MASK))
+
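+/*
+ * Usage sketch (editorial illustration): a CS_FAULT value packs an 8-bit
+ * exception type with 24 bits of type-specific data; 'fault' is a
+ * hypothetical register snapshot.
+ *
+ *   __u32 type = CS_FAULT_EXCEPTION_TYPE_GET(fault);
+ *   __u32 data = CS_FAULT_EXCEPTION_DATA_GET(fault); // meaning depends on type
+ */
+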
+/* CS_FATAL register */
+#define CS_FATAL_EXCEPTION_TYPE_SHIFT 0
+#define CS_FATAL_EXCEPTION_TYPE_MASK (0xFF << CS_FATAL_EXCEPTION_TYPE_SHIFT)
+#define CS_FATAL_EXCEPTION_TYPE_GET(reg_val) (((reg_val)&CS_FATAL_EXCEPTION_TYPE_MASK) >> CS_FATAL_EXCEPTION_TYPE_SHIFT)
+#define CS_FATAL_EXCEPTION_TYPE_SET(reg_val, value) \
+ (((reg_val) & ~CS_FATAL_EXCEPTION_TYPE_MASK) | \
+ (((value) << CS_FATAL_EXCEPTION_TYPE_SHIFT) & CS_FATAL_EXCEPTION_TYPE_MASK))
+/* CS_FATAL_EXCEPTION_TYPE values */
+#define CS_FATAL_EXCEPTION_TYPE_CS_CONFIG_FAULT 0x40
+#define CS_FATAL_EXCEPTION_TYPE_CS_ENDPOINT_FAULT 0x44
+#define CS_FATAL_EXCEPTION_TYPE_CS_BUS_FAULT 0x48
+#define CS_FATAL_EXCEPTION_TYPE_CS_INVALID_INSTRUCTION 0x49
+#define CS_FATAL_EXCEPTION_TYPE_CS_CALL_STACK_OVERFLOW 0x4A
+#define CS_FATAL_EXCEPTION_TYPE_FIRMWARE_INTERNAL_ERROR 0x68
+/* End of CS_FATAL_EXCEPTION_TYPE values */
+#define CS_FATAL_EXCEPTION_DATA_SHIFT 8
+#define CS_FATAL_EXCEPTION_DATA_MASK (0xFFFFFF << CS_FATAL_EXCEPTION_DATA_SHIFT)
+#define CS_FATAL_EXCEPTION_DATA_GET(reg_val) (((reg_val)&CS_FATAL_EXCEPTION_DATA_MASK) >> CS_FATAL_EXCEPTION_DATA_SHIFT)
+#define CS_FATAL_EXCEPTION_DATA_SET(reg_val, value) \
+ (((reg_val) & ~CS_FATAL_EXCEPTION_DATA_MASK) | \
+ (((value) << CS_FATAL_EXCEPTION_DATA_SHIFT) & CS_FATAL_EXCEPTION_DATA_MASK))
+
+/* CS_FAULT_INFO register */
+#define CS_FAULT_INFO_EXCEPTION_DATA_SHIFT 0
+#define CS_FAULT_INFO_EXCEPTION_DATA_MASK (0xFFFFFFFFFFFFFFFF << CS_FAULT_INFO_EXCEPTION_DATA_SHIFT)
+#define CS_FAULT_INFO_EXCEPTION_DATA_GET(reg_val) \
+ (((reg_val)&CS_FAULT_INFO_EXCEPTION_DATA_MASK) >> CS_FAULT_INFO_EXCEPTION_DATA_SHIFT)
+#define CS_FAULT_INFO_EXCEPTION_DATA_SET(reg_val, value) \
+ (((reg_val) & ~CS_FAULT_INFO_EXCEPTION_DATA_MASK) | \
+ (((value) << CS_FAULT_INFO_EXCEPTION_DATA_SHIFT) & CS_FAULT_INFO_EXCEPTION_DATA_MASK))
+
+/* CS_FATAL_INFO register */
+#define CS_FATAL_INFO_EXCEPTION_DATA_SHIFT 0
+#define CS_FATAL_INFO_EXCEPTION_DATA_MASK (0xFFFFFFFFFFFFFFFF << CS_FATAL_INFO_EXCEPTION_DATA_SHIFT)
+#define CS_FATAL_INFO_EXCEPTION_DATA_GET(reg_val) \
+ (((reg_val)&CS_FATAL_INFO_EXCEPTION_DATA_MASK) >> CS_FATAL_INFO_EXCEPTION_DATA_SHIFT)
+#define CS_FATAL_INFO_EXCEPTION_DATA_SET(reg_val, value) \
+ (((reg_val) & ~CS_FATAL_INFO_EXCEPTION_DATA_MASK) | \
+ (((value) << CS_FATAL_INFO_EXCEPTION_DATA_SHIFT) & CS_FATAL_INFO_EXCEPTION_DATA_MASK))
+
+/* CS_HEAP_VT_START register */
+#define CS_HEAP_VT_START_VALUE_SHIFT 0
+#define CS_HEAP_VT_START_VALUE_MASK (0xFFFFFFFF << CS_HEAP_VT_START_VALUE_SHIFT)
+#define CS_HEAP_VT_START_VALUE_GET(reg_val) (((reg_val)&CS_HEAP_VT_START_VALUE_MASK) >> CS_HEAP_VT_START_VALUE_SHIFT)
+#define CS_HEAP_VT_START_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_HEAP_VT_START_VALUE_MASK) | \
+ (((value) << CS_HEAP_VT_START_VALUE_SHIFT) & CS_HEAP_VT_START_VALUE_MASK))
+
+/* CS_HEAP_VT_END register */
+#define CS_HEAP_VT_END_VALUE_SHIFT 0
+#define CS_HEAP_VT_END_VALUE_MASK (0xFFFFFFFF << CS_HEAP_VT_END_VALUE_SHIFT)
+#define CS_HEAP_VT_END_VALUE_GET(reg_val) (((reg_val)&CS_HEAP_VT_END_VALUE_MASK) >> CS_HEAP_VT_END_VALUE_SHIFT)
+#define CS_HEAP_VT_END_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_HEAP_VT_END_VALUE_MASK) | (((value) << CS_HEAP_VT_END_VALUE_SHIFT) & CS_HEAP_VT_END_VALUE_MASK))
+
+/* CS_HEAP_FRAG_END register */
+#define CS_HEAP_FRAG_END_VALUE_SHIFT 0
+#define CS_HEAP_FRAG_END_VALUE_MASK (0xFFFFFFFF << CS_HEAP_FRAG_END_VALUE_SHIFT)
+#define CS_HEAP_FRAG_END_VALUE_GET(reg_val) (((reg_val)&CS_HEAP_FRAG_END_VALUE_MASK) >> CS_HEAP_FRAG_END_VALUE_SHIFT)
+#define CS_HEAP_FRAG_END_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_HEAP_FRAG_END_VALUE_MASK) | \
+ (((value) << CS_HEAP_FRAG_END_VALUE_SHIFT) & CS_HEAP_FRAG_END_VALUE_MASK))
+
+/* CS_HEAP_ADDRESS register */
+#define CS_HEAP_ADDRESS_POINTER_SHIFT 0
+#define CS_HEAP_ADDRESS_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CS_HEAP_ADDRESS_POINTER_SHIFT)
+#define CS_HEAP_ADDRESS_POINTER_GET(reg_val) (((reg_val)&CS_HEAP_ADDRESS_POINTER_MASK) >> CS_HEAP_ADDRESS_POINTER_SHIFT)
+#define CS_HEAP_ADDRESS_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CS_HEAP_ADDRESS_POINTER_MASK) | \
+ (((value) << CS_HEAP_ADDRESS_POINTER_SHIFT) & CS_HEAP_ADDRESS_POINTER_MASK))
+/* End of CS_KERNEL_OUTPUT_BLOCK register set definitions */
+
+/* CS_USER_INPUT_BLOCK register set definitions */
+
+/* CS_INSERT register */
+#define CS_INSERT_VALUE_SHIFT 0
+#define CS_INSERT_VALUE_MASK (0xFFFFFFFFFFFFFFFF << CS_INSERT_VALUE_SHIFT)
+#define CS_INSERT_VALUE_GET(reg_val) (((reg_val)&CS_INSERT_VALUE_MASK) >> CS_INSERT_VALUE_SHIFT)
+#define CS_INSERT_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_INSERT_VALUE_MASK) | (((value) << CS_INSERT_VALUE_SHIFT) & CS_INSERT_VALUE_MASK))
+
+/* CS_EXTRACT_INIT register */
+#define CS_EXTRACT_INIT_VALUE_SHIFT 0
+#define CS_EXTRACT_INIT_VALUE_MASK (0xFFFFFFFFFFFFFFFF << CS_EXTRACT_INIT_VALUE_SHIFT)
+#define CS_EXTRACT_INIT_VALUE_GET(reg_val) (((reg_val)&CS_EXTRACT_INIT_VALUE_MASK) >> CS_EXTRACT_INIT_VALUE_SHIFT)
+#define CS_EXTRACT_INIT_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_EXTRACT_INIT_VALUE_MASK) | \
+ (((value) << CS_EXTRACT_INIT_VALUE_SHIFT) & CS_EXTRACT_INIT_VALUE_MASK))
+/* End of CS_USER_INPUT_BLOCK register set definitions */
+
+/* CS_USER_OUTPUT_BLOCK register set definitions */
+
+/* CS_EXTRACT register */
+#define CS_EXTRACT_VALUE_SHIFT 0
+#define CS_EXTRACT_VALUE_MASK (0xFFFFFFFFFFFFFFFF << CS_EXTRACT_VALUE_SHIFT)
+#define CS_EXTRACT_VALUE_GET(reg_val) (((reg_val)&CS_EXTRACT_VALUE_MASK) >> CS_EXTRACT_VALUE_SHIFT)
+#define CS_EXTRACT_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~CS_EXTRACT_VALUE_MASK) | (((value) << CS_EXTRACT_VALUE_SHIFT) & CS_EXTRACT_VALUE_MASK))
+
+/* CS_ACTIVE register */
+#define CS_ACTIVE_HW_ACTIVE_SHIFT 0
+#define CS_ACTIVE_HW_ACTIVE_MASK (0x1 << CS_ACTIVE_HW_ACTIVE_SHIFT)
+#define CS_ACTIVE_HW_ACTIVE_GET(reg_val) (((reg_val)&CS_ACTIVE_HW_ACTIVE_MASK) >> CS_ACTIVE_HW_ACTIVE_SHIFT)
+#define CS_ACTIVE_HW_ACTIVE_SET(reg_val, value) \
+ (((reg_val) & ~CS_ACTIVE_HW_ACTIVE_MASK) | (((value) << CS_ACTIVE_HW_ACTIVE_SHIFT) & CS_ACTIVE_HW_ACTIVE_MASK))
+/* End of CS_USER_OUTPUT_BLOCK register set definitions */
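+
+/*
+ * Usage sketch (editorial illustration): CS_INSERT (advanced by userspace)
+ * and CS_EXTRACT (advanced by the GPU) appear to act as free-running byte
+ * offsets into the ring buffer; under that assumption, the work still
+ * pending is simply their difference. 'insert' and 'extract' are
+ * hypothetical values read from the user input/output pages.
+ *
+ *   __u64 pending = CS_INSERT_VALUE_GET(insert) -
+ *                   CS_EXTRACT_VALUE_GET(extract);
+ */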
+
+/* CSG_INPUT_BLOCK register set definitions */
+
+/* CSG_REQ register */
+#define CSG_REQ_STATE_SHIFT 0
+#define CSG_REQ_STATE_MASK (0x7 << CSG_REQ_STATE_SHIFT)
+#define CSG_REQ_STATE_GET(reg_val) (((reg_val)&CSG_REQ_STATE_MASK) >> CSG_REQ_STATE_SHIFT)
+#define CSG_REQ_STATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_STATE_MASK) | (((value) << CSG_REQ_STATE_SHIFT) & CSG_REQ_STATE_MASK))
+/* CSG_REQ_STATE values */
+#define CSG_REQ_STATE_TERMINATE 0x0
+#define CSG_REQ_STATE_START 0x1
+#define CSG_REQ_STATE_SUSPEND 0x2
+#define CSG_REQ_STATE_RESUME 0x3
+/* End of CSG_REQ_STATE values */
+#define CSG_REQ_EP_CFG_SHIFT 4
+#define CSG_REQ_EP_CFG_MASK (0x1 << CSG_REQ_EP_CFG_SHIFT)
+#define CSG_REQ_EP_CFG_GET(reg_val) (((reg_val)&CSG_REQ_EP_CFG_MASK) >> CSG_REQ_EP_CFG_SHIFT)
+#define CSG_REQ_EP_CFG_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_EP_CFG_MASK) | (((value) << CSG_REQ_EP_CFG_SHIFT) & CSG_REQ_EP_CFG_MASK))
+#define CSG_REQ_STATUS_UPDATE_SHIFT 5
+#define CSG_REQ_STATUS_UPDATE_MASK (0x1 << CSG_REQ_STATUS_UPDATE_SHIFT)
+#define CSG_REQ_STATUS_UPDATE_GET(reg_val) (((reg_val)&CSG_REQ_STATUS_UPDATE_MASK) >> CSG_REQ_STATUS_UPDATE_SHIFT)
+#define CSG_REQ_STATUS_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_STATUS_UPDATE_MASK) | \
+ (((value) << CSG_REQ_STATUS_UPDATE_SHIFT) & CSG_REQ_STATUS_UPDATE_MASK))
+#define CSG_REQ_SYNC_UPDATE_SHIFT 28
+#define CSG_REQ_SYNC_UPDATE_MASK (0x1 << CSG_REQ_SYNC_UPDATE_SHIFT)
+#define CSG_REQ_SYNC_UPDATE_GET(reg_val) (((reg_val)&CSG_REQ_SYNC_UPDATE_MASK) >> CSG_REQ_SYNC_UPDATE_SHIFT)
+#define CSG_REQ_SYNC_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_SYNC_UPDATE_MASK) | (((value) << CSG_REQ_SYNC_UPDATE_SHIFT) & CSG_REQ_SYNC_UPDATE_MASK))
+#define CSG_REQ_IDLE_SHIFT 29
+#define CSG_REQ_IDLE_MASK (0x1 << CSG_REQ_IDLE_SHIFT)
+#define CSG_REQ_IDLE_GET(reg_val) (((reg_val)&CSG_REQ_IDLE_MASK) >> CSG_REQ_IDLE_SHIFT)
+#define CSG_REQ_IDLE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_IDLE_MASK) | (((value) << CSG_REQ_IDLE_SHIFT) & CSG_REQ_IDLE_MASK))
+#define CSG_REQ_DOORBELL_SHIFT 30
+#define CSG_REQ_DOORBELL_MASK (0x1 << CSG_REQ_DOORBELL_SHIFT)
+#define CSG_REQ_DOORBELL_GET(reg_val) (((reg_val)&CSG_REQ_DOORBELL_MASK) >> CSG_REQ_DOORBELL_SHIFT)
+#define CSG_REQ_DOORBELL_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_DOORBELL_MASK) | (((value) << CSG_REQ_DOORBELL_SHIFT) & CSG_REQ_DOORBELL_MASK))
+#define CSG_REQ_PROGRESS_TIMER_EVENT_SHIFT 31
+#define CSG_REQ_PROGRESS_TIMER_EVENT_MASK (0x1 << CSG_REQ_PROGRESS_TIMER_EVENT_SHIFT)
+#define CSG_REQ_PROGRESS_TIMER_EVENT_GET(reg_val) \
+ (((reg_val)&CSG_REQ_PROGRESS_TIMER_EVENT_MASK) >> CSG_REQ_PROGRESS_TIMER_EVENT_SHIFT)
+#define CSG_REQ_PROGRESS_TIMER_EVENT_SET(reg_val, value) \
+ (((reg_val) & ~CSG_REQ_PROGRESS_TIMER_EVENT_MASK) | \
+ (((value) << CSG_REQ_PROGRESS_TIMER_EVENT_SHIFT) & CSG_REQ_PROGRESS_TIMER_EVENT_MASK))
+
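+/*
+ * Usage sketch (editorial illustration): CSG state changes appear to
+ * follow a request/acknowledge handshake, where the host writes the
+ * target state into CSG_REQ and the firmware mirrors it into CSG_ACK
+ * (defined with the CSG output block below) once the transition is done.
+ * Variable names are hypothetical.
+ *
+ *   csg_req = CSG_REQ_STATE_SET(csg_req, CSG_REQ_STATE_START);
+ *   // write csg_req to the CSG input page and ring the doorbell, then
+ *   // wait until CSG_ACK_STATE_GET(csg_ack) == CSG_ACK_STATE_START
+ */
+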
+/* CSG_ACK_IRQ_MASK register */
+#define CSG_ACK_IRQ_MASK_STATE_SHIFT 0
+#define CSG_ACK_IRQ_MASK_STATE_MASK (0x7 << CSG_ACK_IRQ_MASK_STATE_SHIFT)
+#define CSG_ACK_IRQ_MASK_STATE_GET(reg_val) (((reg_val)&CSG_ACK_IRQ_MASK_STATE_MASK) >> CSG_ACK_IRQ_MASK_STATE_SHIFT)
+#define CSG_ACK_IRQ_MASK_STATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_STATE_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_STATE_SHIFT) & CSG_ACK_IRQ_MASK_STATE_MASK))
+/* CSG_ACK_IRQ_MASK_STATE values */
+#define CSG_ACK_IRQ_MASK_STATE_DISABLED 0x0
+#define CSG_ACK_IRQ_MASK_STATE_ENABLED 0x7
+/* End of CSG_ACK_IRQ_MASK_STATE values */
+#define CSG_ACK_IRQ_MASK_EP_CFG_SHIFT 4
+#define CSG_ACK_IRQ_MASK_EP_CFG_MASK (0x1 << CSG_ACK_IRQ_MASK_EP_CFG_SHIFT)
+#define CSG_ACK_IRQ_MASK_EP_CFG_GET(reg_val) (((reg_val)&CSG_ACK_IRQ_MASK_EP_CFG_MASK) >> CSG_ACK_IRQ_MASK_EP_CFG_SHIFT)
+#define CSG_ACK_IRQ_MASK_EP_CFG_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_EP_CFG_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_EP_CFG_SHIFT) & CSG_ACK_IRQ_MASK_EP_CFG_MASK))
+#define CSG_ACK_IRQ_MASK_STATUS_UPDATE_SHIFT 5
+#define CSG_ACK_IRQ_MASK_STATUS_UPDATE_MASK (0x1 << CSG_ACK_IRQ_MASK_STATUS_UPDATE_SHIFT)
+#define CSG_ACK_IRQ_MASK_STATUS_UPDATE_GET(reg_val) \
+ (((reg_val)&CSG_ACK_IRQ_MASK_STATUS_UPDATE_MASK) >> CSG_ACK_IRQ_MASK_STATUS_UPDATE_SHIFT)
+#define CSG_ACK_IRQ_MASK_STATUS_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_STATUS_UPDATE_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_STATUS_UPDATE_SHIFT) & CSG_ACK_IRQ_MASK_STATUS_UPDATE_MASK))
+#define CSG_ACK_IRQ_MASK_SYNC_UPDATE_SHIFT 28
+#define CSG_ACK_IRQ_MASK_SYNC_UPDATE_MASK (0x1 << CSG_ACK_IRQ_MASK_SYNC_UPDATE_SHIFT)
+#define CSG_ACK_IRQ_MASK_SYNC_UPDATE_GET(reg_val) \
+ (((reg_val)&CSG_ACK_IRQ_MASK_SYNC_UPDATE_MASK) >> CSG_ACK_IRQ_MASK_SYNC_UPDATE_SHIFT)
+#define CSG_ACK_IRQ_MASK_SYNC_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_SYNC_UPDATE_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_SYNC_UPDATE_SHIFT) & CSG_ACK_IRQ_MASK_SYNC_UPDATE_MASK))
+#define CSG_ACK_IRQ_MASK_IDLE_SHIFT 29
+#define CSG_ACK_IRQ_MASK_IDLE_MASK (0x1 << CSG_ACK_IRQ_MASK_IDLE_SHIFT)
+#define CSG_ACK_IRQ_MASK_IDLE_GET(reg_val) (((reg_val)&CSG_ACK_IRQ_MASK_IDLE_MASK) >> CSG_ACK_IRQ_MASK_IDLE_SHIFT)
+#define CSG_ACK_IRQ_MASK_IDLE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_IDLE_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_IDLE_SHIFT) & CSG_ACK_IRQ_MASK_IDLE_MASK))
+#define CSG_ACK_IRQ_MASK_DOORBELL_SHIFT 30
+#define CSG_ACK_IRQ_MASK_DOORBELL_MASK (0x1 << CSG_ACK_IRQ_MASK_DOORBELL_SHIFT)
+#define CSG_ACK_IRQ_MASK_DOORBELL_GET(reg_val) \
+ (((reg_val)&CSG_ACK_IRQ_MASK_DOORBELL_MASK) >> CSG_ACK_IRQ_MASK_DOORBELL_SHIFT)
+#define CSG_ACK_IRQ_MASK_DOORBELL_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_DOORBELL_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_DOORBELL_SHIFT) & CSG_ACK_IRQ_MASK_DOORBELL_MASK))
+#define CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_SHIFT 31
+#define CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_MASK (0x1 << CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_SHIFT)
+#define CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_GET(reg_val) \
+ (((reg_val)&CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_MASK) >> CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_SHIFT)
+#define CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_MASK) | \
+ (((value) << CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_SHIFT) & CSG_ACK_IRQ_MASK_PROGRESS_TIMER_EVENT_MASK))
+
+/* CSG_EP_REQ register */
+#define CSG_EP_REQ_COMPUTE_EP_SHIFT 0
+#define CSG_EP_REQ_COMPUTE_EP_MASK (0xFF << CSG_EP_REQ_COMPUTE_EP_SHIFT)
+#define CSG_EP_REQ_COMPUTE_EP_GET(reg_val) (((reg_val)&CSG_EP_REQ_COMPUTE_EP_MASK) >> CSG_EP_REQ_COMPUTE_EP_SHIFT)
+#define CSG_EP_REQ_COMPUTE_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_EP_REQ_COMPUTE_EP_MASK) | \
+ (((value) << CSG_EP_REQ_COMPUTE_EP_SHIFT) & CSG_EP_REQ_COMPUTE_EP_MASK))
+#define CSG_EP_REQ_FRAGMENT_EP_SHIFT 8
+#define CSG_EP_REQ_FRAGMENT_EP_MASK (0xFF << CSG_EP_REQ_FRAGMENT_EP_SHIFT)
+#define CSG_EP_REQ_FRAGMENT_EP_GET(reg_val) (((reg_val)&CSG_EP_REQ_FRAGMENT_EP_MASK) >> CSG_EP_REQ_FRAGMENT_EP_SHIFT)
+#define CSG_EP_REQ_FRAGMENT_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_EP_REQ_FRAGMENT_EP_MASK) | \
+ (((value) << CSG_EP_REQ_FRAGMENT_EP_SHIFT) & CSG_EP_REQ_FRAGMENT_EP_MASK))
+#define CSG_EP_REQ_TILER_EP_SHIFT 16
+#define CSG_EP_REQ_TILER_EP_MASK (0xF << CSG_EP_REQ_TILER_EP_SHIFT)
+#define CSG_EP_REQ_TILER_EP_GET(reg_val) (((reg_val)&CSG_EP_REQ_TILER_EP_MASK) >> CSG_EP_REQ_TILER_EP_SHIFT)
+#define CSG_EP_REQ_TILER_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_EP_REQ_TILER_EP_MASK) | (((value) << CSG_EP_REQ_TILER_EP_SHIFT) & CSG_EP_REQ_TILER_EP_MASK))
+#define CSG_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT 20
+#define CSG_EP_REQ_EXCLUSIVE_COMPUTE_MASK (0x1 << CSG_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT)
+#define CSG_EP_REQ_EXCLUSIVE_COMPUTE_GET(reg_val) \
+ (((reg_val)&CSG_EP_REQ_EXCLUSIVE_COMPUTE_MASK) >> CSG_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT)
+#define CSG_EP_REQ_EXCLUSIVE_COMPUTE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_EP_REQ_EXCLUSIVE_COMPUTE_MASK) | \
+ (((value) << CSG_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT) & CSG_EP_REQ_EXCLUSIVE_COMPUTE_MASK))
+#define CSG_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT 21
+#define CSG_EP_REQ_EXCLUSIVE_FRAGMENT_MASK (0x1 << CSG_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT)
+#define CSG_EP_REQ_EXCLUSIVE_FRAGMENT_GET(reg_val) \
+ (((reg_val)&CSG_EP_REQ_EXCLUSIVE_FRAGMENT_MASK) >> CSG_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT)
+#define CSG_EP_REQ_EXCLUSIVE_FRAGMENT_SET(reg_val, value) \
+ (((reg_val) & ~CSG_EP_REQ_EXCLUSIVE_FRAGMENT_MASK) | \
+ (((value) << CSG_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT) & CSG_EP_REQ_EXCLUSIVE_FRAGMENT_MASK))
+#define CSG_EP_REQ_PRIORITY_SHIFT 28
+#define CSG_EP_REQ_PRIORITY_MASK (0xF << CSG_EP_REQ_PRIORITY_SHIFT)
+#define CSG_EP_REQ_PRIORITY_GET(reg_val) (((reg_val)&CSG_EP_REQ_PRIORITY_MASK) >> CSG_EP_REQ_PRIORITY_SHIFT)
+#define CSG_EP_REQ_PRIORITY_SET(reg_val, value) \
+ (((reg_val) & ~CSG_EP_REQ_PRIORITY_MASK) | (((value) << CSG_EP_REQ_PRIORITY_SHIFT) & CSG_EP_REQ_PRIORITY_MASK))
+
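+/*
+ * Usage sketch (editorial illustration): the SET accessors read-modify-
+ * write only the field they name, so a CSG_EP_REQ value can be composed
+ * incrementally. The endpoint counts and priority below are arbitrary
+ * example numbers, not recommended settings.
+ *
+ *   __u32 ep_req = 0;
+ *   ep_req = CSG_EP_REQ_COMPUTE_EP_SET(ep_req, 8);
+ *   ep_req = CSG_EP_REQ_FRAGMENT_EP_SET(ep_req, 8);
+ *   ep_req = CSG_EP_REQ_TILER_EP_SET(ep_req, 1);
+ *   ep_req = CSG_EP_REQ_PRIORITY_SET(ep_req, 0xF);
+ */
+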
+/* CSG_SUSPEND_BUF register */
+#define CSG_SUSPEND_BUF_POINTER_SHIFT 0
+#define CSG_SUSPEND_BUF_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CSG_SUSPEND_BUF_POINTER_SHIFT)
+#define CSG_SUSPEND_BUF_POINTER_GET(reg_val) (((reg_val)&CSG_SUSPEND_BUF_POINTER_MASK) >> CSG_SUSPEND_BUF_POINTER_SHIFT)
+#define CSG_SUSPEND_BUF_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CSG_SUSPEND_BUF_POINTER_MASK) | \
+ (((value) << CSG_SUSPEND_BUF_POINTER_SHIFT) & CSG_SUSPEND_BUF_POINTER_MASK))
+
+/* CSG_PROTM_SUSPEND_BUF register */
+#define CSG_PROTM_SUSPEND_BUF_POINTER_SHIFT 0
+#define CSG_PROTM_SUSPEND_BUF_POINTER_MASK (0xFFFFFFFFFFFFFFFF << CSG_PROTM_SUSPEND_BUF_POINTER_SHIFT)
+#define CSG_PROTM_SUSPEND_BUF_POINTER_GET(reg_val) \
+ (((reg_val)&CSG_PROTM_SUSPEND_BUF_POINTER_MASK) >> CSG_PROTM_SUSPEND_BUF_POINTER_SHIFT)
+#define CSG_PROTM_SUSPEND_BUF_POINTER_SET(reg_val, value) \
+ (((reg_val) & ~CSG_PROTM_SUSPEND_BUF_POINTER_MASK) | \
+ (((value) << CSG_PROTM_SUSPEND_BUF_POINTER_SHIFT) & CSG_PROTM_SUSPEND_BUF_POINTER_MASK))
+
+/* End of CSG_INPUT_BLOCK register set definitions */
+
+/* CSG_OUTPUT_BLOCK register set definitions */
+
+/* CSG_ACK register */
+#define CSG_ACK_STATE_SHIFT 0
+#define CSG_ACK_STATE_MASK (0x7 << CSG_ACK_STATE_SHIFT)
+#define CSG_ACK_STATE_GET(reg_val) (((reg_val)&CSG_ACK_STATE_MASK) >> CSG_ACK_STATE_SHIFT)
+#define CSG_ACK_STATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_STATE_MASK) | (((value) << CSG_ACK_STATE_SHIFT) & CSG_ACK_STATE_MASK))
+/* CSG_ACK_STATE values */
+#define CSG_ACK_STATE_TERMINATE 0x0
+#define CSG_ACK_STATE_START 0x1
+#define CSG_ACK_STATE_SUSPEND 0x2
+#define CSG_ACK_STATE_RESUME 0x3
+/* End of CSG_ACK_STATE values */
+#define CSG_ACK_EP_CFG_SHIFT 4
+#define CSG_ACK_EP_CFG_MASK (0x1 << CSG_ACK_EP_CFG_SHIFT)
+#define CSG_ACK_EP_CFG_GET(reg_val) (((reg_val)&CSG_ACK_EP_CFG_MASK) >> CSG_ACK_EP_CFG_SHIFT)
+#define CSG_ACK_EP_CFG_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_EP_CFG_MASK) | (((value) << CSG_ACK_EP_CFG_SHIFT) & CSG_ACK_EP_CFG_MASK))
+#define CSG_ACK_STATUS_UPDATE_SHIFT 5
+#define CSG_ACK_STATUS_UPDATE_MASK (0x1 << CSG_ACK_STATUS_UPDATE_SHIFT)
+#define CSG_ACK_STATUS_UPDATE_GET(reg_val) (((reg_val)&CSG_ACK_STATUS_UPDATE_MASK) >> CSG_ACK_STATUS_UPDATE_SHIFT)
+#define CSG_ACK_STATUS_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_STATUS_UPDATE_MASK) | \
+ (((value) << CSG_ACK_STATUS_UPDATE_SHIFT) & CSG_ACK_STATUS_UPDATE_MASK))
+#define CSG_ACK_SYNC_UPDATE_SHIFT 28
+#define CSG_ACK_SYNC_UPDATE_MASK (0x1 << CSG_ACK_SYNC_UPDATE_SHIFT)
+#define CSG_ACK_SYNC_UPDATE_GET(reg_val) (((reg_val)&CSG_ACK_SYNC_UPDATE_MASK) >> CSG_ACK_SYNC_UPDATE_SHIFT)
+#define CSG_ACK_SYNC_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_SYNC_UPDATE_MASK) | (((value) << CSG_ACK_SYNC_UPDATE_SHIFT) & CSG_ACK_SYNC_UPDATE_MASK))
+#define CSG_ACK_IDLE_SHIFT 29
+#define CSG_ACK_IDLE_MASK (0x1 << CSG_ACK_IDLE_SHIFT)
+#define CSG_ACK_IDLE_GET(reg_val) (((reg_val)&CSG_ACK_IDLE_MASK) >> CSG_ACK_IDLE_SHIFT)
+#define CSG_ACK_IDLE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_IDLE_MASK) | (((value) << CSG_ACK_IDLE_SHIFT) & CSG_ACK_IDLE_MASK))
+#define CSG_ACK_DOORBELL_SHIFT 30
+#define CSG_ACK_DOORBELL_MASK (0x1 << CSG_ACK_DOORBELL_SHIFT)
+#define CSG_ACK_DOORBELL_GET(reg_val) (((reg_val)&CSG_ACK_DOORBELL_MASK) >> CSG_ACK_DOORBELL_SHIFT)
+#define CSG_ACK_DOORBELL_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_DOORBELL_MASK) | (((value) << CSG_ACK_DOORBELL_SHIFT) & CSG_ACK_DOORBELL_MASK))
+#define CSG_ACK_PROGRESS_TIMER_EVENT_SHIFT 31
+#define CSG_ACK_PROGRESS_TIMER_EVENT_MASK (0x1 << CSG_ACK_PROGRESS_TIMER_EVENT_SHIFT)
+#define CSG_ACK_PROGRESS_TIMER_EVENT_GET(reg_val) \
+ (((reg_val)&CSG_ACK_PROGRESS_TIMER_EVENT_MASK) >> CSG_ACK_PROGRESS_TIMER_EVENT_SHIFT)
+#define CSG_ACK_PROGRESS_TIMER_EVENT_SET(reg_val, value) \
+ (((reg_val) & ~CSG_ACK_PROGRESS_TIMER_EVENT_MASK) | \
+ (((value) << CSG_ACK_PROGRESS_TIMER_EVENT_SHIFT) & CSG_ACK_PROGRESS_TIMER_EVENT_MASK))
+
+/* CSG_STATUS_EP_CURRENT register */
+#define CSG_STATUS_EP_CURRENT_COMPUTE_EP_SHIFT 0
+#define CSG_STATUS_EP_CURRENT_COMPUTE_EP_MASK (0xFF << CSG_STATUS_EP_CURRENT_COMPUTE_EP_SHIFT)
+#define CSG_STATUS_EP_CURRENT_COMPUTE_EP_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_CURRENT_COMPUTE_EP_MASK) >> CSG_STATUS_EP_CURRENT_COMPUTE_EP_SHIFT)
+#define CSG_STATUS_EP_CURRENT_COMPUTE_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_CURRENT_COMPUTE_EP_MASK) | \
+ (((value) << CSG_STATUS_EP_CURRENT_COMPUTE_EP_SHIFT) & CSG_STATUS_EP_CURRENT_COMPUTE_EP_MASK))
+#define CSG_STATUS_EP_CURRENT_FRAGMENT_EP_SHIFT 8
+#define CSG_STATUS_EP_CURRENT_FRAGMENT_EP_MASK (0xFF << CSG_STATUS_EP_CURRENT_FRAGMENT_EP_SHIFT)
+#define CSG_STATUS_EP_CURRENT_FRAGMENT_EP_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_CURRENT_FRAGMENT_EP_MASK) >> CSG_STATUS_EP_CURRENT_FRAGMENT_EP_SHIFT)
+#define CSG_STATUS_EP_CURRENT_FRAGMENT_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_CURRENT_FRAGMENT_EP_MASK) | \
+ (((value) << CSG_STATUS_EP_CURRENT_FRAGMENT_EP_SHIFT) & CSG_STATUS_EP_CURRENT_FRAGMENT_EP_MASK))
+#define CSG_STATUS_EP_CURRENT_TILER_EP_SHIFT 16
+#define CSG_STATUS_EP_CURRENT_TILER_EP_MASK (0xF << CSG_STATUS_EP_CURRENT_TILER_EP_SHIFT)
+#define CSG_STATUS_EP_CURRENT_TILER_EP_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_CURRENT_TILER_EP_MASK) >> CSG_STATUS_EP_CURRENT_TILER_EP_SHIFT)
+#define CSG_STATUS_EP_CURRENT_TILER_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_CURRENT_TILER_EP_MASK) | \
+ (((value) << CSG_STATUS_EP_CURRENT_TILER_EP_SHIFT) & CSG_STATUS_EP_CURRENT_TILER_EP_MASK))
+
+/* CSG_STATUS_EP_REQ register */
+#define CSG_STATUS_EP_REQ_COMPUTE_EP_SHIFT 0
+#define CSG_STATUS_EP_REQ_COMPUTE_EP_MASK (0xFF << CSG_STATUS_EP_REQ_COMPUTE_EP_SHIFT)
+#define CSG_STATUS_EP_REQ_COMPUTE_EP_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_REQ_COMPUTE_EP_MASK) >> CSG_STATUS_EP_REQ_COMPUTE_EP_SHIFT)
+#define CSG_STATUS_EP_REQ_COMPUTE_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_REQ_COMPUTE_EP_MASK) | \
+ (((value) << CSG_STATUS_EP_REQ_COMPUTE_EP_SHIFT) & CSG_STATUS_EP_REQ_COMPUTE_EP_MASK))
+#define CSG_STATUS_EP_REQ_FRAGMENT_EP_SHIFT 8
+#define CSG_STATUS_EP_REQ_FRAGMENT_EP_MASK (0xFF << CSG_STATUS_EP_REQ_FRAGMENT_EP_SHIFT)
+#define CSG_STATUS_EP_REQ_FRAGMENT_EP_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_REQ_FRAGMENT_EP_MASK) >> CSG_STATUS_EP_REQ_FRAGMENT_EP_SHIFT)
+#define CSG_STATUS_EP_REQ_FRAGMENT_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_REQ_FRAGMENT_EP_MASK) | \
+ (((value) << CSG_STATUS_EP_REQ_FRAGMENT_EP_SHIFT) & CSG_STATUS_EP_REQ_FRAGMENT_EP_MASK))
+#define CSG_STATUS_EP_REQ_TILER_EP_SHIFT 16
+#define CSG_STATUS_EP_REQ_TILER_EP_MASK (0xF << CSG_STATUS_EP_REQ_TILER_EP_SHIFT)
+#define CSG_STATUS_EP_REQ_TILER_EP_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_REQ_TILER_EP_MASK) >> CSG_STATUS_EP_REQ_TILER_EP_SHIFT)
+#define CSG_STATUS_EP_REQ_TILER_EP_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_REQ_TILER_EP_MASK) | \
+ (((value) << CSG_STATUS_EP_REQ_TILER_EP_SHIFT) & CSG_STATUS_EP_REQ_TILER_EP_MASK))
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT 20
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_MASK (0x1 << CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT)
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_MASK) >> CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT)
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_MASK) | \
+ (((value) << CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_SHIFT) & CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_MASK))
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT 21
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_MASK (0x1 << CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT)
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_MASK) >> CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT)
+#define CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_MASK) | \
+ (((value) << CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_SHIFT) & CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_MASK))
+
+/* End of CSG_OUTPUT_BLOCK register set definitions */
+
+/* STREAM_CONTROL_BLOCK register set definitions */
+
+/* STREAM_FEATURES register */
+#define STREAM_FEATURES_WORK_REGISTERS_SHIFT 0
+#define STREAM_FEATURES_WORK_REGISTERS_MASK (0xFF << STREAM_FEATURES_WORK_REGISTERS_SHIFT)
+#define STREAM_FEATURES_WORK_REGISTERS_GET(reg_val) \
+ (((reg_val)&STREAM_FEATURES_WORK_REGISTERS_MASK) >> STREAM_FEATURES_WORK_REGISTERS_SHIFT)
+#define STREAM_FEATURES_WORK_REGISTERS_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_FEATURES_WORK_REGISTERS_MASK) | \
+ (((value) << STREAM_FEATURES_WORK_REGISTERS_SHIFT) & STREAM_FEATURES_WORK_REGISTERS_MASK))
+#define STREAM_FEATURES_SCOREBOARDS_SHIFT 8
+#define STREAM_FEATURES_SCOREBOARDS_MASK (0xFF << STREAM_FEATURES_SCOREBOARDS_SHIFT)
+#define STREAM_FEATURES_SCOREBOARDS_GET(reg_val) \
+ (((reg_val)&STREAM_FEATURES_SCOREBOARDS_MASK) >> STREAM_FEATURES_SCOREBOARDS_SHIFT)
+#define STREAM_FEATURES_SCOREBOARDS_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_FEATURES_SCOREBOARDS_MASK) | \
+ (((value) << STREAM_FEATURES_SCOREBOARDS_SHIFT) & STREAM_FEATURES_SCOREBOARDS_MASK))
+#define STREAM_FEATURES_COMPUTE_SHIFT 16
+#define STREAM_FEATURES_COMPUTE_MASK (0x1 << STREAM_FEATURES_COMPUTE_SHIFT)
+#define STREAM_FEATURES_COMPUTE_GET(reg_val) (((reg_val)&STREAM_FEATURES_COMPUTE_MASK) >> STREAM_FEATURES_COMPUTE_SHIFT)
+#define STREAM_FEATURES_COMPUTE_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_FEATURES_COMPUTE_MASK) | \
+ (((value) << STREAM_FEATURES_COMPUTE_SHIFT) & STREAM_FEATURES_COMPUTE_MASK))
+#define STREAM_FEATURES_FRAGMENT_SHIFT 17
+#define STREAM_FEATURES_FRAGMENT_MASK (0x1 << STREAM_FEATURES_FRAGMENT_SHIFT)
+#define STREAM_FEATURES_FRAGMENT_GET(reg_val) \
+ (((reg_val)&STREAM_FEATURES_FRAGMENT_MASK) >> STREAM_FEATURES_FRAGMENT_SHIFT)
+#define STREAM_FEATURES_FRAGMENT_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_FEATURES_FRAGMENT_MASK) | \
+ (((value) << STREAM_FEATURES_FRAGMENT_SHIFT) & STREAM_FEATURES_FRAGMENT_MASK))
+#define STREAM_FEATURES_TILER_SHIFT 18
+#define STREAM_FEATURES_TILER_MASK (0x1 << STREAM_FEATURES_TILER_SHIFT)
+#define STREAM_FEATURES_TILER_GET(reg_val) (((reg_val)&STREAM_FEATURES_TILER_MASK) >> STREAM_FEATURES_TILER_SHIFT)
+#define STREAM_FEATURES_TILER_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_FEATURES_TILER_MASK) | \
+ (((value) << STREAM_FEATURES_TILER_SHIFT) & STREAM_FEATURES_TILER_MASK))
+
+/* STREAM_INPUT_VA register */
+#define STREAM_INPUT_VA_VALUE_SHIFT 0
+#define STREAM_INPUT_VA_VALUE_MASK (0xFFFFFFFF << STREAM_INPUT_VA_VALUE_SHIFT)
+#define STREAM_INPUT_VA_VALUE_GET(reg_val) (((reg_val)&STREAM_INPUT_VA_VALUE_MASK) >> STREAM_INPUT_VA_VALUE_SHIFT)
+#define STREAM_INPUT_VA_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_INPUT_VA_VALUE_MASK) | \
+ (((value) << STREAM_INPUT_VA_VALUE_SHIFT) & STREAM_INPUT_VA_VALUE_MASK))
+
+/* STREAM_OUTPUT_VA register */
+#define STREAM_OUTPUT_VA_VALUE_SHIFT 0
+#define STREAM_OUTPUT_VA_VALUE_MASK (0xFFFFFFFF << STREAM_OUTPUT_VA_VALUE_SHIFT)
+#define STREAM_OUTPUT_VA_VALUE_GET(reg_val) (((reg_val)&STREAM_OUTPUT_VA_VALUE_MASK) >> STREAM_OUTPUT_VA_VALUE_SHIFT)
+#define STREAM_OUTPUT_VA_VALUE_SET(reg_val, value) \
+ (((reg_val) & ~STREAM_OUTPUT_VA_VALUE_MASK) | \
+ (((value) << STREAM_OUTPUT_VA_VALUE_SHIFT) & STREAM_OUTPUT_VA_VALUE_MASK))
+/* End of STREAM_CONTROL_BLOCK register set definitions */
+
+/* GLB_INPUT_BLOCK register set definitions */
+
+/* GLB_REQ register */
+#define GLB_REQ_HALT_SHIFT 0
+#define GLB_REQ_HALT_MASK (0x1 << GLB_REQ_HALT_SHIFT)
+#define GLB_REQ_HALT_GET(reg_val) (((reg_val)&GLB_REQ_HALT_MASK) >> GLB_REQ_HALT_SHIFT)
+#define GLB_REQ_HALT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_HALT_MASK) | (((value) << GLB_REQ_HALT_SHIFT) & GLB_REQ_HALT_MASK))
+#define GLB_REQ_CFG_PROGRESS_TIMER_SHIFT 1
+#define GLB_REQ_CFG_PROGRESS_TIMER_MASK (0x1 << GLB_REQ_CFG_PROGRESS_TIMER_SHIFT)
+#define GLB_REQ_CFG_PROGRESS_TIMER_GET(reg_val) \
+ (((reg_val)&GLB_REQ_CFG_PROGRESS_TIMER_MASK) >> GLB_REQ_CFG_PROGRESS_TIMER_SHIFT)
+#define GLB_REQ_CFG_PROGRESS_TIMER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_CFG_PROGRESS_TIMER_MASK) | \
+ (((value) << GLB_REQ_CFG_PROGRESS_TIMER_SHIFT) & GLB_REQ_CFG_PROGRESS_TIMER_MASK))
+#define GLB_REQ_CFG_ALLOC_EN_SHIFT 2
+#define GLB_REQ_CFG_ALLOC_EN_MASK (0x1 << GLB_REQ_CFG_ALLOC_EN_SHIFT)
+#define GLB_REQ_CFG_ALLOC_EN_GET(reg_val) (((reg_val)&GLB_REQ_CFG_ALLOC_EN_MASK) >> GLB_REQ_CFG_ALLOC_EN_SHIFT)
+#define GLB_REQ_CFG_ALLOC_EN_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_CFG_ALLOC_EN_MASK) | (((value) << GLB_REQ_CFG_ALLOC_EN_SHIFT) & GLB_REQ_CFG_ALLOC_EN_MASK))
+#define GLB_REQ_CFG_PWROFF_TIMER_SHIFT 3
+#define GLB_REQ_CFG_PWROFF_TIMER_MASK (0x1 << GLB_REQ_CFG_PWROFF_TIMER_SHIFT)
+#define GLB_REQ_CFG_PWROFF_TIMER_GET(reg_val) \
+ (((reg_val)&GLB_REQ_CFG_PWROFF_TIMER_MASK) >> GLB_REQ_CFG_PWROFF_TIMER_SHIFT)
+#define GLB_REQ_CFG_PWROFF_TIMER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_CFG_PWROFF_TIMER_MASK) | \
+ (((value) << GLB_REQ_CFG_PWROFF_TIMER_SHIFT) & GLB_REQ_CFG_PWROFF_TIMER_MASK))
+#define GLB_REQ_PROTM_ENTER_SHIFT 4
+#define GLB_REQ_PROTM_ENTER_MASK (0x1 << GLB_REQ_PROTM_ENTER_SHIFT)
+#define GLB_REQ_PROTM_ENTER_GET(reg_val) (((reg_val)&GLB_REQ_PROTM_ENTER_MASK) >> GLB_REQ_PROTM_ENTER_SHIFT)
+#define GLB_REQ_PROTM_ENTER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PROTM_ENTER_MASK) | (((value) << GLB_REQ_PROTM_ENTER_SHIFT) & GLB_REQ_PROTM_ENTER_MASK))
+#define GLB_REQ_PRFCNT_ENABLE_SHIFT 5
+#define GLB_REQ_PRFCNT_ENABLE_MASK (0x1 << GLB_REQ_PRFCNT_ENABLE_SHIFT)
+#define GLB_REQ_PRFCNT_ENABLE_GET(reg_val) (((reg_val)&GLB_REQ_PRFCNT_ENABLE_MASK) >> GLB_REQ_PRFCNT_ENABLE_SHIFT)
+#define GLB_REQ_PRFCNT_ENABLE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PRFCNT_ENABLE_MASK) | \
+ (((value) << GLB_REQ_PRFCNT_ENABLE_SHIFT) & GLB_REQ_PRFCNT_ENABLE_MASK))
+#define GLB_REQ_PRFCNT_SAMPLE_SHIFT 6
+#define GLB_REQ_PRFCNT_SAMPLE_MASK (0x1 << GLB_REQ_PRFCNT_SAMPLE_SHIFT)
+#define GLB_REQ_PRFCNT_SAMPLE_GET(reg_val) (((reg_val)&GLB_REQ_PRFCNT_SAMPLE_MASK) >> GLB_REQ_PRFCNT_SAMPLE_SHIFT)
+#define GLB_REQ_PRFCNT_SAMPLE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PRFCNT_SAMPLE_MASK) | \
+ (((value) << GLB_REQ_PRFCNT_SAMPLE_SHIFT) & GLB_REQ_PRFCNT_SAMPLE_MASK))
+#define GLB_REQ_COUNTER_ENABLE_SHIFT 7
+#define GLB_REQ_COUNTER_ENABLE_MASK (0x1 << GLB_REQ_COUNTER_ENABLE_SHIFT)
+#define GLB_REQ_COUNTER_ENABLE_GET(reg_val) (((reg_val)&GLB_REQ_COUNTER_ENABLE_MASK) >> GLB_REQ_COUNTER_ENABLE_SHIFT)
+#define GLB_REQ_COUNTER_ENABLE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_COUNTER_ENABLE_MASK) | \
+ (((value) << GLB_REQ_COUNTER_ENABLE_SHIFT) & GLB_REQ_COUNTER_ENABLE_MASK))
+#define GLB_REQ_PING_SHIFT 8
+#define GLB_REQ_PING_MASK (0x1 << GLB_REQ_PING_SHIFT)
+#define GLB_REQ_PING_GET(reg_val) (((reg_val)&GLB_REQ_PING_MASK) >> GLB_REQ_PING_SHIFT)
+#define GLB_REQ_PING_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PING_MASK) | (((value) << GLB_REQ_PING_SHIFT) & GLB_REQ_PING_MASK))
+#define GLB_REQ_FIRMWARE_CONFIG_UPDATE_SHIFT 9
+#define GLB_REQ_FIRMWARE_CONFIG_UPDATE_MASK \
+ (0x1 << GLB_REQ_FIRMWARE_CONFIG_UPDATE_SHIFT)
+#define GLB_REQ_FIRMWARE_CONFIG_UPDATE_GET(reg_val) \
+ (((reg_val)&GLB_REQ_FIRMWARE_CONFIG_UPDATE_MASK) >> \
+ GLB_REQ_FIRMWARE_CONFIG_UPDATE_SHIFT)
+#define GLB_REQ_FIRMWARE_CONFIG_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_FIRMWARE_CONFIG_UPDATE_MASK) | \
+ (((value) << GLB_REQ_FIRMWARE_CONFIG_UPDATE_SHIFT) & \
+ GLB_REQ_FIRMWARE_CONFIG_UPDATE_MASK))
+#define GLB_REQ_INACTIVE_COMPUTE_SHIFT 20
+#define GLB_REQ_INACTIVE_COMPUTE_MASK (0x1 << GLB_REQ_INACTIVE_COMPUTE_SHIFT)
+#define GLB_REQ_INACTIVE_COMPUTE_GET(reg_val) \
+ (((reg_val)&GLB_REQ_INACTIVE_COMPUTE_MASK) >> GLB_REQ_INACTIVE_COMPUTE_SHIFT)
+#define GLB_REQ_INACTIVE_COMPUTE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_INACTIVE_COMPUTE_MASK) | \
+ (((value) << GLB_REQ_INACTIVE_COMPUTE_SHIFT) & GLB_REQ_INACTIVE_COMPUTE_MASK))
+#define GLB_REQ_INACTIVE_FRAGMENT_SHIFT 21
+#define GLB_REQ_INACTIVE_FRAGMENT_MASK (0x1 << GLB_REQ_INACTIVE_FRAGMENT_SHIFT)
+#define GLB_REQ_INACTIVE_FRAGMENT_GET(reg_val) \
+ (((reg_val)&GLB_REQ_INACTIVE_FRAGMENT_MASK) >> GLB_REQ_INACTIVE_FRAGMENT_SHIFT)
+#define GLB_REQ_INACTIVE_FRAGMENT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_INACTIVE_FRAGMENT_MASK) | \
+ (((value) << GLB_REQ_INACTIVE_FRAGMENT_SHIFT) & GLB_REQ_INACTIVE_FRAGMENT_MASK))
+#define GLB_REQ_INACTIVE_TILER_SHIFT 22
+#define GLB_REQ_INACTIVE_TILER_MASK (0x1 << GLB_REQ_INACTIVE_TILER_SHIFT)
+#define GLB_REQ_INACTIVE_TILER_GET(reg_val) (((reg_val)&GLB_REQ_INACTIVE_TILER_MASK) >> GLB_REQ_INACTIVE_TILER_SHIFT)
+#define GLB_REQ_INACTIVE_TILER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_INACTIVE_TILER_MASK) | \
+ (((value) << GLB_REQ_INACTIVE_TILER_SHIFT) & GLB_REQ_INACTIVE_TILER_MASK))
+#define GLB_REQ_PROTM_EXIT_SHIFT 23
+#define GLB_REQ_PROTM_EXIT_MASK (0x1 << GLB_REQ_PROTM_EXIT_SHIFT)
+#define GLB_REQ_PROTM_EXIT_GET(reg_val) (((reg_val)&GLB_REQ_PROTM_EXIT_MASK) >> GLB_REQ_PROTM_EXIT_SHIFT)
+#define GLB_REQ_PROTM_EXIT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PROTM_EXIT_MASK) | (((value) << GLB_REQ_PROTM_EXIT_SHIFT) & GLB_REQ_PROTM_EXIT_MASK))
+#define GLB_REQ_PRFCNT_THRESHOLD_SHIFT 24
+#define GLB_REQ_PRFCNT_THRESHOLD_MASK (0x1 << GLB_REQ_PRFCNT_THRESHOLD_SHIFT)
+#define GLB_REQ_PRFCNT_THRESHOLD_GET(reg_val) \
+ (((reg_val)&GLB_REQ_PRFCNT_THRESHOLD_MASK) >> \
+ GLB_REQ_PRFCNT_THRESHOLD_SHIFT)
+#define GLB_REQ_PRFCNT_THRESHOLD_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PRFCNT_THRESHOLD_MASK) | \
+ (((value) << GLB_REQ_PRFCNT_THRESHOLD_SHIFT) & \
+ GLB_REQ_PRFCNT_THRESHOLD_MASK))
+#define GLB_REQ_PRFCNT_OVERFLOW_SHIFT 25
+#define GLB_REQ_PRFCNT_OVERFLOW_MASK (0x1 << GLB_REQ_PRFCNT_OVERFLOW_SHIFT)
+#define GLB_REQ_PRFCNT_OVERFLOW_GET(reg_val) \
+ (((reg_val)&GLB_REQ_PRFCNT_OVERFLOW_MASK) >> \
+ GLB_REQ_PRFCNT_OVERFLOW_SHIFT)
+#define GLB_REQ_PRFCNT_OVERFLOW_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_PRFCNT_OVERFLOW_MASK) | \
+ (((value) << GLB_REQ_PRFCNT_OVERFLOW_SHIFT) & \
+ GLB_REQ_PRFCNT_OVERFLOW_MASK))
+#define GLB_REQ_DEBUG_CSF_REQ_SHIFT 30
+#define GLB_REQ_DEBUG_CSF_REQ_MASK (0x1 << GLB_REQ_DEBUG_CSF_REQ_SHIFT)
+#define GLB_REQ_DEBUG_CSF_REQ_GET(reg_val) (((reg_val)&GLB_REQ_DEBUG_CSF_REQ_MASK) >> GLB_REQ_DEBUG_CSF_REQ_SHIFT)
+#define GLB_REQ_DEBUG_CSF_REQ_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_DEBUG_CSF_REQ_MASK) | \
+ (((value) << GLB_REQ_DEBUG_CSF_REQ_SHIFT) & GLB_REQ_DEBUG_CSF_REQ_MASK))
+#define GLB_REQ_DEBUG_HOST_REQ_SHIFT 31
+#define GLB_REQ_DEBUG_HOST_REQ_MASK (0x1 << GLB_REQ_DEBUG_HOST_REQ_SHIFT)
+#define GLB_REQ_DEBUG_HOST_REQ_GET(reg_val) (((reg_val)&GLB_REQ_DEBUG_HOST_REQ_MASK) >> GLB_REQ_DEBUG_HOST_REQ_SHIFT)
+#define GLB_REQ_DEBUG_HOST_REQ_SET(reg_val, value) \
+ (((reg_val) & ~GLB_REQ_DEBUG_HOST_REQ_MASK) | \
+ (((value) << GLB_REQ_DEBUG_HOST_REQ_SHIFT) & GLB_REQ_DEBUG_HOST_REQ_MASK))
+
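+/*
+ * Usage sketch (editorial illustration): single-bit GLB_REQ events appear
+ * to be toggle requests: the host flips the bit relative to the current
+ * GLB_ACK value and the request is complete once the two fields match
+ * again. Issuing a ping could then look as follows (variable names are
+ * hypothetical):
+ *
+ *   glb_req = glb_ack ^ GLB_REQ_PING_MASK;
+ *   // write glb_req to the global input page and ring the global doorbell
+ */
+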
+/* GLB_ACK_IRQ_MASK register */
+#define GLB_ACK_IRQ_MASK_HALT_SHIFT 0
+#define GLB_ACK_IRQ_MASK_HALT_MASK (0x1 << GLB_ACK_IRQ_MASK_HALT_SHIFT)
+#define GLB_ACK_IRQ_MASK_HALT_GET(reg_val) (((reg_val)&GLB_ACK_IRQ_MASK_HALT_MASK) >> GLB_ACK_IRQ_MASK_HALT_SHIFT)
+#define GLB_ACK_IRQ_MASK_HALT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_HALT_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_HALT_SHIFT) & GLB_ACK_IRQ_MASK_HALT_MASK))
+#define GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_SHIFT 1
+#define GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_MASK (0x1 << GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_SHIFT)
+#define GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_MASK) >> GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_SHIFT)
+#define GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_SHIFT) & GLB_ACK_IRQ_MASK_CFG_PROGRESS_TIMER_MASK))
+#define GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_SHIFT 2
+#define GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_MASK (0x1 << GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_SHIFT)
+#define GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_MASK) >> GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_SHIFT)
+#define GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_SHIFT) & GLB_ACK_IRQ_MASK_CFG_ALLOC_EN_MASK))
+#define GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_SHIFT 3
+#define GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_MASK (0x1 << GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_SHIFT)
+#define GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_MASK) >> GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_SHIFT)
+#define GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_SHIFT) & GLB_ACK_IRQ_MASK_CFG_PWROFF_TIMER_MASK))
+#define GLB_ACK_IRQ_MASK_PROTM_ENTER_SHIFT 4
+#define GLB_ACK_IRQ_MASK_PROTM_ENTER_MASK (0x1 << GLB_ACK_IRQ_MASK_PROTM_ENTER_SHIFT)
+#define GLB_ACK_IRQ_MASK_PROTM_ENTER_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_PROTM_ENTER_MASK) >> GLB_ACK_IRQ_MASK_PROTM_ENTER_SHIFT)
+#define GLB_ACK_IRQ_MASK_PROTM_ENTER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PROTM_ENTER_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PROTM_ENTER_SHIFT) & GLB_ACK_IRQ_MASK_PROTM_ENTER_MASK))
+#define GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_SHIFT 5
+#define GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_MASK (0x1 << GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_MASK) >> GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_SHIFT) & GLB_ACK_IRQ_MASK_PRFCNT_ENABLE_MASK))
+#define GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_SHIFT 6
+#define GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_MASK (0x1 << GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_MASK) >> GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_SHIFT) & GLB_ACK_IRQ_MASK_PRFCNT_SAMPLE_MASK))
+#define GLB_ACK_IRQ_MASK_COUNTER_ENABLE_SHIFT 7
+#define GLB_ACK_IRQ_MASK_COUNTER_ENABLE_MASK (0x1 << GLB_ACK_IRQ_MASK_COUNTER_ENABLE_SHIFT)
+#define GLB_ACK_IRQ_MASK_COUNTER_ENABLE_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_COUNTER_ENABLE_MASK) >> GLB_ACK_IRQ_MASK_COUNTER_ENABLE_SHIFT)
+#define GLB_ACK_IRQ_MASK_COUNTER_ENABLE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_COUNTER_ENABLE_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_COUNTER_ENABLE_SHIFT) & GLB_ACK_IRQ_MASK_COUNTER_ENABLE_MASK))
+#define GLB_ACK_IRQ_MASK_PING_SHIFT 8
+#define GLB_ACK_IRQ_MASK_PING_MASK (0x1 << GLB_ACK_IRQ_MASK_PING_SHIFT)
+#define GLB_ACK_IRQ_MASK_PING_GET(reg_val) (((reg_val)&GLB_ACK_IRQ_MASK_PING_MASK) >> GLB_ACK_IRQ_MASK_PING_SHIFT)
+#define GLB_ACK_IRQ_MASK_PING_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PING_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PING_SHIFT) & GLB_ACK_IRQ_MASK_PING_MASK))
+#define GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_SHIFT 9
+#define GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_MASK \
+ (0x1 << GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_SHIFT)
+#define GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_MASK) >> \
+ GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_SHIFT)
+#define GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_SHIFT) & \
+ GLB_ACK_IRQ_MASK_FIRMWARE_CONFIG_UPDATE_MASK))
+#define GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_SHIFT 20
+#define GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_MASK (0x1 << GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_SHIFT)
+#define GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_MASK) >> GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_SHIFT)
+#define GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_SHIFT) & GLB_ACK_IRQ_MASK_INACTIVE_COMPUTE_MASK))
+#define GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_SHIFT 21
+#define GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_MASK (0x1 << GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_SHIFT)
+#define GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_MASK) >> GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_SHIFT)
+#define GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_SHIFT) & GLB_ACK_IRQ_MASK_INACTIVE_FRAGMENT_MASK))
+#define GLB_ACK_IRQ_MASK_INACTIVE_TILER_SHIFT 22
+#define GLB_ACK_IRQ_MASK_INACTIVE_TILER_MASK (0x1 << GLB_ACK_IRQ_MASK_INACTIVE_TILER_SHIFT)
+#define GLB_ACK_IRQ_MASK_INACTIVE_TILER_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_INACTIVE_TILER_MASK) >> GLB_ACK_IRQ_MASK_INACTIVE_TILER_SHIFT)
+#define GLB_ACK_IRQ_MASK_INACTIVE_TILER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_INACTIVE_TILER_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_INACTIVE_TILER_SHIFT) & GLB_ACK_IRQ_MASK_INACTIVE_TILER_MASK))
+#define GLB_ACK_IRQ_MASK_PROTM_EXIT_SHIFT 23
+#define GLB_ACK_IRQ_MASK_PROTM_EXIT_MASK (0x1 << GLB_ACK_IRQ_MASK_PROTM_EXIT_SHIFT)
+#define GLB_ACK_IRQ_MASK_PROTM_EXIT_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_PROTM_EXIT_MASK) >> GLB_ACK_IRQ_MASK_PROTM_EXIT_SHIFT)
+#define GLB_ACK_IRQ_MASK_PROTM_EXIT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PROTM_EXIT_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PROTM_EXIT_SHIFT) & GLB_ACK_IRQ_MASK_PROTM_EXIT_MASK))
+#define GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_SHIFT 24
+#define GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_MASK \
+ (0x1 << GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_MASK) >> \
+ GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_SHIFT) & \
+ GLB_ACK_IRQ_MASK_PRFCNT_THRESHOLD_MASK))
+#define GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_SHIFT 25
+#define GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_MASK \
+ (0x1 << GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_MASK) >> \
+ GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_SHIFT)
+#define GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_SHIFT) & \
+ GLB_ACK_IRQ_MASK_PRFCNT_OVERFLOW_MASK))
+#define GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_SHIFT 30
+#define GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_MASK (0x1 << GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_SHIFT)
+#define GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_MASK) >> GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_SHIFT)
+#define GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_SHIFT) & GLB_ACK_IRQ_MASK_DEBUG_CSF_REQ_MASK))
+#define GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_SHIFT 31
+#define GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_MASK (0x1 << GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_SHIFT)
+#define GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_GET(reg_val) \
+ (((reg_val)&GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_MASK) >> GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_SHIFT)
+#define GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_MASK) | \
+ (((value) << GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_SHIFT) & GLB_ACK_IRQ_MASK_DEBUG_HOST_REQ_MASK))
+
+/* GLB_PROGRESS_TIMER register */
+#define GLB_PROGRESS_TIMER_TIMEOUT_SHIFT 0
+#define GLB_PROGRESS_TIMER_TIMEOUT_MASK (0xFFFFFFFF << GLB_PROGRESS_TIMER_TIMEOUT_SHIFT)
+#define GLB_PROGRESS_TIMER_TIMEOUT_GET(reg_val) \
+ (((reg_val)&GLB_PROGRESS_TIMER_TIMEOUT_MASK) >> GLB_PROGRESS_TIMER_TIMEOUT_SHIFT)
+#define GLB_PROGRESS_TIMER_TIMEOUT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_PROGRESS_TIMER_TIMEOUT_MASK) | \
+ (((value) << GLB_PROGRESS_TIMER_TIMEOUT_SHIFT) & GLB_PROGRESS_TIMER_TIMEOUT_MASK))
+
+/* GLB_PWROFF_TIMER register */
+#define GLB_PWROFF_TIMER_TIMEOUT_SHIFT 0
+#define GLB_PWROFF_TIMER_TIMEOUT_MASK (0x7FFFFFFF << GLB_PWROFF_TIMER_TIMEOUT_SHIFT)
+#define GLB_PWROFF_TIMER_TIMEOUT_GET(reg_val) \
+ (((reg_val)&GLB_PWROFF_TIMER_TIMEOUT_MASK) >> GLB_PWROFF_TIMER_TIMEOUT_SHIFT)
+#define GLB_PWROFF_TIMER_TIMEOUT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_PWROFF_TIMER_TIMEOUT_MASK) | \
+ (((value) << GLB_PWROFF_TIMER_TIMEOUT_SHIFT) & GLB_PWROFF_TIMER_TIMEOUT_MASK))
+#define GLB_PWROFF_TIMER_TIMER_SOURCE_SHIFT 31
+#define GLB_PWROFF_TIMER_TIMER_SOURCE_MASK (0x1 << GLB_PWROFF_TIMER_TIMER_SOURCE_SHIFT)
+#define GLB_PWROFF_TIMER_TIMER_SOURCE_GET(reg_val) \
+ (((reg_val)&GLB_PWROFF_TIMER_TIMER_SOURCE_MASK) >> GLB_PWROFF_TIMER_TIMER_SOURCE_SHIFT)
+#define GLB_PWROFF_TIMER_TIMER_SOURCE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_PWROFF_TIMER_TIMER_SOURCE_MASK) | \
+ (((value) << GLB_PWROFF_TIMER_TIMER_SOURCE_SHIFT) & GLB_PWROFF_TIMER_TIMER_SOURCE_MASK))
+/* GLB_PWROFF_TIMER_TIMER_SOURCE values */
+#define GLB_PWROFF_TIMER_TIMER_SOURCE_SYSTEM_TIMESTAMP 0x0
+#define GLB_PWROFF_TIMER_TIMER_SOURCE_GPU_COUNTER 0x1
+/* End of GLB_PWROFF_TIMER_TIMER_SOURCE values */
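+
+/*
+ * Usage sketch (editorial illustration): a GLB_PWROFF_TIMER value packs a
+ * 31-bit timeout count with a 1-bit timer source. The count below is an
+ * arbitrary example, not a recommended setting.
+ *
+ *   __u32 pwroff = 0;
+ *   pwroff = GLB_PWROFF_TIMER_TIMEOUT_SET(pwroff, 0x1000);
+ *   pwroff = GLB_PWROFF_TIMER_TIMER_SOURCE_SET(pwroff,
+ *                   GLB_PWROFF_TIMER_TIMER_SOURCE_GPU_COUNTER);
+ */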
+
+/* GLB_ALLOC_EN register */
+#define GLB_ALLOC_EN_MASK_SHIFT 0
+#define GLB_ALLOC_EN_MASK_MASK (0xFFFFFFFFFFFFFFFF << GLB_ALLOC_EN_MASK_SHIFT)
+#define GLB_ALLOC_EN_MASK_GET(reg_val) (((reg_val)&GLB_ALLOC_EN_MASK_MASK) >> GLB_ALLOC_EN_MASK_SHIFT)
+#define GLB_ALLOC_EN_MASK_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ALLOC_EN_MASK_MASK) | (((value) << GLB_ALLOC_EN_MASK_SHIFT) & GLB_ALLOC_EN_MASK_MASK))
+
+/* GLB_PROTM_COHERENCY register */
+#define GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_SHIFT 0
+#define GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_MASK \
+ (0xFFFFFFFF << GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_SHIFT)
+#define GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_GET(reg_val) \
+ (((reg_val)&GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_MASK) >> \
+ GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_SHIFT)
+#define GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_MASK) | \
+ (((value) << GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_SHIFT) & \
+ GLB_PROTM_COHERENCY_L2_CACHE_PROTOCOL_SELECT_MASK))
+/* End of GLB_INPUT_BLOCK register set definitions */
+
+/* GLB_OUTPUT_BLOCK register set definitions */
+
+/* GLB_ACK register */
+#define GLB_ACK_CFG_PROGRESS_TIMER_SHIFT 1
+#define GLB_ACK_CFG_PROGRESS_TIMER_MASK (0x1 << GLB_ACK_CFG_PROGRESS_TIMER_SHIFT)
+#define GLB_ACK_CFG_PROGRESS_TIMER_GET(reg_val) \
+ (((reg_val)&GLB_ACK_CFG_PROGRESS_TIMER_MASK) >> GLB_ACK_CFG_PROGRESS_TIMER_SHIFT)
+#define GLB_ACK_CFG_PROGRESS_TIMER_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_CFG_PROGRESS_TIMER_MASK) | \
+ (((value) << GLB_ACK_CFG_PROGRESS_TIMER_SHIFT) & GLB_ACK_CFG_PROGRESS_TIMER_MASK))
+#define GLB_ACK_CFG_ALLOC_EN_SHIFT 2
+#define GLB_ACK_CFG_ALLOC_EN_MASK (0x1 << GLB_ACK_CFG_ALLOC_EN_SHIFT)
+#define GLB_ACK_CFG_ALLOC_EN_GET(reg_val) (((reg_val)&GLB_ACK_CFG_ALLOC_EN_MASK) >> GLB_ACK_CFG_ALLOC_EN_SHIFT)
+#define GLB_ACK_CFG_ALLOC_EN_SET(reg_val, value) \
+ (((reg_val) & ~GLB_ACK_CFG_ALLOC_EN_MASK) | (((value) << GLB_ACK_CFG_ALLOC_EN_SHIFT) & GLB_ACK_CFG_ALLOC_EN_MASK))
+/* End of GLB_OUTPUT_BLOCK register set definitions */
+
+/* The following register and fields are for headers before 10.x.7/11.x.4 */
+#define GLB_REQ_IDLE_ENABLE_SHIFT (10)
+#define GLB_REQ_REQ_IDLE_ENABLE (1 << GLB_REQ_IDLE_ENABLE_SHIFT)
+#define GLB_REQ_REQ_IDLE_DISABLE (0 << GLB_REQ_IDLE_ENABLE_SHIFT)
+#define GLB_REQ_IDLE_ENABLE_MASK (0x1 << GLB_REQ_IDLE_ENABLE_SHIFT)
+#define GLB_REQ_IDLE_DISABLE_MASK (0x1 << GLB_REQ_IDLE_ENABLE_SHIFT)
+#define GLB_REQ_IDLE_EVENT_SHIFT (26)
+#define GLB_REQ_IDLE_EVENT_MASK (0x1 << GLB_REQ_IDLE_EVENT_SHIFT)
+#define GLB_ACK_IDLE_ENABLE_SHIFT (10)
+#define GLB_ACK_ACK_IDLE_ENABLE (1 << GLB_ACK_IDLE_ENABLE_SHIFT)
+#define GLB_ACK_ACK_IDLE_DISABLE (0 << GLB_ACK_IDLE_ENABLE_SHIFT)
+#define GLB_ACK_IDLE_ENABLE_MASK (0x1 << GLB_ACK_IDLE_ENABLE_SHIFT)
+#define GLB_ACK_IDLE_EVENT_SHIFT (26)
+#define GLB_ACK_IDLE_EVENT_MASK (0x1 << GLB_ACK_IDLE_EVENT_SHIFT)
+
+#define GLB_ACK_IRQ_MASK_IDLE_EVENT_SHIFT (26)
+#define GLB_ACK_IRQ_MASK_IDLE_EVENT_MASK (0x1 << GLB_ACK_IRQ_MASK_IDLE_EVENT_SHIFT)
+
+#define GLB_IDLE_TIMER (0x0080)
+/* GLB_IDLE_TIMER register */
+#define GLB_IDLE_TIMER_TIMEOUT_SHIFT (0)
+#define GLB_IDLE_TIMER_TIMEOUT_MASK ((0x7FFFFFFF) << GLB_IDLE_TIMER_TIMEOUT_SHIFT)
+#define GLB_IDLE_TIMER_TIMEOUT_GET(reg_val) (((reg_val)&GLB_IDLE_TIMER_TIMEOUT_MASK) >> GLB_IDLE_TIMER_TIMEOUT_SHIFT)
+#define GLB_IDLE_TIMER_TIMEOUT_SET(reg_val, value) \
+ (((reg_val) & ~GLB_IDLE_TIMER_TIMEOUT_MASK) | \
+ (((value) << GLB_IDLE_TIMER_TIMEOUT_SHIFT) & GLB_IDLE_TIMER_TIMEOUT_MASK))
+#define GLB_IDLE_TIMER_TIMER_SOURCE_SHIFT (31)
+#define GLB_IDLE_TIMER_TIMER_SOURCE_MASK ((0x1) << GLB_IDLE_TIMER_TIMER_SOURCE_SHIFT)
+#define GLB_IDLE_TIMER_TIMER_SOURCE_GET(reg_val) \
+ (((reg_val)&GLB_IDLE_TIMER_TIMER_SOURCE_MASK) >> GLB_IDLE_TIMER_TIMER_SOURCE_SHIFT)
+#define GLB_IDLE_TIMER_TIMER_SOURCE_SET(reg_val, value) \
+ (((reg_val) & ~GLB_IDLE_TIMER_TIMER_SOURCE_MASK) | \
+ (((value) << GLB_IDLE_TIMER_TIMER_SOURCE_SHIFT) & GLB_IDLE_TIMER_TIMER_SOURCE_MASK))
+/* GLB_IDLE_TIMER_TIMER_SOURCE values */
+#define GLB_IDLE_TIMER_TIMER_SOURCE_SYSTEM_TIMESTAMP 0x0
+#define GLB_IDLE_TIMER_TIMER_SOURCE_GPU_COUNTER 0x1
+/* End of GLB_IDLE_TIMER_TIMER_SOURCE values */
+
+#define CSG_STATUS_STATE (0x0018) /* CSG state status register */
+/* CSG_STATUS_STATE register */
+#define CSG_STATUS_STATE_IDLE_SHIFT (0)
+#define CSG_STATUS_STATE_IDLE_MASK ((0x1) << CSG_STATUS_STATE_IDLE_SHIFT)
+#define CSG_STATUS_STATE_IDLE_GET(reg_val) \
+ (((reg_val)&CSG_STATUS_STATE_IDLE_MASK) >> CSG_STATUS_STATE_IDLE_SHIFT)
+#define CSG_STATUS_STATE_IDLE_SET(reg_val, value) \
+ (((reg_val) & ~CSG_STATUS_STATE_IDLE_MASK) | \
+ (((value) << CSG_STATUS_STATE_IDLE_SHIFT) & CSG_STATUS_STATE_IDLE_MASK))
+
+#endif /* _UAPI_GPU_CSF_REGISTERS_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h b/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
new file mode 100644
index 0000000..237cc2e
--- /dev/null
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_CSF_IOCTL_H_
+#define _UAPI_KBASE_CSF_IOCTL_H_
+
+#include <asm-generic/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * 1.0:
+ * - CSF IOCTL header separated from JM
+ * 1.1:
+ * - Add a new priority level BASE_QUEUE_GROUP_PRIORITY_REALTIME
+ * - Add ioctl 54, which controls the priority setting.
+ * 1.2:
+ * - Add new CSF GPU_FEATURES register into the property structure
+ * returned by KBASE_IOCTL_GET_GPUPROPS
+ * 1.3:
+ * - Add __u32 group_uid member to
+ *   &struct kbase_ioctl_cs_queue_group_create.out
+ * 1.4:
+ * - Replace padding in kbase_ioctl_cs_get_glb_iface with
+ * instr_features member of same size
+ */
+
+#define BASE_UK_VERSION_MAJOR 1
+#define BASE_UK_VERSION_MINOR 4
+
+/**
+ * struct kbase_ioctl_version_check - Check version compatibility between
+ * kernel and userspace
+ *
+ * @major: Major version number
+ * @minor: Minor version number
+ */
+struct kbase_ioctl_version_check {
+ __u16 major;
+ __u16 minor;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK_RESERVED \
+ _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
+
+
+/**
+ * struct kbase_ioctl_cs_queue_register - Register a GPU command queue with the
+ * base back-end
+ *
+ * @buffer_gpu_addr: GPU address of the buffer backing the queue
+ * @buffer_size: Size of the buffer in bytes
+ * @priority: Priority of the queue within a group when run within a process
+ * @padding: Currently unused, must be zero
+ */
+struct kbase_ioctl_cs_queue_register {
+ __u64 buffer_gpu_addr;
+ __u32 buffer_size;
+ __u8 priority;
+ __u8 padding[3];
+};
+
+#define KBASE_IOCTL_CS_QUEUE_REGISTER \
+ _IOW(KBASE_IOCTL_TYPE, 36, struct kbase_ioctl_cs_queue_register)
+
+/**
+ * struct kbase_ioctl_cs_queue_kick - Kick the GPU command queue group scheduler
+ * to notify that a queue has been updated
+ *
+ * @buffer_gpu_addr: GPU address of the buffer backing the queue
+ */
+struct kbase_ioctl_cs_queue_kick {
+ __u64 buffer_gpu_addr;
+};
+
+#define KBASE_IOCTL_CS_QUEUE_KICK \
+ _IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick)
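+
+/* Usage sketch from userspace (illustrative; fd and queue_va are
+ * placeholders): after new commands have been written to the ring buffer and
+ * its insert pointer updated, the scheduler is notified with a kick keyed on
+ * the queue's buffer address:
+ *
+ *   struct kbase_ioctl_cs_queue_kick kick = {
+ *       .buffer_gpu_addr = queue_va,
+ *   };
+ *   ioctl(fd, KBASE_IOCTL_CS_QUEUE_KICK, &kick);
+ */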
+
+/**
+ * union kbase_ioctl_cs_queue_bind - Bind a GPU command queue to a group
+ *
+ * @in: Input parameters
+ * @in.buffer_gpu_addr: GPU address of the buffer backing the queue
+ * @in.group_handle: Handle of the group to which the queue should be bound
+ * @in.csi_index: Index of the CSF interface the queue should be bound to
+ * @in.padding: Currently unused, must be zero
+ * @out: Output parameters
+ * @out.mmap_handle: Handle to be used for creating the mapping of CS
+ * input/output pages
+ */
+union kbase_ioctl_cs_queue_bind {
+ struct {
+ __u64 buffer_gpu_addr;
+ __u8 group_handle;
+ __u8 csi_index;
+ __u8 padding[6];
+ } in;
+ struct {
+ __u64 mmap_handle;
+ } out;
+};
+
+#define KBASE_IOCTL_CS_QUEUE_BIND \
+ _IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind)
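+
+/* Usage sketch (illustrative; handles are placeholders): a queue is first
+ * registered, then bound to an existing group; out.mmap_handle is then used
+ * to create the mapping of the CS input/output pages (presumably via mmap(),
+ * per the field description above):
+ *
+ *   union kbase_ioctl_cs_queue_bind bind = { 0 };
+ *   bind.in.buffer_gpu_addr = queue_va;   // as passed to CS_QUEUE_REGISTER
+ *   bind.in.group_handle = group_handle;  // from CS_QUEUE_GROUP_CREATE
+ *   bind.in.csi_index = 0;
+ *   ioctl(fd, KBASE_IOCTL_CS_QUEUE_BIND, &bind);
+ */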
+
+/* ioctl 40 is free to use */
+
+/**
+ * struct kbase_ioctl_cs_queue_terminate - Terminate a GPU command queue
+ *
+ * @buffer_gpu_addr: GPU address of the buffer backing the queue
+ */
+struct kbase_ioctl_cs_queue_terminate {
+ __u64 buffer_gpu_addr;
+};
+
+#define KBASE_IOCTL_CS_QUEUE_TERMINATE \
+ _IOW(KBASE_IOCTL_TYPE, 41, struct kbase_ioctl_cs_queue_terminate)
+
+/**
+ * union kbase_ioctl_cs_queue_group_create - Create a GPU command queue group
+ * @in: Input parameters
+ * @in.tiler_mask: Mask of tiler endpoints the group is allowed to use.
+ * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use.
+ * @in.compute_mask: Mask of compute endpoints the group is allowed to use.
+ * @in.cs_min: Minimum number of CSs required.
+ * @in.priority: Queue group's priority within a process.
+ * @in.tiler_max: Maximum number of tiler endpoints the group is allowed
+ * to use.
+ * @in.fragment_max: Maximum number of fragment endpoints the group is
+ * allowed to use.
+ * @in.compute_max: Maximum number of compute endpoints the group is allowed
+ * to use.
+ * @in.padding: Currently unused, must be zero
+ * @out: Output parameters
+ * @out.group_handle: Handle of a newly created queue group.
+ * @out.padding: Currently unused, must be zero
+ * @out.group_uid: UID of the queue group available to base.
+ */
+union kbase_ioctl_cs_queue_group_create {
+ struct {
+ __u64 tiler_mask;
+ __u64 fragment_mask;
+ __u64 compute_mask;
+ __u8 cs_min;
+ __u8 priority;
+ __u8 tiler_max;
+ __u8 fragment_max;
+ __u8 compute_max;
+ __u8 padding[3];
+ } in;
+ struct {
+ __u8 group_handle;
+ __u8 padding[3];
+ __u32 group_uid;
+ } out;
+};
+
+#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE \
+ _IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create)
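+
+/* Usage sketch (illustrative; the mask and minimum-CS choices below are
+ * placeholders, not recommended values):
+ *
+ *   union kbase_ioctl_cs_queue_group_create create = { 0 };
+ *   create.in.tiler_mask = ~0ULL;     // allow every tiler endpoint
+ *   create.in.fragment_mask = ~0ULL;
+ *   create.in.compute_mask = ~0ULL;
+ *   create.in.cs_min = 1;
+ *   ioctl(fd, KBASE_IOCTL_CS_QUEUE_GROUP_CREATE, &create);
+ *   // on success, create.out.group_handle identifies the new group
+ */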
+
+/**
+ * struct kbase_ioctl_cs_queue_group_term - Terminate a GPU command queue group
+ *
+ * @group_handle: Handle of the queue group to be terminated
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_cs_queue_group_term {
+ __u8 group_handle;
+ __u8 padding[7];
+};
+
+#define KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE \
+ _IOW(KBASE_IOCTL_TYPE, 43, struct kbase_ioctl_cs_queue_group_term)
+
+#define KBASE_IOCTL_CS_EVENT_SIGNAL \
+ _IO(KBASE_IOCTL_TYPE, 44)
+
+typedef __u8 base_kcpu_queue_id; /* We support up to 256 active KCPU queues */
+
+/**
+ * struct kbase_ioctl_kcpu_queue_new - Create a KCPU command queue
+ *
+ * @id: ID of the new command queue returned by the kernel
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_kcpu_queue_new {
+ base_kcpu_queue_id id;
+ __u8 padding[7];
+};
+
+#define KBASE_IOCTL_KCPU_QUEUE_CREATE \
+ _IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new)
+
+/**
+ * struct kbase_ioctl_kcpu_queue_delete - Destroy a KCPU command queue
+ *
+ * @id: ID of the command queue to be destroyed
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_kcpu_queue_delete {
+ base_kcpu_queue_id id;
+ __u8 padding[7];
+};
+
+#define KBASE_IOCTL_KCPU_QUEUE_DELETE \
+ _IOW(KBASE_IOCTL_TYPE, 46, struct kbase_ioctl_kcpu_queue_delete)
+
+/**
+ * struct kbase_ioctl_kcpu_queue_enqueue - Enqueue commands into the KCPU queue
+ *
+ * @addr: Memory address of an array of struct base_kcpu_queue_command
+ * @nr_commands: Number of commands in the array
+ * @id: kcpu queue identifier, returned by KBASE_IOCTL_KCPU_QUEUE_CREATE ioctl
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_kcpu_queue_enqueue {
+ __u64 addr;
+ __u32 nr_commands;
+ base_kcpu_queue_id id;
+ __u8 padding[3];
+};
+
+#define KBASE_IOCTL_KCPU_QUEUE_ENQUEUE \
+ _IOW(KBASE_IOCTL_TYPE, 47, struct kbase_ioctl_kcpu_queue_enqueue)
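+
+/* Usage sketch (illustrative): commands are described by an array of
+ * struct base_kcpu_queue_command (declared in the base CSF kernel header)
+ * and enqueued on a queue previously created with
+ * KBASE_IOCTL_KCPU_QUEUE_CREATE:
+ *
+ *   struct kbase_ioctl_kcpu_queue_enqueue enq = {
+ *       .addr = (__u64)(uintptr_t)commands,  // commands[] is a placeholder
+ *       .nr_commands = n,
+ *       .id = queue_id,
+ *   };
+ *   ioctl(fd, KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, &enq);
+ */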
+
+/**
+ * union kbase_ioctl_cs_tiler_heap_init - Initialize chunked tiler memory heap
+ * @in: Input parameters
+ * @in.chunk_size: Size of each chunk.
+ * @in.initial_chunks: Initial number of chunks the heap will be created with.
+ * @in.max_chunks: Maximum number of chunks that the heap is allowed to use.
+ * @in.target_in_flight: Number of render-passes that the driver should attempt to
+ * keep in flight for which allocation of new chunks is
+ * allowed.
+ * @in.group_id: Group ID to be used for physical allocations.
+ * @in.padding: Padding
+ * @out: Output parameters
+ * @out.gpu_heap_va: GPU VA (virtual address) of Heap context that was set up
+ * for the heap.
+ * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap,
+ *                      actually points to the header of the heap chunk and
+ *                      not to the low address of free memory in the chunk.
+ */
+union kbase_ioctl_cs_tiler_heap_init {
+ struct {
+ __u32 chunk_size;
+ __u32 initial_chunks;
+ __u32 max_chunks;
+ __u16 target_in_flight;
+ __u8 group_id;
+ __u8 padding;
+ } in;
+ struct {
+ __u64 gpu_heap_va;
+ __u64 first_chunk_va;
+ } out;
+};
+
+#define KBASE_IOCTL_CS_TILER_HEAP_INIT \
+ _IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init)
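+
+/* Usage sketch (illustrative; all sizes are placeholders, and chunk_size is
+ * assumed to require GPU-page alignment):
+ *
+ *   union kbase_ioctl_cs_tiler_heap_init heap = { 0 };
+ *   heap.in.chunk_size = 1u << 20;
+ *   heap.in.initial_chunks = 2;
+ *   heap.in.max_chunks = 32;
+ *   heap.in.target_in_flight = 1;
+ *   ioctl(fd, KBASE_IOCTL_CS_TILER_HEAP_INIT, &heap);
+ *   // heap.out.gpu_heap_va is later passed to CS_TILER_HEAP_TERM
+ */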
+
+/**
+ * struct kbase_ioctl_cs_tiler_heap_term - Terminate a chunked tiler heap
+ * instance
+ *
+ * @gpu_heap_va: GPU VA of Heap context that was set up for the heap.
+ */
+struct kbase_ioctl_cs_tiler_heap_term {
+ __u64 gpu_heap_va;
+};
+
+#define KBASE_IOCTL_CS_TILER_HEAP_TERM \
+ _IOW(KBASE_IOCTL_TYPE, 49, struct kbase_ioctl_cs_tiler_heap_term)
+
+/**
+ * union kbase_ioctl_cs_get_glb_iface - Request the global control block
+ * of CSF interface capabilities
+ *
+ * @in: Input parameters
+ * @in.max_group_num: The maximum number of groups to be read. Can be 0, in
+ * which case groups_ptr is unused.
+ * @in.max_total_stream_num: The maximum number of CSs to be read. Can be 0, in
+ * which case streams_ptr is unused.
+ * @in.groups_ptr: Pointer where to store all the group data (sequentially).
+ * @in.streams_ptr: Pointer where to store all the CS data (sequentially).
+ * @out: Output parameters
+ * @out.glb_version: Global interface version.
+ * @out.features: Bit mask of features (e.g. whether certain types of job
+ * can be suspended).
+ * @out.group_num: Number of CSGs supported.
+ * @out.prfcnt_size: Size of CSF performance counters, in bytes. Bits 31:16
+ * hold the size of firmware performance counter data
+ * and 15:0 hold the size of hardware performance counter
+ * data.
+ * @out.total_stream_num: Total number of CSs, summed across all groups.
+ * @out.instr_features: Instrumentation features. Bits 7:4 hold the maximum
+ * size of events. Bits 3:0 hold the offset update rate.
+ */
+union kbase_ioctl_cs_get_glb_iface {
+ struct {
+ __u32 max_group_num;
+ __u32 max_total_stream_num;
+ __u64 groups_ptr;
+ __u64 streams_ptr;
+ } in;
+ struct {
+ __u32 glb_version;
+ __u32 features;
+ __u32 group_num;
+ __u32 prfcnt_size;
+ __u32 total_stream_num;
+ __u32 instr_features;
+ } out;
+};
+
+#define KBASE_IOCTL_CS_GET_GLB_IFACE \
+ _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface)
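+
+/* Usage sketch (illustrative): one plausible pattern is to call twice --
+ * first with max_group_num and max_total_stream_num set to 0 to learn how
+ * much to allocate, then again with the buffers in place:
+ *
+ *   union kbase_ioctl_cs_get_glb_iface iface = { 0 };
+ *   ioctl(fd, KBASE_IOCTL_CS_GET_GLB_IFACE, &iface);  // sizes only
+ *   // allocate iface.out.group_num group entries and
+ *   // iface.out.total_stream_num stream entries, set in.groups_ptr,
+ *   // in.streams_ptr and the max_* fields, then call again
+ */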
+
+/**
+ * struct kbase_ioctl_cs_cpu_queue_info - Buffer descriptor used by
+ *                                        KBASE_IOCTL_CS_CPU_QUEUE_DUMP
+ *
+ * @buffer: Address of a userspace buffer
+ * @size: Size of the buffer in bytes
+ */
+struct kbase_ioctl_cs_cpu_queue_info {
+ __u64 buffer;
+ __u64 size;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)
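+
+/* Usage sketch from userspace (illustrative; the device node path is an
+ * assumption and error handling is elided):
+ *
+ *   struct kbase_ioctl_version_check vc = {
+ *       .major = BASE_UK_VERSION_MAJOR,
+ *       .minor = BASE_UK_VERSION_MINOR,
+ *   };
+ *   int fd = open("/dev/mali0", O_RDWR);  // device node path assumed
+ *   ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc);
+ *   // on return, vc holds the version the kernel agreed to
+ */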
+
+#define KBASE_IOCTL_CS_CPU_QUEUE_DUMP \
+ _IOW(KBASE_IOCTL_TYPE, 53, struct kbase_ioctl_cs_cpu_queue_info)
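+
+/* Usage sketch (illustrative; the buffer semantics -- a userspace buffer
+ * that receives the dump -- are inferred from the field names):
+ *
+ *   struct kbase_ioctl_cs_cpu_queue_info info = {
+ *       .buffer = (__u64)(uintptr_t)buf,
+ *       .size = buf_size,
+ *   };
+ *   ioctl(fd, KBASE_IOCTL_CS_CPU_QUEUE_DUMP, &info);
+ */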
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the production
+ * driver, they therefore may change without notice
+ */
+
+/**
+ * struct kbase_ioctl_cs_event_memory_write - Write an event memory address
+ * @cpu_addr: Memory address to write
+ * @value: Value to write
+ * @padding: Currently unused, must be zero
+ */
+struct kbase_ioctl_cs_event_memory_write {
+ __u64 cpu_addr;
+ __u8 value;
+ __u8 padding[7];
+};
+
+/**
+ * union kbase_ioctl_cs_event_memory_read - Read an event memory address
+ * @in: Input parameters
+ * @in.cpu_addr: Memory address to read
+ * @out: Output parameters
+ * @out.value: Value read
+ * @out.padding: Currently unused, must be zero
+ */
+union kbase_ioctl_cs_event_memory_read {
+ struct {
+ __u64 cpu_addr;
+ } in;
+ struct {
+ __u8 value;
+ __u8 padding[7];
+ } out;
+};
+
+#endif /* MALI_UNIT_TEST */
+
+#endif /* _UAPI_KBASE_CSF_IOCTL_H_ */