summaryrefslogtreecommitdiff
path: root/common
diff options
context:
space:
mode:
authorJörg Wagner <jorwag@google.com>2023-12-14 09:44:26 +0000
committerJörg Wagner <jorwag@google.com>2023-12-14 09:44:26 +0000
commit049a542207ed694271316782397b78b2e202086a (patch)
tree105e9378d4d5062dc72109fdd4a77c915bd9425d /common
parente61eb93296e9f940b32d4ad4b0c3a5557cbeaf17 (diff)
downloadgpu-049a542207ed694271316782397b78b2e202086a.tar.gz
Update KMD to r47p0
Provenance: ipdelivery@ad01e50d640910a99224382bb227e6d4de627657 Change-Id: I19ac9bce34a5c5a319c1b4a388e8b037b3dfe6e7
Diffstat (limited to 'common')
-rw-r--r--common/include/linux/mali_arbiter_interface.h9
-rw-r--r--common/include/linux/memory_group_manager.h49
-rw-r--r--common/include/linux/priority_control_manager.h89
-rw-r--r--common/include/linux/protected_memory_allocator.h15
-rw-r--r--common/include/linux/protected_mode_switcher.h8
-rw-r--r--common/include/linux/version_compat_defs.h122
-rw-r--r--common/include/uapi/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.h6
-rw-r--r--common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_dummy.h43
-rw-r--r--common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_linux.h3
-rw-r--r--common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h29
-rw-r--r--common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h38
-rw-r--r--common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_coherency.h10
-rw-r--r--common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_id.h204
-rw-r--r--common/include/uapi/gpu/arm/midgard/jm/mali_base_jm_kernel.h150
-rw-r--r--common/include/uapi/gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h23
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_base_common_kernel.h12
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_base_kernel.h109
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_base_mem_priv.h6
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_gpu_props.h111
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_kbase_hwcnt_reader.h57
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_kbase_ioctl.h26
-rw-r--r--common/include/uapi/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h4
22 files changed, 698 insertions, 425 deletions
diff --git a/common/include/linux/mali_arbiter_interface.h b/common/include/linux/mali_arbiter_interface.h
index 8e675ec..b4162f8 100644
--- a/common/include/linux/mali_arbiter_interface.h
+++ b/common/include/linux/mali_arbiter_interface.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -142,15 +142,14 @@ struct arbiter_if_arb_vm_ops {
* (via arbiter_if_arb_vm_ops above) in the context of these callbacks.
*/
struct arbiter_if_vm_arb_ops {
- int (*vm_arb_register_dev)(struct arbiter_if_dev *arbif_dev,
- struct device *dev, struct arbiter_if_arb_vm_ops *ops);
+ int (*vm_arb_register_dev)(struct arbiter_if_dev *arbif_dev, struct device *dev,
+ struct arbiter_if_arb_vm_ops *ops);
void (*vm_arb_unregister_dev)(struct arbiter_if_dev *arbif_dev);
void (*vm_arb_get_max_config)(struct arbiter_if_dev *arbif_dev);
void (*vm_arb_gpu_request)(struct arbiter_if_dev *arbif_dev);
void (*vm_arb_gpu_active)(struct arbiter_if_dev *arbif_dev);
void (*vm_arb_gpu_idle)(struct arbiter_if_dev *arbif_dev);
- void (*vm_arb_gpu_stopped)(struct arbiter_if_dev *arbif_dev,
- u8 gpu_required);
+ void (*vm_arb_gpu_stopped)(struct arbiter_if_dev *arbif_dev, u8 gpu_required);
};
/**
diff --git a/common/include/linux/memory_group_manager.h b/common/include/linux/memory_group_manager.h
index 786e3b9..3820f1b 100644
--- a/common/include/linux/memory_group_manager.h
+++ b/common/include/linux/memory_group_manager.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -58,14 +58,17 @@ struct memory_group_manager_ops {
* 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
* @gfp_mask: Bitmask of Get Free Page flags affecting allocator
* behavior.
- * @order: Page order for physical page size (order=0 means 4 KiB,
- * order=9 means 2 MiB).
+ * @order: Page order for physical page size.
+ * order = 0 refers to small pages
+ * order != 0 refers to 2 MB pages, so
+ * order = 9 (when small page size is 4KB, 2^9 * 4KB = 2 MB)
+ * order = 7 (when small page size is 16KB, 2^7 * 16KB = 2 MB)
+ * order = 5 (when small page size is 64KB, 2^5 * 64KB = 2 MB)
*
* Return: Pointer to allocated page, or NULL if allocation failed.
*/
- struct page *(*mgm_alloc_page)(
- struct memory_group_manager_device *mgm_dev, int group_id,
- gfp_t gfp_mask, unsigned int order);
+ struct page *(*mgm_alloc_page)(struct memory_group_manager_device *mgm_dev,
+ unsigned int group_id, gfp_t gfp_mask, unsigned int order);
/*
* mgm_free_page - Free a physical memory page in a group
@@ -79,12 +82,12 @@ struct memory_group_manager_ops {
* memory that was allocated by calling the mgm_alloc_page
* method of the same memory pool with the same values of
* @group_id and @order.
- * @order: Page order for physical page size (order=0 means 4 KiB,
- * order=9 means 2 MiB).
+ * @order: Page order for physical page size.
+ * order = 0 refers to small pages
+ * order != 0 refers to 2 MB pages.
*/
- void (*mgm_free_page)(
- struct memory_group_manager_device *mgm_dev, int group_id,
- struct page *page, unsigned int order);
+ void (*mgm_free_page)(struct memory_group_manager_device *mgm_dev, unsigned int group_id,
+ struct page *page, unsigned int order);
/*
* mgm_get_import_memory_id - Get the physical memory group ID for the
@@ -101,9 +104,8 @@ struct memory_group_manager_ops {
* Return: The memory group ID to use when mapping pages from this
* imported memory.
*/
- int (*mgm_get_import_memory_id)(
- struct memory_group_manager_device *mgm_dev,
- struct memory_group_manager_import_data *import_data);
+ int (*mgm_get_import_memory_id)(struct memory_group_manager_device *mgm_dev,
+ struct memory_group_manager_import_data *import_data);
/*
* mgm_update_gpu_pte - Modify a GPU page table entry for a memory group
@@ -128,7 +130,7 @@ struct memory_group_manager_ops {
* Return: A modified GPU page table entry to be stored in a page table.
*/
u64 (*mgm_update_gpu_pte)(struct memory_group_manager_device *mgm_dev,
- int group_id, int mmu_level, u64 pte);
+ unsigned int group_id, int mmu_level, u64 pte);
/*
* mgm_pte_to_original_pte - Undo any modification done during mgm_update_gpu_pte()
@@ -148,8 +150,8 @@ struct memory_group_manager_ops {
*
* Return: PTE entry as originally specified to mgm_update_gpu_pte()
*/
- u64 (*mgm_pte_to_original_pte)(struct memory_group_manager_device *mgm_dev, int group_id,
- int mmu_level, u64 pte);
+ u64 (*mgm_pte_to_original_pte)(struct memory_group_manager_device *mgm_dev,
+ unsigned int group_id, int mmu_level, u64 pte);
/*
* mgm_vmf_insert_pfn_prot - Map a physical page in a group for the CPU
@@ -172,10 +174,10 @@ struct memory_group_manager_ops {
* Return: Type of fault that occurred or VM_FAULT_NOPAGE if the page
* table entry was successfully installed.
*/
- vm_fault_t (*mgm_vmf_insert_pfn_prot)(
- struct memory_group_manager_device *mgm_dev, int group_id,
- struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, pgprot_t pgprot);
+ vm_fault_t (*mgm_vmf_insert_pfn_prot)(struct memory_group_manager_device *mgm_dev,
+ unsigned int group_id, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ pgprot_t pgprot);
};
/**
@@ -199,10 +201,7 @@ struct memory_group_manager_device {
struct module *owner;
};
-
-enum memory_group_manager_import_type {
- MEMORY_GROUP_MANAGER_IMPORT_TYPE_DMA_BUF
-};
+enum memory_group_manager_import_type { MEMORY_GROUP_MANAGER_IMPORT_TYPE_DMA_BUF };
/**
* struct memory_group_manager_import_data - Structure describing the imported
diff --git a/common/include/linux/priority_control_manager.h b/common/include/linux/priority_control_manager.h
index a6b1519..d82419e 100644
--- a/common/include/linux/priority_control_manager.h
+++ b/common/include/linux/priority_control_manager.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,32 +29,77 @@
struct priority_control_manager_device;
/**
+ * DOC: PCM notifier callback types
+ *
+ * ADD_PRIORITIZED_PROCESS - indicate that work items for this process should be
+ * given priority over the work items from other
+ * processes that were assigned the same static
+ * priority level. Processes that would benefit from
+ * being added to this list includes foreground
+ * applications, as well as any other latency-sensitive
+ * applications.
+ *
+ * REMOVE_PRIORITIZED_PROCESS - indicate that work items for this process
+ * should no longer be prioritized over other work
+ * items given the same static priority level.
+ */
+#define ADD_PRIORITIZED_PROCESS 0
+#define REMOVE_PRIORITIZED_PROCESS 1
+
+/**
+ * struct pcm_prioritized_process_notifier_data - change of prioritized process
+ * list passed to the callback
+ *
+ * @pid: PID of the process being added/removed
+ */
+struct pcm_prioritized_process_notifier_data {
+ uint32_t pid;
+};
+
+/**
* struct priority_control_manager_ops - Callbacks for priority control manager operations
*
* @pcm_scheduler_priority_check: Callback to check if scheduling priority level can be requested
+ * pcm_dev: The priority control manager through which the
+ * request is being made.
+ * task: The task struct of the process requesting the
+ * priority check.
+ * requested_priority: The priority level being requested.
+ *
+ * The returned value will be:
+ * The same as requested_priority if the process has permission to
+ * use requested_priority.A lower priority value if the process does
+ * not have permission to use requested_priority
+ *
+ * requested_priority has the following value range:
+ * 0-3 : Priority level, 0 being highest and 3 being lowest
+ *
+ * Return: The priority that would actually be given, could be lower
+ * than requested_priority
+ *
+ * @pcm_prioritized_process_notifier_register: register a callback for changes to the
+ * list of prioritized processes
+ * pcm_dev: The priority control manager through
+ * which the request is being made.
+ * nb: notifier block with callback function pointer
+ * On success returns 0, otherwise -1
+ *
+ * @pcm_prioritized_process_notifier_unregister: unregister the callback for changes to the
+ * list of prioritized processes
+ * pcm_dev: The priority control manager through
+ * which the request is being made.
+ * nb: notifier block which will be unregistered
+ * On success returns 0, otherwise -1
*/
struct priority_control_manager_ops {
- /*
- * pcm_scheduler_priority_check: This function can be used to check what priority its work
- * would be treated as based on the requested_priority value.
- *
- * @pcm_dev: The priority control manager through which the request is
- * being made.
- * @task: The task struct of the process requesting the priority check.
- * @requested_priority: The priority level being requested.
- *
- * The returned value will be:
- * The same as requested_priority if the process has permission to use requested_priority
- * A lower priority value if the process does not have permission to use requested_priority
- *
- * requested_priority has the following value range:
- * 0-3 : Priority level, 0 being highest and 3 being lowest
- *
- * Return: The priority that would actually be given, could be lower than requested_priority
- */
- int (*pcm_scheduler_priority_check)(
- struct priority_control_manager_device *pcm_dev,
- struct task_struct *task, int requested_priority);
+ int (*pcm_scheduler_priority_check)(struct priority_control_manager_device *pcm_dev,
+ struct task_struct *task, int requested_priority);
+
+ int (*pcm_prioritized_process_notifier_register)(
+ struct priority_control_manager_device *pcm_dev, struct notifier_block *nb);
+
+ int (*pcm_prioritized_process_notifier_unregister)(
+ struct priority_control_manager_device *pcm_dev, struct notifier_block *nb);
};
/**
diff --git a/common/include/linux/protected_memory_allocator.h b/common/include/linux/protected_memory_allocator.h
index 825af48..0c83845 100644
--- a/common/include/linux/protected_memory_allocator.h
+++ b/common/include/linux/protected_memory_allocator.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -56,8 +56,7 @@ struct protected_memory_allocator_ops {
* Return: Pointer to allocated memory, or NULL if allocation failed.
*/
struct protected_memory_allocation *(*pma_alloc_page)(
- struct protected_memory_allocator_device *pma_dev,
- unsigned int order);
+ struct protected_memory_allocator_device *pma_dev, unsigned int order);
/*
* pma_get_phys_addr - Get the physical address of the protected memory
@@ -70,9 +69,8 @@ struct protected_memory_allocator_ops {
*
* Return: The physical address of the given allocation.
*/
- phys_addr_t (*pma_get_phys_addr)(
- struct protected_memory_allocator_device *pma_dev,
- struct protected_memory_allocation *pma);
+ phys_addr_t (*pma_get_phys_addr)(struct protected_memory_allocator_device *pma_dev,
+ struct protected_memory_allocation *pma);
/*
* pma_free_page - Free a page of memory
@@ -81,9 +79,8 @@ struct protected_memory_allocator_ops {
* through.
* @pma: The protected memory allocation to free.
*/
- void (*pma_free_page)(
- struct protected_memory_allocator_device *pma_dev,
- struct protected_memory_allocation *pma);
+ void (*pma_free_page)(struct protected_memory_allocator_device *pma_dev,
+ struct protected_memory_allocation *pma);
};
/**
diff --git a/common/include/linux/protected_mode_switcher.h b/common/include/linux/protected_mode_switcher.h
index 0f1e6ab..5559b18 100644
--- a/common/include/linux/protected_mode_switcher.h
+++ b/common/include/linux/protected_mode_switcher.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2017, 2020-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,8 +37,7 @@ struct protected_mode_ops {
*
* Return: 0 on success, non-zero on error
*/
- int (*protected_mode_enable)(
- struct protected_mode_device *protected_dev);
+ int (*protected_mode_enable)(struct protected_mode_device *protected_dev);
/*
* protected_mode_disable() - Disable protected mode on device, and
@@ -47,8 +46,7 @@ struct protected_mode_ops {
*
* Return: 0 on success, non-zero on error
*/
- int (*protected_mode_disable)(
- struct protected_mode_device *protected_dev);
+ int (*protected_mode_disable)(struct protected_mode_device *protected_dev);
};
/**
diff --git a/common/include/linux/version_compat_defs.h b/common/include/linux/version_compat_defs.h
index 47551f2..366b50c 100644
--- a/common/include/linux/version_compat_defs.h
+++ b/common/include/linux/version_compat_defs.h
@@ -25,6 +25,7 @@
#include <linux/version.h>
#include <linux/highmem.h>
#include <linux/timer.h>
+#include <linux/iopoll.h>
#if (KERNEL_VERSION(4, 4, 267) < LINUX_VERSION_CODE)
#include <linux/overflow.h>
@@ -41,7 +42,11 @@
#if KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE
typedef unsigned int __poll_t;
+
+#ifndef HRTIMER_MODE_REL_SOFT
+#define HRTIMER_MODE_REL_SOFT HRTIMER_MODE_REL
#endif
+#endif /* KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE */
#if KERNEL_VERSION(4, 9, 78) >= LINUX_VERSION_CODE
@@ -77,19 +82,19 @@ typedef unsigned int __poll_t;
/* Replace the default definition with CONFIG_LSM_MMAP_MIN_ADDR */
#undef kbase_mmap_min_addr
#define kbase_mmap_min_addr CONFIG_LSM_MMAP_MIN_ADDR
-#define KBASE_COMPILED_MMAP_MIN_ADDR_MSG \
+#define KBASE_COMPILED_MMAP_MIN_ADDR_MSG \
"* MALI kbase_mmap_min_addr compiled to CONFIG_LSM_MMAP_MIN_ADDR, no runtime update possible! *"
#endif /* (CONFIG_LSM_MMAP_MIN_ADDR > CONFIG_DEFAULT_MMAP_MIN_ADDR) */
#endif /* CONFIG_LSM_MMAP_MIN_ADDR */
#if (kbase_mmap_min_addr == CONFIG_DEFAULT_MMAP_MIN_ADDR)
-#define KBASE_COMPILED_MMAP_MIN_ADDR_MSG \
+#define KBASE_COMPILED_MMAP_MIN_ADDR_MSG \
"* MALI kbase_mmap_min_addr compiled to CONFIG_DEFAULT_MMAP_MIN_ADDR, no runtime update possible! *"
#endif
#else /* CONFIG_MMU */
#define kbase_mmap_min_addr (0UL)
-#define KBASE_COMPILED_MMAP_MIN_ADDR_MSG \
+#define KBASE_COMPILED_MMAP_MIN_ADDR_MSG \
"* MALI kbase_mmap_min_addr compiled to (0UL), no runtime update possible! *"
#endif /* CONFIG_MMU */
#endif /* KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE */
@@ -116,6 +121,10 @@ static inline void kbase_timer_setup(struct timer_list *timer,
#define READ_ONCE(x) ACCESS_ONCE(x)
#endif
+#ifndef CSTD_UNUSED
+#define CSTD_UNUSED(x) ((void)(x))
+#endif
+
static inline void *kbase_kmap(struct page *p)
{
#if KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE
@@ -137,8 +146,10 @@ static inline void *kbase_kmap_atomic(struct page *p)
static inline void kbase_kunmap(struct page *p, void *address)
{
#if KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE
+ CSTD_UNUSED(p);
kunmap_local(address);
#else
+ CSTD_UNUSED(address);
kunmap(p);
#endif /* KERNEL_VERSION(5, 11, 0) */
}
@@ -164,6 +175,7 @@ static inline void kbase_kunmap_atomic(void *address)
* are simple to reproduce.
*/
#define check_mul_overflow(a, b, d) __builtin_mul_overflow(a, b, d)
+#define check_add_overflow(a, b, d) __builtin_add_overflow(a, b, d)
#endif
/*
@@ -177,6 +189,7 @@ static inline void kbase_kunmap_atomic(void *address)
#define dma_fence fence
#define dma_fence_ops fence_ops
+#define dma_fence_cb fence_cb
#define dma_fence_context_alloc(a) fence_context_alloc(a)
#define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e)
#define dma_fence_get(a) fence_get(a)
@@ -205,16 +218,16 @@ static inline void kbase_kunmap_atomic(void *address)
static inline void dma_fence_set_error_helper(
#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
- struct fence *fence,
+ struct fence *fence,
#else
- struct dma_fence *fence,
+ struct dma_fence *fence,
#endif
- int error)
+ int error)
{
#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE)
dma_fence_set_error(fence, error);
#elif (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
- KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
+ KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
fence_set_error(fence, error);
#else
fence->status = error;
@@ -234,10 +247,105 @@ static inline void vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
}
#endif
+static inline void kbase_unpin_user_buf_page(struct page *page)
+{
+#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+ put_page(page);
+#else
+ unpin_user_page(page);
+#endif
+}
+
+static inline long kbase_get_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+#if ((KERNEL_VERSION(6, 5, 0) > LINUX_VERSION_CODE) && !defined(__ANDROID_COMMON_KERNEL__)) || \
+ ((KERNEL_VERSION(6, 4, 0) > LINUX_VERSION_CODE) && defined(__ANDROID_COMMON_KERNEL__))
+ return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+#else
+ return get_user_pages(start, nr_pages, gup_flags, pages);
+#endif
+}
+
+static inline long kbase_pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *locked)
+{
+#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+ return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, pages, vmas);
+#elif KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
+ return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, pages, vmas, locked);
+#elif KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+ return pin_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, pages, vmas, locked);
+#elif ((KERNEL_VERSION(6, 5, 0) > LINUX_VERSION_CODE) && !defined(__ANDROID_COMMON_KERNEL__)) || \
+ ((KERNEL_VERSION(6, 4, 0) > LINUX_VERSION_CODE) && defined(__ANDROID_COMMON_KERNEL__))
+ return pin_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked);
+#else
+ return pin_user_pages_remote(mm, start, nr_pages, gup_flags, pages, locked);
+#endif
+}
+
#if (KERNEL_VERSION(6, 4, 0) <= LINUX_VERSION_CODE)
#define KBASE_CLASS_CREATE(owner, name) class_create(name)
#else
#define KBASE_CLASS_CREATE(owner, name) class_create(owner, name)
+#endif /* (KERNEL_VERSION(6, 4, 0) <= LINUX_VERSION_CODE) */
+
+#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE
+#define kbase_totalram_pages() totalram_pages
+#else
+#define kbase_totalram_pages() totalram_pages()
+#endif /* KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE */
+
+#ifndef read_poll_timeout_atomic
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, delay_before_read, args...) \
+ ({ \
+ const u64 __timeout_us = (timeout_us); \
+ s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
+ const unsigned long __delay_us = (delay_us); \
+ const u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
+ if (delay_before_read && __delay_us) \
+ udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ do { \
+ (val) = op(args); \
+ if (__timeout_us) { \
+ if (__delay_us) { \
+ udelay(__delay_us); \
+ __left_ns -= __delay_ns; \
+ } \
+ __left_ns--; \
+ } \
+ } while (!(cond) && (!__timeout_us || (__left_ns > 0))); \
+ (cond) ? 0 : -ETIMEDOUT; \
+ })
#endif
+#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE)
+
+#define kbase_refcount_t atomic_t
+#define kbase_refcount_read(x) atomic_read(x)
+#define kbase_refcount_set(x, v) atomic_set(x, v)
+#define kbase_refcount_dec_and_test(x) atomic_dec_and_test(x)
+#define kbase_refcount_dec(x) atomic_dec(x)
+#define kbase_refcount_inc_not_zero(x) atomic_inc_not_zero(x)
+#define kbase_refcount_inc(x) atomic_inc(x)
+
+#else
+
+#include <linux/refcount.h>
+
+#define kbase_refcount_t refcount_t
+#define kbase_refcount_read(x) refcount_read(x)
+#define kbase_refcount_set(x, v) refcount_set(x, v)
+#define kbase_refcount_dec_and_test(x) refcount_dec_and_test(x)
+#define kbase_refcount_dec(x) refcount_dec(x)
+#define kbase_refcount_inc_not_zero(x) refcount_inc_not_zero(x)
+#define kbase_refcount_inc(x) refcount_inc(x)
+
+#endif /* (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE) */
+
#endif /* _VERSION_COMPAT_DEFS_H_ */
diff --git a/common/include/uapi/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.h b/common/include/uapi/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.h
index a92e296..56a16e1 100644
--- a/common/include/uapi/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.h
+++ b/common/include/uapi/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2012-2013, 2017, 2020-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -62,7 +62,7 @@ struct dma_buf_te_ioctl_set_failing {
struct dma_buf_te_ioctl_fill {
int fd;
- unsigned int value;
+ int value;
};
#define DMA_BUF_TE_IOCTL_BASE 'E'
@@ -70,7 +70,7 @@ struct dma_buf_te_ioctl_fill {
#define DMA_BUF_TE_VERSION _IOR(DMA_BUF_TE_IOCTL_BASE, 0x00, struct dma_buf_te_ioctl_version)
#define DMA_BUF_TE_ALLOC _IOR(DMA_BUF_TE_IOCTL_BASE, 0x01, struct dma_buf_te_ioctl_alloc)
#define DMA_BUF_TE_QUERY _IOR(DMA_BUF_TE_IOCTL_BASE, 0x02, struct dma_buf_te_ioctl_status)
-#define DMA_BUF_TE_SET_FAILING \
+#define DMA_BUF_TE_SET_FAILING \
_IOW(DMA_BUF_TE_IOCTL_BASE, 0x03, struct dma_buf_te_ioctl_set_failing)
#define DMA_BUF_TE_ALLOC_CONT _IOR(DMA_BUF_TE_IOCTL_BASE, 0x04, struct dma_buf_te_ioctl_alloc)
#define DMA_BUF_TE_FILL _IOR(DMA_BUF_TE_IOCTL_BASE, 0x05, struct dma_buf_te_ioctl_fill)
diff --git a/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_dummy.h b/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_dummy.h
index a44da7b..564f477 100644
--- a/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_dummy.h
+++ b/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_dummy.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2021-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,34 +30,31 @@
#define KBASE_DUMMY_MODEL_COUNTER_HEADER_DWORDS (4)
#if MALI_USE_CSF
-#define KBASE_DUMMY_MODEL_COUNTER_PER_CORE (65)
+#define KBASE_DUMMY_MODEL_COUNTER_PER_CORE (65)
#else /* MALI_USE_CSF */
-#define KBASE_DUMMY_MODEL_COUNTER_PER_CORE (60)
-#endif /* !MALI_USE_CSF */
-#define KBASE_DUMMY_MODEL_COUNTERS_PER_BIT (4)
+#define KBASE_DUMMY_MODEL_COUNTER_PER_CORE (60)
+#endif /* MALI_USE_CSF */
+#define KBASE_DUMMY_MODEL_COUNTERS_PER_BIT (4)
#define KBASE_DUMMY_MODEL_COUNTER_ENABLED(enable_mask, ctr_idx) \
- (enable_mask & (1 << (ctr_idx / KBASE_DUMMY_MODEL_COUNTERS_PER_BIT)))
+ (enable_mask & (1U << (ctr_idx / KBASE_DUMMY_MODEL_COUNTERS_PER_BIT)))
#define KBASE_DUMMY_MODEL_HEADERS_PER_BLOCK 4
-#define KBASE_DUMMY_MODEL_COUNTERS_PER_BLOCK 60
-#define KBASE_DUMMY_MODEL_VALUES_PER_BLOCK \
- (KBASE_DUMMY_MODEL_COUNTERS_PER_BLOCK + \
- KBASE_DUMMY_MODEL_HEADERS_PER_BLOCK)
-#define KBASE_DUMMY_MODEL_BLOCK_SIZE \
- (KBASE_DUMMY_MODEL_VALUES_PER_BLOCK * sizeof(__u32))
-#define KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS 8
-#define KBASE_DUMMY_MODEL_MAX_SHADER_CORES 32
+#define KBASE_DUMMY_MODEL_COUNTERS_PER_BLOCK KBASE_DUMMY_MODEL_COUNTER_PER_CORE
+#define KBASE_DUMMY_MODEL_VALUES_PER_BLOCK \
+ (KBASE_DUMMY_MODEL_COUNTERS_PER_BLOCK + KBASE_DUMMY_MODEL_HEADERS_PER_BLOCK)
+#define KBASE_DUMMY_MODEL_BLOCK_SIZE (KBASE_DUMMY_MODEL_VALUES_PER_BLOCK * sizeof(__u32))
+#define KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS 8
+#define KBASE_DUMMY_MODEL_MAX_SHADER_CORES 32
#define KBASE_DUMMY_MODEL_MAX_FIRMWARE_BLOCKS 0
-#define KBASE_DUMMY_MODEL_MAX_NUM_HARDWARE_BLOCKS \
+#define KBASE_DUMMY_MODEL_MAX_NUM_HARDWARE_BLOCKS \
(1 + 1 + KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS + KBASE_DUMMY_MODEL_MAX_SHADER_CORES)
-#define KBASE_DUMMY_MODEL_MAX_NUM_PERF_BLOCKS \
+#define KBASE_DUMMY_MODEL_MAX_NUM_PERF_BLOCKS \
(KBASE_DUMMY_MODEL_MAX_NUM_HARDWARE_BLOCKS + KBASE_DUMMY_MODEL_MAX_FIRMWARE_BLOCKS)
-#define KBASE_DUMMY_MODEL_COUNTER_TOTAL \
- (KBASE_DUMMY_MODEL_MAX_NUM_PERF_BLOCKS * \
- KBASE_DUMMY_MODEL_COUNTER_PER_CORE)
-#define KBASE_DUMMY_MODEL_MAX_VALUES_PER_SAMPLE \
+#define KBASE_DUMMY_MODEL_COUNTER_TOTAL \
+ (KBASE_DUMMY_MODEL_MAX_NUM_PERF_BLOCKS * KBASE_DUMMY_MODEL_COUNTER_PER_CORE)
+#define KBASE_DUMMY_MODEL_MAX_VALUES_PER_SAMPLE \
(KBASE_DUMMY_MODEL_MAX_NUM_PERF_BLOCKS * KBASE_DUMMY_MODEL_VALUES_PER_BLOCK)
-#define KBASE_DUMMY_MODEL_MAX_SAMPLE_SIZE \
+#define KBASE_DUMMY_MODEL_MAX_SAMPLE_SIZE \
(KBASE_DUMMY_MODEL_MAX_NUM_PERF_BLOCKS * KBASE_DUMMY_MODEL_BLOCK_SIZE)
/*
@@ -70,8 +67,10 @@
#define DUMMY_IMPLEMENTATION_SHADER_PRESENT_TODX (0x3FFull)
#define DUMMY_IMPLEMENTATION_SHADER_PRESENT_TTUX (0x7FFull)
#define DUMMY_IMPLEMENTATION_SHADER_PRESENT_TTIX (0xFFFull)
-#define DUMMY_IMPLEMENTATION_TILER_PRESENT (0x1ull)
+#define DUMMY_IMPLEMENTATION_SHADER_PRESENT_TKRX (0x1FFFull)
#define DUMMY_IMPLEMENTATION_L2_PRESENT (0x1ull)
+#define DUMMY_IMPLEMENTATION_TILER_PRESENT (0x1ull)
#define DUMMY_IMPLEMENTATION_STACK_PRESENT (0xFull)
+
#endif /* _UAPI_KBASE_MODEL_DUMMY_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_linux.h b/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_linux.h
index c83cedd..7e56fd7 100644
--- a/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_linux.h
+++ b/common/include/uapi/gpu/arm/midgard/backend/gpu/mali_kbase_model_linux.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2022-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,7 +30,6 @@
#define MODEL_LINUX_JOB_IRQ (0x1 << 0)
#define MODEL_LINUX_GPU_IRQ (0x1 << 1)
#define MODEL_LINUX_MMU_IRQ (0x1 << 2)
-
#define MODEL_LINUX_IRQ_MASK (MODEL_LINUX_JOB_IRQ | MODEL_LINUX_GPU_IRQ | MODEL_LINUX_MMU_IRQ)
#endif /* _UAPI_KBASE_MODEL_LINUX_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h b/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h
index a8e5802..0fb8242 100644
--- a/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h
@@ -48,7 +48,6 @@
#define BASE_MEM_RESERVED_BIT_20 ((base_mem_alloc_flags)1 << 20)
-
/* Must be FIXABLE memory: its GPU VA will be determined at a later point,
* at which time it will be at a fixed GPU VA.
*/
@@ -61,8 +60,7 @@
/* A mask of all the flags which are only valid for allocations within kbase,
* and may not be passed from user space.
*/
-#define BASEP_MEM_FLAGS_KERNEL_ONLY \
- (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
+#define BASEP_MEM_FLAGS_KERNEL_ONLY (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)
/* A mask of all currently reserved flags
*/
@@ -74,8 +72,7 @@
#define BASEP_MEM_CSF_USER_IO_PAGES_HANDLE (48ul << LOCAL_PAGE_SHIFT)
#define KBASE_CSF_NUM_USER_IO_PAGES_HANDLE \
- ((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> \
- LOCAL_PAGE_SHIFT)
+ ((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> LOCAL_PAGE_SHIFT)
/* Valid set of just-in-time memory allocation flags */
#define BASE_JIT_ALLOC_VALID_FLAGS ((__u8)0)
@@ -92,23 +89,21 @@
/* Bitpattern describing the ::base_context_create_flags that can be
* passed to base_context_init()
*/
-#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
- (BASE_CONTEXT_CCTX_EMBEDDED | \
- BASE_CONTEXT_CSF_EVENT_THREAD | \
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (BASE_CONTEXT_CCTX_EMBEDDED | BASE_CONTEXT_CSF_EVENT_THREAD | \
BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
/* Flags for base tracepoint specific to CSF */
/* Enable KBase tracepoints for CSF builds */
-#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1 << 2)
+#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1U << 2)
/* Enable additional CSF Firmware side tracepoints */
-#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1 << 3)
+#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1U << 3)
-#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
- BASE_TLSTREAM_JOB_DUMPING_ENABLED | \
- BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | \
- BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS)
+#define BASE_TLSTREAM_FLAGS_MASK \
+ (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | BASE_TLSTREAM_JOB_DUMPING_ENABLED | \
+ BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS)
/* Number of pages mapped into the process address space for a bound GPU
* command queue. A pair of input/output pages and a Hw doorbell page
@@ -146,7 +141,7 @@
#define BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK (BASE_CSF_TILER_OOM_EXCEPTION_FLAG)
/* Initial value for LATEST_FLUSH register */
-#define POWER_DOWN_LATEST_FLUSH_VALUE ((uint32_t)1)
+#define POWER_DOWN_LATEST_FLUSH_VALUE ((__u32)1)
/**
* enum base_kcpu_command_type - Kernel CPU queue command type.
@@ -162,7 +157,7 @@
* @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC: jit_alloc,
* @BASE_KCPU_COMMAND_TYPE_JIT_FREE: jit_free,
* @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND: group_suspend,
- * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier,
+ * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER: error_barrier
*/
enum base_kcpu_command_type {
BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL,
@@ -177,7 +172,7 @@ enum base_kcpu_command_type {
BASE_KCPU_COMMAND_TYPE_JIT_ALLOC,
BASE_KCPU_COMMAND_TYPE_JIT_FREE,
BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND,
- BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER,
+ BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER
};
/**
diff --git a/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h b/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
index c9de5fd..537c90d 100644
--- a/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
+++ b/common/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
@@ -90,10 +90,20 @@
* - Restrict child process from doing supported file operations (like mmap, ioctl,
* read, poll) on the file descriptor of mali device file that was inherited
* from the parent process.
+ * 1.21:
+ * - Remove KBASE_IOCTL_HWCNT_READER_SETUP and KBASE_HWCNT_READER_* ioctls.
+ * 1.22:
+ * - Add comp_pri_threshold and comp_pri_ratio attributes to
+ * kbase_ioctl_cs_queue_group_create.
+ * 1.23:
+ * - Disallows changing the sharability on the GPU of imported dma-bufs to
+ * BASE_MEM_COHERENT_SYSTEM using KBASE_IOCTL_MEM_FLAGS_CHANGE.
+ * 1.24:
+ * - Implement full block state support for hardware counters.
*/
#define BASE_UK_VERSION_MAJOR 1
-#define BASE_UK_VERSION_MINOR 20
+#define BASE_UK_VERSION_MINOR 24
/**
* struct kbase_ioctl_version_check - Check version compatibility between
@@ -142,8 +152,7 @@ struct kbase_ioctl_cs_queue_kick {
__u64 buffer_gpu_addr;
};
-#define KBASE_IOCTL_CS_QUEUE_KICK \
- _IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick)
+#define KBASE_IOCTL_CS_QUEUE_KICK _IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick)
/**
* union kbase_ioctl_cs_queue_bind - Bind a GPU command queue to a group
@@ -169,8 +178,7 @@ union kbase_ioctl_cs_queue_bind {
} out;
};
-#define KBASE_IOCTL_CS_QUEUE_BIND \
- _IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind)
+#define KBASE_IOCTL_CS_QUEUE_BIND _IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind)
/**
* struct kbase_ioctl_cs_queue_register_ex - Register a GPU command queue with the
@@ -262,7 +270,7 @@ union kbase_ioctl_cs_queue_group_create_1_6 {
} out;
};
-#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 \
+#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6 \
_IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create_1_6)
/**
@@ -312,7 +320,7 @@ union kbase_ioctl_cs_queue_group_create_1_18 {
} out;
};
-#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18 \
+#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18 \
_IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create_1_18)
/**
@@ -366,7 +374,7 @@ union kbase_ioctl_cs_queue_group_create {
} out;
};
-#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE \
+#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE \
_IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create)
/**
@@ -383,8 +391,7 @@ struct kbase_ioctl_cs_queue_group_term {
#define KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE \
_IOW(KBASE_IOCTL_TYPE, 43, struct kbase_ioctl_cs_queue_group_term)
-#define KBASE_IOCTL_CS_EVENT_SIGNAL \
- _IO(KBASE_IOCTL_TYPE, 44)
+#define KBASE_IOCTL_CS_EVENT_SIGNAL _IO(KBASE_IOCTL_TYPE, 44)
typedef __u8 base_kcpu_queue_id; /* We support up to 256 active KCPU queues */
@@ -399,8 +406,7 @@ struct kbase_ioctl_kcpu_queue_new {
__u8 padding[7];
};
-#define KBASE_IOCTL_KCPU_QUEUE_CREATE \
- _IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new)
+#define KBASE_IOCTL_KCPU_QUEUE_CREATE _IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new)
/**
* struct kbase_ioctl_kcpu_queue_delete - Destroy a KCPU command queue
@@ -506,7 +512,7 @@ union kbase_ioctl_cs_tiler_heap_init_1_13 {
} out;
};
-#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 \
+#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13 \
_IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init_1_13)
/**
@@ -565,16 +571,14 @@ union kbase_ioctl_cs_get_glb_iface {
} out;
};
-#define KBASE_IOCTL_CS_GET_GLB_IFACE \
- _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface)
+#define KBASE_IOCTL_CS_GET_GLB_IFACE _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface)
struct kbase_ioctl_cs_cpu_queue_info {
__u64 buffer;
__u64 size;
};
-#define KBASE_IOCTL_VERSION_CHECK \
- _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)
+#define KBASE_IOCTL_VERSION_CHECK _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)
#define KBASE_IOCTL_CS_CPU_QUEUE_DUMP \
_IOW(KBASE_IOCTL_TYPE, 53, struct kbase_ioctl_cs_cpu_queue_info)
diff --git a/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_coherency.h b/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_coherency.h
index 83d8413..ee64184 100644
--- a/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_coherency.h
+++ b/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_coherency.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,9 +22,9 @@
#ifndef _UAPI_KBASE_GPU_COHERENCY_H_
#define _UAPI_KBASE_GPU_COHERENCY_H_
-#define COHERENCY_ACE_LITE 0
-#define COHERENCY_ACE 1
-#define COHERENCY_NONE 31
-#define COHERENCY_FEATURE_BIT(x) (1 << (x))
+#define COHERENCY_ACE_LITE 0U
+#define COHERENCY_ACE 1U
+#define COHERENCY_NONE 31U
+#define COHERENCY_FEATURE_BIT(x) (1U << (x))
#endif /* _UAPI_KBASE_GPU_COHERENCY_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_id.h b/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_id.h
index 784e09a..d347854 100644
--- a/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_id.h
+++ b/common/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_id.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,124 +22,156 @@
#ifndef _UAPI_KBASE_GPU_ID_H_
#define _UAPI_KBASE_GPU_ID_H_
+#if defined(__linux)
#include <linux/types.h>
+#endif
-/* GPU_ID register */
-#define KBASE_GPU_ID_VERSION_STATUS_SHIFT 0
-#define KBASE_GPU_ID_VERSION_MINOR_SHIFT 4
-#define KBASE_GPU_ID_VERSION_MAJOR_SHIFT 12
-#define KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT 16
-
-#define GPU_ID_VERSION_STATUS (0xFu << KBASE_GPU_ID_VERSION_STATUS_SHIFT)
-#define GPU_ID_VERSION_MINOR (0xFFu << KBASE_GPU_ID_VERSION_MINOR_SHIFT)
-#define GPU_ID_VERSION_MAJOR (0xFu << KBASE_GPU_ID_VERSION_MAJOR_SHIFT)
-#define GPU_ID_VERSION_PRODUCT_ID (0xFFFFu << KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT)
-
-#define GPU_ID2_VERSION_STATUS_SHIFT 0
-#define GPU_ID2_VERSION_MINOR_SHIFT 4
-#define GPU_ID2_VERSION_MAJOR_SHIFT 12
-#define GPU_ID2_PRODUCT_MAJOR_SHIFT 16
-#define GPU_ID2_ARCH_REV_SHIFT 20
-#define GPU_ID2_ARCH_MINOR_SHIFT 24
-#define GPU_ID2_ARCH_MAJOR_SHIFT 28
-#define GPU_ID2_VERSION_STATUS (0xFu << GPU_ID2_VERSION_STATUS_SHIFT)
-#define GPU_ID2_VERSION_MINOR (0xFFu << GPU_ID2_VERSION_MINOR_SHIFT)
-#define GPU_ID2_VERSION_MAJOR (0xFu << GPU_ID2_VERSION_MAJOR_SHIFT)
-#define GPU_ID2_PRODUCT_MAJOR (0xFu << GPU_ID2_PRODUCT_MAJOR_SHIFT)
-#define GPU_ID2_ARCH_REV (0xFu << GPU_ID2_ARCH_REV_SHIFT)
-#define GPU_ID2_ARCH_MINOR (0xFu << GPU_ID2_ARCH_MINOR_SHIFT)
-#define GPU_ID2_ARCH_MAJOR (0xFu << GPU_ID2_ARCH_MAJOR_SHIFT)
-#define GPU_ID2_PRODUCT_MODEL (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
-#define GPU_ID2_VERSION (GPU_ID2_VERSION_MAJOR | \
- GPU_ID2_VERSION_MINOR | \
- GPU_ID2_VERSION_STATUS)
+#define GPU_ID2_VERSION_STATUS_SHIFT 0
+#define GPU_ID2_VERSION_MINOR_SHIFT 4
+#define GPU_ID2_VERSION_MAJOR_SHIFT 12
+#define GPU_ID2_PRODUCT_MAJOR_SHIFT 16
+#define GPU_ID2_ARCH_REV_SHIFT 20
+#define GPU_ID2_ARCH_MINOR_SHIFT 24
+#define GPU_ID2_ARCH_MAJOR_SHIFT 28
+#define GPU_ID2_VERSION_STATUS (0xFu << GPU_ID2_VERSION_STATUS_SHIFT)
+#define GPU_ID2_VERSION_MINOR (0xFFu << GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR (0xFu << GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR (0xFu << GPU_ID2_PRODUCT_MAJOR_SHIFT)
+#define GPU_ID2_ARCH_REV (0xFu << GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR (0xFu << GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR (0xFu << GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MODEL (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
+#define GPU_ID2_VERSION (GPU_ID2_VERSION_MAJOR | GPU_ID2_VERSION_MINOR | GPU_ID2_VERSION_STATUS)
+#define GPU_ID2_ARCH_REV_GET(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_ARCH_REV) >> GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR_GET(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_ARCH_MINOR) >> GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR_GET(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_ARCH_MAJOR) >> GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_VERSION_MINOR_GET(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_VERSION_MINOR) >> GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR_GET(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_VERSION_MAJOR) >> GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR_GET(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_PRODUCT_MAJOR) >> GPU_ID2_PRODUCT_MAJOR_SHIFT)
/* Helper macro to construct a value consisting of arch major and revision
* using the value of gpu_id.
*/
-#define ARCH_MAJOR_REV_REG(gpu_id) \
- ((((__u32)gpu_id) & GPU_ID2_ARCH_MAJOR) | \
- (((__u32)gpu_id) & GPU_ID2_ARCH_REV))
+#define GPU_ID2_ARCH_MAJOR_REV_REG(gpu_id) \
+ ((((__u32)gpu_id) & GPU_ID2_ARCH_MAJOR) | (((__u32)gpu_id) & GPU_ID2_ARCH_REV))
/* Helper macro to create a partial GPU_ID (new format) that defines
* a arch major and revision.
*/
-#define GPU_ID2_ARCH_MAJOR_REV_MAKE(arch_major, arch_rev) \
- ((((__u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+#define GPU_ID2_ARCH_MAJOR_REV_MAKE(arch_major, arch_rev) \
+ ((((__u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
(((__u32)arch_rev) << GPU_ID2_ARCH_REV_SHIFT))
/* Helper macro to create a partial GPU_ID (new format) that defines
* a product ignoring its version.
*/
#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \
- ((((__u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
- (((__u32)arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \
- (((__u32)arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \
- (((__u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+ ((((__u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ (((__u32)arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \
+ (((__u32)arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \
+ (((__u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
/* Helper macro to create a partial GPU_ID (new format) that specifies the
* revision (major, minor, status) of a product
*/
#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \
- ((((__u32)version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \
- (((__u32)version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \
- (((__u32)version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
+ ((((__u32)version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \
+ (((__u32)version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \
+ (((__u32)version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
/* Helper macro to create a complete GPU_ID (new format) */
-#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \
- version_major, version_minor, version_status) \
- (GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, \
- product_major) | \
- GPU_ID2_VERSION_MAKE(version_major, version_minor, \
- version_status))
+#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, version_major, \
+ version_minor, version_status) \
+ (GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) | \
+ GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status))
/* Helper macro to create a partial GPU_ID (new format) that identifies
* a particular GPU model by its arch_major and product_major.
*/
-#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
- ((((__u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
- (((__u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
+ ((((__u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ (((__u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
/* Strip off the non-relevant bits from a product_id value and make it suitable
* for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU
* model.
*/
#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \
- ((((__u32)product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
- GPU_ID2_PRODUCT_MODEL)
-
-#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6, 0)
-#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6, 1)
-#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7, 0)
-#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7, 3)
-#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7, 1)
-#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7, 2)
-#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(9, 0)
-#define GPU_ID2_PRODUCT_TNAX GPU_ID2_MODEL_MAKE(9, 1)
-#define GPU_ID2_PRODUCT_TBEX GPU_ID2_MODEL_MAKE(9, 2)
-#define GPU_ID2_PRODUCT_LBEX GPU_ID2_MODEL_MAKE(9, 4)
-#define GPU_ID2_PRODUCT_TBAX GPU_ID2_MODEL_MAKE(9, 5)
-#define GPU_ID2_PRODUCT_TODX GPU_ID2_MODEL_MAKE(10, 2)
-#define GPU_ID2_PRODUCT_TGRX GPU_ID2_MODEL_MAKE(10, 3)
-#define GPU_ID2_PRODUCT_TVAX GPU_ID2_MODEL_MAKE(10, 4)
-#define GPU_ID2_PRODUCT_LODX GPU_ID2_MODEL_MAKE(10, 7)
-#define GPU_ID2_PRODUCT_TTUX GPU_ID2_MODEL_MAKE(11, 2)
-#define GPU_ID2_PRODUCT_LTUX GPU_ID2_MODEL_MAKE(11, 3)
-#define GPU_ID2_PRODUCT_TTIX GPU_ID2_MODEL_MAKE(12, 0)
-#define GPU_ID2_PRODUCT_LTIX GPU_ID2_MODEL_MAKE(12, 1)
-
-/**
- * GPU_ID_MAKE - Helper macro to generate GPU_ID using id, major, minor, status
- *
- * @id: Product Major of GPU ID
- * @major: Version major of GPU ID
- * @minor: Version minor of GPU ID
- * @status: Version status of GPU ID
+ ((((__u32)product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & GPU_ID2_PRODUCT_MODEL)
+
+#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6, 0)
+#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6, 1)
+#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7, 0)
+#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7, 3)
+#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7, 1)
+#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7, 2)
+#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(9, 0)
+#define GPU_ID2_PRODUCT_TNAX GPU_ID2_MODEL_MAKE(9, 1)
+#define GPU_ID2_PRODUCT_TBEX GPU_ID2_MODEL_MAKE(9, 2)
+#define GPU_ID2_PRODUCT_LBEX GPU_ID2_MODEL_MAKE(9, 4)
+#define GPU_ID2_PRODUCT_TBAX GPU_ID2_MODEL_MAKE(9, 5)
+#define GPU_ID2_PRODUCT_TODX GPU_ID2_MODEL_MAKE(10, 2)
+#define GPU_ID2_PRODUCT_TGRX GPU_ID2_MODEL_MAKE(10, 3)
+#define GPU_ID2_PRODUCT_TVAX GPU_ID2_MODEL_MAKE(10, 4)
+#define GPU_ID2_PRODUCT_LODX GPU_ID2_MODEL_MAKE(10, 7)
+#define GPU_ID2_PRODUCT_TTUX GPU_ID2_MODEL_MAKE(11, 2)
+#define GPU_ID2_PRODUCT_LTUX GPU_ID2_MODEL_MAKE(11, 3)
+#define GPU_ID2_PRODUCT_TTIX GPU_ID2_MODEL_MAKE(12, 0)
+#define GPU_ID2_PRODUCT_LTIX GPU_ID2_MODEL_MAKE(12, 1)
+#define GPU_ID2_PRODUCT_TKRX GPU_ID2_MODEL_MAKE(13, 0)
+#define GPU_ID2_PRODUCT_LKRX GPU_ID2_MODEL_MAKE(13, 1)
+
+
+
+#define GPU_ID_U8_COMP(val3, val2, val1, val0) \
+ ((((__u32)val3) << 24U) | (((__u32)val2) << 16U) | (((__u32)val1) << 8U) | ((__u32)val0))
+#define GPU_ID_U8_COMP_SHIFT(comp, idx) (((__u32)comp) >> (idx * 8U))
+#define GPU_ID_U8_COMP_GET(comp, idx) (GPU_ID_U8_COMP_SHIFT(comp, idx) & 0xFF)
+
+#define GPU_ID_PRODUCT_ID_MAKE(arch_major, arch_minor, arch_rev, product_major) \
+ GPU_ID_U8_COMP(arch_major, arch_minor, arch_rev, product_major)
+#define GPU_ID_MODEL_MAKE(arch_major, product_major) GPU_ID_U8_COMP(arch_major, 0, 0, product_major)
+#define GPU_ID_VERSION_MAKE(version_major, version_minor, version_status) \
+ GPU_ID_U8_COMP(0, version_major, version_minor, version_status)
+#define GPU_ID_ARCH_MAKE(arch_major, arch_minor, arch_rev) \
+ GPU_ID_U8_COMP(0, arch_major, arch_minor, arch_rev)
+
+/* Convert ID created from GPU_ID_PRODUCT_ID_MAKE() to match the format of
+ * GPU_ID_MODEL_MAKE()
*/
-#define GPU_ID_MAKE(id, major, minor, status) \
- ((((__u32)id) << KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
- (((__u32)major) << KBASE_GPU_ID_VERSION_MAJOR_SHIFT) | \
- (((__u32)minor) << KBASE_GPU_ID_VERSION_MINOR_SHIFT) | \
- (((__u32)status) << KBASE_GPU_ID_VERSION_STATUS_SHIFT))
+#define GPU_ID_MODEL_MATCH_VALUE(product_id) (((__u32)product_id) & GPU_ID_MODEL_MAKE(0xFF, 0xFF))
+
+#define GPU_ID_VERSION_ID_MAJOR_MINOR_GET(version_id) GPU_ID_U8_COMP_SHIFT(version_id, 1)
+#define GPU_ID_VERSION_ID_STATUS_GET(version_id) GPU_ID_U8_COMP_GET(version_id, 0)
+#define GPU_ID_VERSION_ID_MINOR_GET(version_id) GPU_ID_U8_COMP_GET(version_id, 1)
+#define GPU_ID_VERSION_ID_MAJOR_GET(version_id) GPU_ID_U8_COMP_GET(version_id, 2)
+
+#define GPU_ID_PRODUCT_TMIX GPU_ID_MODEL_MAKE(6, 0)
+#define GPU_ID_PRODUCT_THEX GPU_ID_MODEL_MAKE(6, 1)
+#define GPU_ID_PRODUCT_TSIX GPU_ID_MODEL_MAKE(7, 0)
+#define GPU_ID_PRODUCT_TDVX GPU_ID_MODEL_MAKE(7, 3)
+#define GPU_ID_PRODUCT_TNOX GPU_ID_MODEL_MAKE(7, 1)
+#define GPU_ID_PRODUCT_TGOX GPU_ID_MODEL_MAKE(7, 2)
+#define GPU_ID_PRODUCT_TTRX GPU_ID_MODEL_MAKE(9, 0)
+#define GPU_ID_PRODUCT_TNAX GPU_ID_MODEL_MAKE(9, 1)
+#define GPU_ID_PRODUCT_TBEX GPU_ID_MODEL_MAKE(9, 2)
+#define GPU_ID_PRODUCT_LBEX GPU_ID_MODEL_MAKE(9, 4)
+#define GPU_ID_PRODUCT_TBAX GPU_ID_MODEL_MAKE(9, 5)
+#define GPU_ID_PRODUCT_TODX GPU_ID_MODEL_MAKE(10, 2)
+#define GPU_ID_PRODUCT_TGRX GPU_ID_MODEL_MAKE(10, 3)
+#define GPU_ID_PRODUCT_TVAX GPU_ID_MODEL_MAKE(10, 4)
+#define GPU_ID_PRODUCT_LODX GPU_ID_MODEL_MAKE(10, 7)
+#define GPU_ID_PRODUCT_TTUX GPU_ID_MODEL_MAKE(11, 2)
+#define GPU_ID_PRODUCT_LTUX GPU_ID_MODEL_MAKE(11, 3)
+#define GPU_ID_PRODUCT_TTIX GPU_ID_MODEL_MAKE(12, 0)
+#define GPU_ID_PRODUCT_LTIX GPU_ID_MODEL_MAKE(12, 1)
+#define GPU_ID_PRODUCT_TKRX GPU_ID_MODEL_MAKE(13, 0)
+#define GPU_ID_PRODUCT_LKRX GPU_ID_MODEL_MAKE(13, 1)
#endif /* _UAPI_KBASE_GPU_ID_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/jm/mali_base_jm_kernel.h b/common/include/uapi/gpu/arm/midgard/jm/mali_base_jm_kernel.h
index 1a3098d..9478334 100644
--- a/common/include/uapi/gpu/arm/midgard/jm/mali_base_jm_kernel.h
+++ b/common/include/uapi/gpu/arm/midgard/jm/mali_base_jm_kernel.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -60,28 +60,26 @@
/* A mask of all the flags which are only valid for allocations within kbase,
* and may not be passed from user space.
*/
-#define BASEP_MEM_FLAGS_KERNEL_ONLY \
- (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \
- BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM)
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | BASE_MEM_FLAG_MAP_FIXED | \
+ BASEP_MEM_PERFORM_JIT_TRIM)
/* A mask of all currently reserved flags
*/
-#define BASE_MEM_FLAGS_RESERVED \
- (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19)
-
+#define BASE_MEM_FLAGS_RESERVED (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19)
/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
* initial commit is aligned to 'extension' pages, where 'extension' must be a power
* of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
*/
-#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0)
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0)
/**
* BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE - If set, the heap info address points
* to a __u32 holding the used size in bytes;
* otherwise it points to a __u64 holding the lowest address of unused memory.
*/
-#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1)
+#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1)
/**
* BASE_JIT_ALLOC_VALID_FLAGS - Valid set of just-in-time memory allocation flags
@@ -109,26 +107,25 @@
*/
/* Private flag tracking whether job descriptor dumping is disabled */
-#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \
- ((base_context_create_flags)(1 << 31))
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((base_context_create_flags)(1 << 31))
/* Flags for base tracepoint specific to JM */
-#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
- BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+#define BASE_TLSTREAM_FLAGS_MASK \
+ (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | BASE_TLSTREAM_JOB_DUMPING_ENABLED)
/*
* Dependency stuff, keep it private for now. May want to expose it if
* we decide to make the number of semaphores a configurable
* option.
*/
-#define BASE_JD_ATOM_COUNT 256
+#define BASE_JD_ATOM_COUNT 256
/* Maximum number of concurrent render passes.
*/
#define BASE_JD_RP_COUNT (256)
/* Set/reset values for a software event */
-#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
-#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
/**
* struct base_jd_udata - Per-job data
@@ -156,9 +153,9 @@ struct base_jd_udata {
*/
typedef __u8 base_jd_dep_type;
-#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
-#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
-#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
/**
* typedef base_jd_core_req - Job chain hardware requirements.
@@ -180,7 +177,7 @@ typedef __u32 base_jd_core_req;
/* Requires fragment shaders
*/
-#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
/* Requires compute shaders
*
@@ -196,20 +193,20 @@ typedef __u32 base_jd_core_req;
#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
/* Requires tiling */
-#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2)
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2)
/* Requires cache flushes */
#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3)
/* Requires value writeback */
-#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4)
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4)
/* SW-only requirements - the HW does not expose these as part of the job slot
* capabilities
*/
/* Requires fragment job with AFBC encoding */
-#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
/* SW-only requirement: coalesce completion events.
* If this bit is set then completion of this atom will not cause an event to
@@ -223,29 +220,29 @@ typedef __u32 base_jd_core_req;
/* SW Only requirement: the job chain requires a coherent core group. We don't
* mind which coherent core group is used.
*/
-#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
/* SW Only requirement: The performance counters should be enabled only when
* they are needed, to reduce power consumption.
*/
-#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
/* SW Only requirement: External resources are referenced by this atom.
*
* This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and
* BASE_JD_REQ_SOFT_EVENT_WAIT.
*/
-#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
/* SW Only requirement: Software defined job. Jobs with this bit set will not be
* submitted to the hardware but will cause some action to happen within the
* driver
*/
-#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
-#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
-#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
-#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
/* 0x4 RESERVED for now */
@@ -257,11 +254,11 @@ typedef __u32 base_jd_core_req;
* - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
* possible for other jobs to wait upon. It completes immediately.
*/
-#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
-#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
-#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
-#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
/* SW only requirement: Just In Time allocation
*
@@ -278,7 +275,7 @@ typedef __u32 base_jd_core_req;
*
* The job will complete immediately.
*/
-#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
/* SW only requirement: Just In Time free
*
@@ -288,7 +285,7 @@ typedef __u32 base_jd_core_req;
*
* The job will complete immediately.
*/
-#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
/* SW only requirement: Map external resource
*
@@ -297,7 +294,7 @@ typedef __u32 base_jd_core_req;
* passed via the jc element of the atom which is a pointer to a
* base_external_resource_list.
*/
-#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
/* SW only requirement: Unmap external resource
*
@@ -306,7 +303,7 @@ typedef __u32 base_jd_core_req;
* passed via the jc element of the atom which is a pointer to a
* base_external_resource_list.
*/
-#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
*
@@ -316,7 +313,7 @@ typedef __u32 base_jd_core_req;
* In contrast to BASE_JD_REQ_CS, this does not indicate that the Job
* Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
*/
-#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
+#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
/* HW Requirement: Use the base_jd_atom::device_nr field to specify a
* particular core group
@@ -331,7 +328,7 @@ typedef __u32 base_jd_core_req;
/* SW Flag: If this bit is set then the successful completion of this atom
* will not cause an event to be sent to userspace
*/
-#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
/* SW Flag: If this bit is set then completion of this atom will not cause an
* event to be sent to userspace, whether successful or not.
@@ -408,23 +405,22 @@ typedef __u32 base_jd_core_req;
/* These requirement bits are currently unused in base_jd_core_req
*/
-#define BASEP_JD_REQ_RESERVED \
- (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
- BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
- BASE_JD_REQ_EVENT_COALESCE | \
- BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
- BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
- BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \
- BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \
- BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK))
+#define BASEP_JD_REQ_RESERVED \
+ (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+ BASE_JD_REQ_EVENT_COALESCE | BASE_JD_REQ_COHERENT_GROUP | \
+ BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | BASE_JD_REQ_JOB_SLOT | \
+ BASE_JD_REQ_START_RENDERPASS | BASE_JD_REQ_END_RENDERPASS | \
+ BASE_JD_REQ_LIMITED_CORE_MASK))
/* Mask of all bits in base_jd_core_req that control the type of the atom.
*
* This allows dependency only atoms to have flags set
*/
-#define BASE_JD_REQ_ATOM_TYPE \
- (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
- BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | BASE_JD_REQ_V | \
+ BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
/**
* BASE_JD_REQ_SOFT_JOB_TYPE - Mask of all bits in base_jd_core_req that
@@ -436,8 +432,7 @@ typedef __u32 base_jd_core_req;
* a dependency only job.
*/
#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
- (((core_req) & BASE_JD_REQ_SOFT_JOB) || \
- ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
+ (((core_req)&BASE_JD_REQ_SOFT_JOB) || ((core_req)&BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
/**
* enum kbase_jd_atom_state - Atom states
@@ -571,17 +566,17 @@ struct base_jd_fragment {
typedef __u8 base_jd_prio;
/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
-#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
* BASE_JD_PRIO_LOW
*/
-#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
/* Low atom priority. */
-#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH,
* BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW
*/
-#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3)
+#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3)
/* Invalid atom priority (max uint8_t value) */
#define BASE_JD_PRIO_INVALID ((base_jd_prio)255)
@@ -709,7 +704,7 @@ enum {
BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */
BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */
BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */
- BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */
+ BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */
/* Mask to extract the type from an event code */
BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)
};
@@ -924,34 +919,29 @@ enum base_jd_event_code {
BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
/* SW defined exceptions */
- BASE_JD_EVENT_MEM_GROWTH_FAILED =
- BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
- BASE_JD_EVENT_JOB_CANCELLED =
- BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
- BASE_JD_EVENT_JOB_INVALID =
- BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+ BASE_JD_SW_EVENT_RESERVED | 0x3FF,
- BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_SUCCESS | 0x000,
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,
- BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+ BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS |
+ BASE_JD_SW_EVENT_INFO | 0x000,
- BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS |
+ BASE_JD_SW_EVENT_RESERVED | 0x3FF,
- BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_KERNEL | 0x000,
- BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
- BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
+ BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL |
+ BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL |
+ BASE_JD_SW_EVENT_JOB | 0x001,
- BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT |
- BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL |
+ BASE_JD_SW_EVENT_RESERVED | 0x3FF
};
/**
diff --git a/common/include/uapi/gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h b/common/include/uapi/gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h
index f2329f9..2a7a06a 100644
--- a/common/include/uapi/gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h
+++ b/common/include/uapi/gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h
@@ -147,10 +147,17 @@
* - Restrict child process from doing supported file operations (like mmap, ioctl,
* read, poll) on the file descriptor of mali device file that was inherited
* from the parent process.
+ * 11.40:
+ * - Remove KBASE_IOCTL_HWCNT_READER_SETUP and KBASE_HWCNT_READER_* ioctls.
+ * 11.41:
+ * - Disallows changing the sharability on the GPU of imported dma-bufs to
+ * BASE_MEM_COHERENT_SYSTEM using KBASE_IOCTL_MEM_FLAGS_CHANGE.
+ * 11.42:
+ * - Implement full block state support for hardware counters.
*/
#define BASE_UK_VERSION_MAJOR 11
-#define BASE_UK_VERSION_MINOR 39
+#define BASE_UK_VERSION_MINOR 42
/**
* struct kbase_ioctl_version_check - Check version compatibility between
@@ -164,9 +171,7 @@ struct kbase_ioctl_version_check {
__u16 minor;
};
-#define KBASE_IOCTL_VERSION_CHECK \
- _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
-
+#define KBASE_IOCTL_VERSION_CHECK _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
/**
* struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
@@ -181,11 +186,9 @@ struct kbase_ioctl_job_submit {
__u32 stride;
};
-#define KBASE_IOCTL_JOB_SUBMIT \
- _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
+#define KBASE_IOCTL_JOB_SUBMIT _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
-#define KBASE_IOCTL_POST_TERM \
- _IO(KBASE_IOCTL_TYPE, 4)
+#define KBASE_IOCTL_POST_TERM _IO(KBASE_IOCTL_TYPE, 4)
/**
* struct kbase_ioctl_soft_event_update - Update the status of a soft-event
@@ -242,9 +245,7 @@ union kbase_kinstr_jm_fd {
struct kbase_kinstr_jm_fd_out out;
};
-#define KBASE_IOCTL_KINSTR_JM_FD \
- _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd)
-
+#define KBASE_IOCTL_KINSTR_JM_FD _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd)
#define KBASE_IOCTL_VERSION_CHECK_RESERVED \
_IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)
diff --git a/common/include/uapi/gpu/arm/midgard/mali_base_common_kernel.h b/common/include/uapi/gpu/arm/midgard/mali_base_common_kernel.h
index f837814..c009d5d 100644
--- a/common/include/uapi/gpu/arm/midgard/mali_base_common_kernel.h
+++ b/common/include/uapi/gpu/arm/midgard/mali_base_common_kernel.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2022-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -162,7 +162,7 @@ struct base_mem_handle {
/* A mask for all input bits, including IN/OUT bits.
*/
-#define BASE_MEM_FLAGS_INPUT_MASK \
+#define BASE_MEM_FLAGS_INPUT_MASK \
(((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
/* Special base mem handles.
@@ -206,13 +206,13 @@ typedef __u32 base_context_create_flags;
/* Bitmask used to encode a memory group ID in base_context_create_flags
*/
-#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
/* Bitpattern describing the base_context_create_flags that can be
* passed to the kernel
*/
-#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
(BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | BASEP_CONTEXT_MMU_GROUP_ID_MASK)
/* Flags for base tracepoint
@@ -221,11 +221,11 @@ typedef __u32 base_context_create_flags;
/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
* TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST)
*/
-#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1U << 0)
/* Indicate that job dumping is enabled. This could affect certain timers
* to account for the performance impact.
*/
-#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1U << 1)
#endif /* _UAPI_BASE_COMMON_KERNEL_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/mali_base_kernel.h b/common/include/uapi/gpu/arm/midgard/mali_base_kernel.h
index e6cac0e..cb1a1e8 100644
--- a/common/include/uapi/gpu/arm/midgard/mali_base_kernel.h
+++ b/common/include/uapi/gpu/arm/midgard/mali_base_kernel.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,26 +27,31 @@
#define _UAPI_BASE_KERNEL_H_
#include <linux/types.h>
+#include "mali_gpu_props.h"
#include "mali_base_mem_priv.h"
#include "gpu/mali_kbase_gpu_id.h"
#include "gpu/mali_kbase_gpu_coherency.h"
-#define BASE_MAX_COHERENT_GROUPS 16
+#ifdef __KERNEL__
+#include <linux/mm.h>
#if defined(PAGE_MASK) && defined(PAGE_SHIFT)
#define LOCAL_PAGE_SHIFT PAGE_SHIFT
#define LOCAL_PAGE_LSB ~PAGE_MASK
#else
-#ifndef OSU_CONFIG_CPU_PAGE_SIZE_LOG2
-#define OSU_CONFIG_CPU_PAGE_SIZE_LOG2 12
+#error "Missing kernel definitions: PAGE_MASK, PAGE_SHIFT"
#endif
-#if defined(OSU_CONFIG_CPU_PAGE_SIZE_LOG2)
-#define LOCAL_PAGE_SHIFT OSU_CONFIG_CPU_PAGE_SIZE_LOG2
-#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
#else
-#error Failed to find page size
+
+#if defined(MALI_PAGE_SIZE_AGNOSTIC)
+#define LOCAL_PAGE_SHIFT (__builtin_ctz((unsigned int)sysconf(_SC_PAGESIZE)))
+#else
+#define LOCAL_PAGE_SHIFT 12
#endif
+
+#define LOCAL_PAGE_LSB ((1ul << LOCAL_PAGE_SHIFT) - 1)
+
#endif
/* Physical memory group ID for normal usage.
@@ -71,21 +76,23 @@
*/
typedef __u32 base_mem_alloc_flags;
+#define BASE_MEM_FLAGS_MODIFIABLE_NATIVE (BASE_MEM_DONT_NEED)
+
+#define BASE_MEM_FLAGS_MODIFIABLE_IMPORTED_UMM (BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL)
+
/* A mask for all the flags which are modifiable via the base_mem_set_flags
* interface.
*/
#define BASE_MEM_FLAGS_MODIFIABLE \
- (BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
- BASE_MEM_COHERENT_LOCAL)
+ (BASE_MEM_FLAGS_MODIFIABLE_NATIVE | BASE_MEM_FLAGS_MODIFIABLE_IMPORTED_UMM)
/* A mask of all the flags that can be returned via the base_mem_get_flags()
* interface.
*/
-#define BASE_MEM_FLAGS_QUERYABLE \
- (BASE_MEM_FLAGS_INPUT_MASK & ~(BASE_MEM_SAME_VA | \
- BASE_MEM_COHERENT_SYSTEM_REQUIRED | BASE_MEM_DONT_NEED | \
- BASE_MEM_IMPORT_SHARED | BASE_MEM_FLAGS_RESERVED | \
- BASEP_MEM_FLAGS_KERNEL_ONLY))
+#define BASE_MEM_FLAGS_QUERYABLE \
+ (BASE_MEM_FLAGS_INPUT_MASK & \
+ ~(BASE_MEM_SAME_VA | BASE_MEM_COHERENT_SYSTEM_REQUIRED | BASE_MEM_IMPORT_SHARED | \
+ BASE_MEM_FLAGS_RESERVED | BASEP_MEM_FLAGS_KERNEL_ONLY))
/**
* enum base_mem_import_type - Memory types supported by @a base_mem_import
@@ -127,22 +134,21 @@ struct base_mem_import_user_buffer {
};
/* Mask to detect 4GB boundary alignment */
-#define BASE_MEM_MASK_4GB 0xfffff000UL
+#define BASE_MEM_MASK_4GB 0xfffff000UL
/* Mask to detect 4GB boundary (in page units) alignment */
-#define BASE_MEM_PFN_MASK_4GB (BASE_MEM_MASK_4GB >> LOCAL_PAGE_SHIFT)
+#define BASE_MEM_PFN_MASK_4GB (BASE_MEM_MASK_4GB >> LOCAL_PAGE_SHIFT)
/* Limit on the 'extension' parameter for an allocation with the
* BASE_MEM_TILER_ALIGN_TOP flag set
*
* This is the same as the maximum limit for a Buffer Descriptor's chunk size
*/
-#define BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES_LOG2 \
- (21u - (LOCAL_PAGE_SHIFT))
-#define BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES \
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES_LOG2 (21u - (LOCAL_PAGE_SHIFT))
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES \
(1ull << (BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES_LOG2))
/* Bit mask of cookies used for memory allocation setup */
-#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
+#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
/* Maximum size allowed in a single KBASE_IOCTL_MEM_ALLOC call */
#define KBASE_MEM_ALLOC_MAX_SIZE ((8ull << 30) >> PAGE_SHIFT) /* 8 GB */
@@ -243,10 +249,7 @@ struct base_jit_alloc_info {
__u64 heap_info_gpu_addr;
};
-enum base_external_resource_access {
- BASE_EXT_RES_ACCESS_SHARED,
- BASE_EXT_RES_ACCESS_EXCLUSIVE
-};
+enum base_external_resource_access { BASE_EXT_RES_ACCESS_SHARED, BASE_EXT_RES_ACCESS_EXCLUSIVE };
struct base_external_resource {
__u64 ext_resource;
@@ -276,8 +279,6 @@ struct base_jd_debug_copy_buffer {
struct base_external_resource extres;
};
-#define GPU_MAX_JOB_SLOTS 16
-
/**
* DOC: User-side Base GPU Property Queries
*
@@ -402,8 +403,8 @@ struct mali_base_gpu_l2_cache_props {
};
struct mali_base_gpu_tiler_props {
- __u32 bin_size_bytes; /* Max is 4*2^15 */
- __u32 max_active_levels; /* Max is 2^15 */
+ __u32 bin_size_bytes; /* Max is 4*2^15 */
+ __u32 max_active_levels; /* Max is 2^15 */
};
/**
@@ -428,11 +429,11 @@ struct mali_base_gpu_thread_props {
__u32 max_threads;
__u32 max_workgroup_size;
__u32 max_barrier_size;
- __u16 max_registers;
+ __u32 max_registers;
__u8 max_task_queue;
__u8 max_thread_group_split;
__u8 impl_tech;
- __u8 padding[3];
+ __u8 padding;
__u32 tls_alloc;
};
@@ -591,24 +592,20 @@ struct base_gpu_props {
struct mali_base_gpu_coherent_group_info coherency_info;
};
-#define BASE_MEM_GROUP_ID_GET(flags) \
- ((flags & BASE_MEM_GROUP_ID_MASK) >> BASEP_MEM_GROUP_ID_SHIFT)
+#define BASE_MEM_GROUP_ID_GET(flags) ((flags & BASE_MEM_GROUP_ID_MASK) >> BASEP_MEM_GROUP_ID_SHIFT)
-#define BASE_MEM_GROUP_ID_SET(id) \
- (((base_mem_alloc_flags)((id < 0 || id >= BASE_MEM_GROUP_COUNT) ? \
- BASE_MEM_GROUP_DEFAULT : \
- id) \
- << BASEP_MEM_GROUP_ID_SHIFT) & \
+#define BASE_MEM_GROUP_ID_SET(id) \
+ (((base_mem_alloc_flags)((id < 0 || id >= BASE_MEM_GROUP_COUNT) ? BASE_MEM_GROUP_DEFAULT : \
+ id) \
+ << BASEP_MEM_GROUP_ID_SHIFT) & \
BASE_MEM_GROUP_ID_MASK)
-#define BASE_CONTEXT_MMU_GROUP_ID_SET(group_id) \
- (BASEP_CONTEXT_MMU_GROUP_ID_MASK & \
- ((base_context_create_flags)(group_id) \
- << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT))
+#define BASE_CONTEXT_MMU_GROUP_ID_SET(group_id) \
+ (BASEP_CONTEXT_MMU_GROUP_ID_MASK & \
+ ((base_context_create_flags)(group_id) << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT))
-#define BASE_CONTEXT_MMU_GROUP_ID_GET(flags) \
- ((flags & BASEP_CONTEXT_MMU_GROUP_ID_MASK) >> \
- BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+#define BASE_CONTEXT_MMU_GROUP_ID_GET(flags) \
+ ((flags & BASEP_CONTEXT_MMU_GROUP_ID_MASK) >> BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
/*
* A number of bit flags are defined for requesting cpu_gpu_timeinfo. These
@@ -617,22 +614,20 @@ struct base_gpu_props {
*/
/* For monotonic (counter) timefield */
-#define BASE_TIMEINFO_MONOTONIC_FLAG (1UL << 0)
+#define BASE_TIMEINFO_MONOTONIC_FLAG (1U << 0)
/* For system wide timestamp */
-#define BASE_TIMEINFO_TIMESTAMP_FLAG (1UL << 1)
+#define BASE_TIMEINFO_TIMESTAMP_FLAG (1U << 1)
/* For GPU cycle counter */
-#define BASE_TIMEINFO_CYCLE_COUNTER_FLAG (1UL << 2)
+#define BASE_TIMEINFO_CYCLE_COUNTER_FLAG (1U << 2)
/* Specify kernel GPU register timestamp */
-#define BASE_TIMEINFO_KERNEL_SOURCE_FLAG (1UL << 30)
+#define BASE_TIMEINFO_KERNEL_SOURCE_FLAG (1U << 30)
/* Specify userspace cntvct_el0 timestamp source */
-#define BASE_TIMEINFO_USER_SOURCE_FLAG (1UL << 31)
-
-#define BASE_TIMEREQUEST_ALLOWED_FLAGS (\
- BASE_TIMEINFO_MONOTONIC_FLAG | \
- BASE_TIMEINFO_TIMESTAMP_FLAG | \
- BASE_TIMEINFO_CYCLE_COUNTER_FLAG | \
- BASE_TIMEINFO_KERNEL_SOURCE_FLAG | \
- BASE_TIMEINFO_USER_SOURCE_FLAG)
+#define BASE_TIMEINFO_USER_SOURCE_FLAG (1U << 31)
+
+#define BASE_TIMEREQUEST_ALLOWED_FLAGS \
+ (BASE_TIMEINFO_MONOTONIC_FLAG | BASE_TIMEINFO_TIMESTAMP_FLAG | \
+ BASE_TIMEINFO_CYCLE_COUNTER_FLAG | BASE_TIMEINFO_KERNEL_SOURCE_FLAG | \
+ BASE_TIMEINFO_USER_SOURCE_FLAG)
/* Maximum number of source allocations allowed to create an alias allocation.
* This needs to be 4096 * 6 to allow cube map arrays with up to 4096 array
diff --git a/common/include/uapi/gpu/arm/midgard/mali_base_mem_priv.h b/common/include/uapi/gpu/arm/midgard/mali_base_mem_priv.h
index 70f5b09..994da42 100644
--- a/common/include/uapi/gpu/arm/midgard/mali_base_mem_priv.h
+++ b/common/include/uapi/gpu/arm/midgard/mali_base_mem_priv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2010-2015, 2020-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,8 +25,8 @@
#include <linux/types.h>
#include "mali_base_common_kernel.h"
-#define BASE_SYNCSET_OP_MSYNC (1U << 0)
-#define BASE_SYNCSET_OP_CSYNC (1U << 1)
+#define BASE_SYNCSET_OP_MSYNC (1U << 0)
+#define BASE_SYNCSET_OP_CSYNC (1U << 1)
/*
* This structure describe a basic memory coherency operation.
diff --git a/common/include/uapi/gpu/arm/midgard/mali_gpu_props.h b/common/include/uapi/gpu/arm/midgard/mali_gpu_props.h
new file mode 100644
index 0000000..3640ad0
--- /dev/null
+++ b/common/include/uapi/gpu/arm/midgard/mali_gpu_props.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2023 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_MALI_GPUPROPS_H_
+#define _UAPI_MALI_GPUPROPS_H_
+
+#include <linux/types.h>
+#include "mali_base_common_kernel.h"
+
+#define BASE_MAX_COHERENT_GROUPS 16
+#define GPU_MAX_JOB_SLOTS 16
+
+/**
+ * struct gpu_props_user_data - structure for gpu props user buffer.
+ * @core_props: Core props.
+ * @l2_props: L2 props.
+ * @tiler_props: Tiler props.
+ * @thread_props: Thread props.
+ * @raw_props: Raw register values kept for backwards compatibility. Kbase
+ * and base should never reference values within this struct.
+ * @coherency_info: Coherency information.
+ *
+ * This structure is used solely for the encoding and decoding of the prop_buffer
+ * returned by kbase.
+ */
+struct gpu_props_user_data {
+ struct {
+ __u32 product_id;
+ __u16 version_status;
+ __u16 minor_revision;
+ __u16 major_revision;
+ __u32 gpu_freq_khz_max;
+ __u32 log2_program_counter_size;
+ __u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+ __u64 gpu_available_memory_size;
+ __u8 num_exec_engines;
+ } core_props;
+ struct {
+ __u8 log2_line_size;
+ __u8 log2_cache_size;
+ __u8 num_l2_slices;
+ } l2_props;
+ struct {
+ __u32 bin_size_bytes;
+ __u32 max_active_levels;
+ } tiler_props;
+ struct {
+ __u32 max_threads;
+ __u32 max_workgroup_size;
+ __u32 max_barrier_size;
+ __u32 max_registers;
+ __u8 max_task_queue;
+ __u8 max_thread_group_split;
+ __u8 impl_tech;
+ __u32 tls_alloc;
+ } thread_props;
+
+ /* kept for backward compatibility, should not be used in the future. */
+ struct {
+ __u64 shader_present;
+ __u64 tiler_present;
+ __u64 l2_present;
+ __u64 stack_present;
+ __u64 l2_features;
+ __u64 core_features;
+ __u64 mem_features;
+ __u64 mmu_features;
+ __u32 as_present;
+ __u32 js_present;
+ __u32 js_features[GPU_MAX_JOB_SLOTS];
+ __u64 tiler_features;
+ __u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+ __u64 gpu_id;
+ __u32 thread_max_threads;
+ __u32 thread_max_workgroup_size;
+ __u32 thread_max_barrier_size;
+ __u32 thread_features;
+ __u32 coherency_mode;
+ __u32 thread_tls_alloc;
+ __u64 gpu_features;
+ } raw_props;
+ struct {
+ __u32 num_groups;
+ __u32 num_core_groups;
+ __u32 coherency;
+ struct {
+ __u64 core_mask;
+ __u32 num_cores;
+ } group[BASE_MAX_COHERENT_GROUPS];
+ } coherency_info;
+};
+
+#endif /* _UAPI_MALI_GPUPROPS_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/mali_kbase_hwcnt_reader.h b/common/include/uapi/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
index 5089bf2..81e3980 100644
--- a/common/include/uapi/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
+++ b/common/include/uapi/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2015, 2020-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,25 +27,26 @@
/* The ids of ioctl commands. */
#define KBASE_HWCNT_READER 0xBE
-#define KBASE_HWCNT_READER_GET_HWVER _IOR(KBASE_HWCNT_READER, 0x00, __u32)
+#define KBASE_HWCNT_READER_GET_HWVER _IOR(KBASE_HWCNT_READER, 0x00, __u32)
#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, __u32)
-#define KBASE_HWCNT_READER_DUMP _IOW(KBASE_HWCNT_READER, 0x10, __u32)
-#define KBASE_HWCNT_READER_CLEAR _IOW(KBASE_HWCNT_READER, 0x11, __u32)
-#define KBASE_HWCNT_READER_GET_BUFFER _IOC(_IOC_READ, KBASE_HWCNT_READER, 0x20,\
- offsetof(struct kbase_hwcnt_reader_metadata, cycles))
-#define KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES _IOR(KBASE_HWCNT_READER, 0x20,\
- struct kbase_hwcnt_reader_metadata)
-#define KBASE_HWCNT_READER_PUT_BUFFER _IOC(_IOC_WRITE, KBASE_HWCNT_READER, 0x21,\
- offsetof(struct kbase_hwcnt_reader_metadata, cycles))
-#define KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES _IOW(KBASE_HWCNT_READER, 0x21,\
- struct kbase_hwcnt_reader_metadata)
-#define KBASE_HWCNT_READER_SET_INTERVAL _IOW(KBASE_HWCNT_READER, 0x30, __u32)
-#define KBASE_HWCNT_READER_ENABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x40, __u32)
-#define KBASE_HWCNT_READER_DISABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x41, __u32)
+#define KBASE_HWCNT_READER_DUMP _IOW(KBASE_HWCNT_READER, 0x10, __u32)
+#define KBASE_HWCNT_READER_CLEAR _IOW(KBASE_HWCNT_READER, 0x11, __u32)
+#define KBASE_HWCNT_READER_GET_BUFFER \
+ _IOC(_IOC_READ, KBASE_HWCNT_READER, 0x20, \
+ offsetof(struct kbase_hwcnt_reader_metadata, cycles))
+#define KBASE_HWCNT_READER_GET_BUFFER_WITH_CYCLES \
+ _IOR(KBASE_HWCNT_READER, 0x20, struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_PUT_BUFFER \
+ _IOC(_IOC_WRITE, KBASE_HWCNT_READER, 0x21, \
+ offsetof(struct kbase_hwcnt_reader_metadata, cycles))
+#define KBASE_HWCNT_READER_PUT_BUFFER_WITH_CYCLES \
+ _IOW(KBASE_HWCNT_READER, 0x21, struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_SET_INTERVAL _IOW(KBASE_HWCNT_READER, 0x30, __u32)
+#define KBASE_HWCNT_READER_ENABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x40, __u32)
+#define KBASE_HWCNT_READER_DISABLE_EVENT _IOW(KBASE_HWCNT_READER, 0x41, __u32)
#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, __u32)
#define KBASE_HWCNT_READER_GET_API_VERSION_WITH_FEATURES \
- _IOW(KBASE_HWCNT_READER, 0xFF, \
- struct kbase_hwcnt_reader_api_version)
+ _IOW(KBASE_HWCNT_READER, 0xFF, struct kbase_hwcnt_reader_api_version)
/**
* struct kbase_hwcnt_reader_metadata_cycles - GPU clock cycles
@@ -117,8 +118,7 @@ enum prfcnt_list_type {
PRFCNT_LIST_TYPE_SAMPLE_META,
};
-#define FLEX_LIST_TYPE(type, subtype) \
- ((__u16)(((type & 0xf) << 12) | (subtype & 0xfff)))
+#define FLEX_LIST_TYPE(type, subtype) ((__u16)(((type & 0xf) << 12) | (subtype & 0xfff)))
#define FLEX_LIST_TYPE_NONE FLEX_LIST_TYPE(0, 0)
#define PRFCNT_ENUM_TYPE_BLOCK FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_ENUM, 0)
@@ -129,12 +129,9 @@ enum prfcnt_list_type {
#define PRFCNT_REQUEST_TYPE_ENABLE FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_REQUEST, 1)
#define PRFCNT_REQUEST_TYPE_SCOPE FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_REQUEST, 2)
-#define PRFCNT_SAMPLE_META_TYPE_SAMPLE \
- FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_SAMPLE_META, 0)
-#define PRFCNT_SAMPLE_META_TYPE_CLOCK \
- FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_SAMPLE_META, 1)
-#define PRFCNT_SAMPLE_META_TYPE_BLOCK \
- FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_SAMPLE_META, 2)
+#define PRFCNT_SAMPLE_META_TYPE_SAMPLE FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_SAMPLE_META, 0)
+#define PRFCNT_SAMPLE_META_TYPE_CLOCK FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_SAMPLE_META, 1)
+#define PRFCNT_SAMPLE_META_TYPE_BLOCK FLEX_LIST_TYPE(PRFCNT_LIST_TYPE_SAMPLE_META, 2)
/**
* struct prfcnt_item_header - Header for an item of the list.
@@ -152,6 +149,8 @@ struct prfcnt_item_header {
* @PRFCNT_BLOCK_TYPE_TILER: Tiler.
* @PRFCNT_BLOCK_TYPE_MEMORY: Memory System.
* @PRFCNT_BLOCK_TYPE_SHADER_CORE: Shader Core.
+ * @PRFCNT_BLOCK_TYPE_FW: Firmware.
+ * @PRFCNT_BLOCK_TYPE_CSG: CSG.
* @PRFCNT_BLOCK_TYPE_RESERVED: Reserved.
*/
enum prfcnt_block_type {
@@ -159,6 +158,8 @@ enum prfcnt_block_type {
PRFCNT_BLOCK_TYPE_TILER,
PRFCNT_BLOCK_TYPE_MEMORY,
PRFCNT_BLOCK_TYPE_SHADER_CORE,
+ PRFCNT_BLOCK_TYPE_FW,
+ PRFCNT_BLOCK_TYPE_CSG,
PRFCNT_BLOCK_TYPE_RESERVED = 255,
};
@@ -491,13 +492,13 @@ struct prfcnt_sample_access {
/* The ids of ioctl commands, on a reader file descriptor, magic number */
#define KBASE_KINSTR_PRFCNT_READER 0xBF
/* Ioctl ID for issuing a session operational command */
-#define KBASE_IOCTL_KINSTR_PRFCNT_CMD \
+#define KBASE_IOCTL_KINSTR_PRFCNT_CMD \
_IOW(KBASE_KINSTR_PRFCNT_READER, 0x00, struct prfcnt_control_cmd)
/* Ioctl ID for fetching a dumpped sample */
-#define KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE \
+#define KBASE_IOCTL_KINSTR_PRFCNT_GET_SAMPLE \
_IOR(KBASE_KINSTR_PRFCNT_READER, 0x01, struct prfcnt_sample_access)
/* Ioctl ID for release internal buffer of the previously fetched sample */
-#define KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE \
+#define KBASE_IOCTL_KINSTR_PRFCNT_PUT_SAMPLE \
_IOW(KBASE_KINSTR_PRFCNT_READER, 0x10, struct prfcnt_sample_access)
#endif /* _UAPI_KBASE_HWCNT_READER_H_ */
diff --git a/common/include/uapi/gpu/arm/midgard/mali_kbase_ioctl.h b/common/include/uapi/gpu/arm/midgard/mali_kbase_ioctl.h
index c8a54f9..d60745f 100644
--- a/common/include/uapi/gpu/arm/midgard/mali_kbase_ioctl.h
+++ b/common/include/uapi/gpu/arm/midgard/mali_kbase_ioctl.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2017-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,7 +22,7 @@
#ifndef _UAPI_KBASE_IOCTL_H_
#define _UAPI_KBASE_IOCTL_H_
-#ifdef __cpluscplus
+#ifdef __cplusplus
extern "C" {
#endif
@@ -162,7 +162,7 @@ struct kbase_ioctl_hwcnt_reader_setup {
__u32 mmu_l2_bm;
};
-#define KBASE_IOCTL_HWCNT_READER_SETUP \
+#define KBASE_IOCTL_HWCNT_READER_SETUP \
_IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)
/**
@@ -276,7 +276,7 @@ union kbase_ioctl_mem_find_cpu_offset {
} out;
};
-#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
+#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
_IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)
/**
@@ -445,7 +445,7 @@ struct kbase_ioctl_sticky_resource_map {
__u64 address;
};
-#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
+#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
_IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map)
/**
@@ -459,7 +459,7 @@ struct kbase_ioctl_sticky_resource_unmap {
__u64 address;
};
-#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
+#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
_IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap)
/**
@@ -487,7 +487,7 @@ union kbase_ioctl_mem_find_gpu_start_and_offset {
} out;
};
-#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
+#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
_IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
#define KBASE_IOCTL_CINSTR_GWT_START _IO(KBASE_IOCTL_TYPE, 33)
@@ -565,7 +565,7 @@ union kbase_ioctl_get_cpu_gpu_timeinfo {
} out;
};
-#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
+#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
_IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo)
/**
@@ -577,7 +577,7 @@ struct kbase_ioctl_context_priority_check {
__u8 priority;
};
-#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \
+#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \
_IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check)
/**
@@ -589,7 +589,7 @@ struct kbase_ioctl_set_limited_core_count {
__u8 max_core_count;
};
-#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \
+#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \
_IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count)
/**
@@ -610,7 +610,7 @@ struct kbase_ioctl_kinstr_prfcnt_enum_info {
__u64 info_list_ptr;
};
-#define KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO \
+#define KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO \
_IOWR(KBASE_IOCTL_TYPE, 56, struct kbase_ioctl_kinstr_prfcnt_enum_info)
/**
@@ -639,7 +639,7 @@ union kbase_ioctl_kinstr_prfcnt_setup {
} out;
};
-#define KBASE_IOCTL_KINSTR_PRFCNT_SETUP \
+#define KBASE_IOCTL_KINSTR_PRFCNT_SETUP \
_IOWR(KBASE_IOCTL_TYPE, 57, union kbase_ioctl_kinstr_prfcnt_setup)
/***************
@@ -782,7 +782,7 @@ struct kbase_ioctl_tlstream_stats {
#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83
#define KBASE_GPUPROP_TLS_ALLOC 84
#define KBASE_GPUPROP_RAW_GPU_FEATURES 85
-#ifdef __cpluscplus
+#ifdef __cplusplus
}
#endif
diff --git a/common/include/uapi/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h b/common/include/uapi/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
index 3298450..11c51d9 100644
--- a/common/include/uapi/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
+++ b/common/include/uapi/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2014, 2017-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,6 +30,6 @@
* KBASE_MEM_PROFILE_MAX_BUF_SIZE - The size of the buffer to accumulate the histogram report text
* in @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
*/
-#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t)(64 + ((80 + (56 * 64)) * 54) + 56))
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t)(64 + ((80 + (56 * 64)) * 57) + 56))
#endif /*_UAPI_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/