path: root/mali_kbase/mali_kbase_core_linux.c
author     Jörg Wagner <jorwag@google.com>  2023-12-14 09:44:26 +0000
committer  Jörg Wagner <jorwag@google.com>  2023-12-14 09:44:26 +0000
commit     049a542207ed694271316782397b78b2e202086a (patch)
tree       105e9378d4d5062dc72109fdd4a77c915bd9425d /mali_kbase/mali_kbase_core_linux.c
parent     e61eb93296e9f940b32d4ad4b0c3a5557cbeaf17 (diff)
download   gpu-049a542207ed694271316782397b78b2e202086a.tar.gz
Update KMD to r47p0
Provenance: ipdelivery@ad01e50d640910a99224382bb227e6d4de627657 Change-Id: I19ac9bce34a5c5a319c1b4a388e8b037b3dfe6e7
Diffstat (limited to 'mali_kbase/mali_kbase_core_linux.c')
-rw-r--r--  mali_kbase/mali_kbase_core_linux.c  2399
1 file changed, 1186 insertions, 1213 deletions
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index 1b265f0..9ea5d74 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -21,8 +21,9 @@
#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase_gator.h>
+#include <mali_kbase_reg_track.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <linux/devfreq.h>
@@ -54,12 +55,11 @@
#include "hwcnt/mali_kbase_hwcnt_context.h"
#include "hwcnt/mali_kbase_hwcnt_virtualizer.h"
#include "mali_kbase_kinstr_prfcnt.h"
-#include "mali_kbase_vinstr.h"
#if MALI_USE_CSF
#include "csf/mali_kbase_csf_firmware.h"
#include "csf/mali_kbase_csf_tiler_heap.h"
#include "csf/mali_kbase_csf_csg_debugfs.h"
-#include "csf/mali_kbase_csf_cpu_queue_debugfs.h"
+#include "csf/mali_kbase_csf_cpu_queue.h"
#include "csf/mali_kbase_csf_event.h"
#endif
#ifdef CONFIG_MALI_ARBITER_SUPPORT
@@ -94,7 +94,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mm.h>
-#include <linux/compat.h> /* is_compat_task/in_compat_syscall */
+#include <linux/compat.h> /* is_compat_task/in_compat_syscall */
#include <linux/mman.h>
#include <linux/version.h>
#include <linux/version_compat_defs.h>
@@ -127,9 +127,8 @@
* @major: Kernel major version
* @minor: Kernel minor version
*/
-#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \
- (((minor) & 0xFFF) << 8) | \
- ((0 & 0xFF) << 0))
+#define KBASE_API_VERSION(major, minor) \
+ ((((major)&0xFFFU) << 20U) | (((minor)&0xFFFU) << 8U) | ((0U & 0xFFU) << 0U))
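Worked example (not part of the patch): the reformatted KBASE_API_VERSION macro packs a major/minor pair into a single word, so version 11.25 becomes (11 << 20) | (25 << 8) = 0x00B01900. A minimal user-space sketch of the packing and unpacking:

/* Illustrative only; mirrors the macro above outside the kernel. */
#include <stdio.h>

#define KBASE_API_VERSION(major, minor) \
	((((major)&0xFFFU) << 20U) | (((minor)&0xFFFU) << 8U) | ((0U & 0xFFU) << 0U))

int main(void)
{
	unsigned long v = KBASE_API_VERSION(11, 25);

	/* Extract the fields back out the same way the driver would. */
	printf("packed=0x%08lx major=%lu minor=%lu\n",
	       v, (v >> 20) & 0xFFF, (v >> 8) & 0xFFF);
	return 0;
}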
/**
* struct mali_kbase_capability_def - kbase capabilities table
@@ -150,15 +149,15 @@ struct mali_kbase_capability_def {
*/
static const struct mali_kbase_capability_def kbase_caps_table[MALI_KBASE_NUM_CAPS] = {
#if MALI_USE_CSF
- { 1, 0 }, /* SYSTEM_MONITOR */
- { 1, 0 }, /* JIT_PRESSURE_LIMIT */
- { 1, 0 }, /* MEM_GROW_ON_GPF */
- { 1, 0 } /* MEM_PROTECTED */
+ { 1, 0 }, /* SYSTEM_MONITOR */
+ { 1, 0 }, /* JIT_PRESSURE_LIMIT */
+ { 1, 0 }, /* MEM_GROW_ON_GPF */
+ { 1, 0 } /* MEM_PROTECTED */
#else
- { 11, 15 }, /* SYSTEM_MONITOR */
- { 11, 25 }, /* JIT_PRESSURE_LIMIT */
- { 11, 2 }, /* MEM_GROW_ON_GPF */
- { 11, 2 } /* MEM_PROTECTED */
+ { 11, 15 }, /* SYSTEM_MONITOR */
+ { 11, 25 }, /* JIT_PRESSURE_LIMIT */
+ { 11, 2 }, /* MEM_GROW_ON_GPF */
+ { 11, 2 } /* MEM_PROTECTED */
#endif
};
@@ -214,8 +213,7 @@ bool mali_kbase_supports_cap(unsigned long api_version, enum mali_kbase_cap cap)
*
* Note: This function always gets called in Userspace context.
*/
-static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
- struct file *const filp)
+static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev, struct file *const filp)
{
struct kbase_file *const kfile = kmalloc(sizeof(*kfile), GFP_KERNEL);
@@ -235,6 +233,7 @@ static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
#if IS_ENABLED(CONFIG_DEBUG_FS)
init_waitqueue_head(&kfile->zero_fops_count_wait);
#endif
+ init_waitqueue_head(&kfile->event_queue);
}
return kfile;
}
@@ -254,8 +253,8 @@ static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
*
* Return: 0 if successful, otherwise a negative error code.
*/
-static int kbase_file_set_api_version(struct kbase_file *const kfile,
- u16 const major, u16 const minor)
+static int kbase_file_set_api_version(struct kbase_file *const kfile, u16 const major,
+ u16 const minor)
{
if (WARN_ON(!kfile))
return -EINVAL;
@@ -263,8 +262,8 @@ static int kbase_file_set_api_version(struct kbase_file *const kfile,
/* setup pending, try to signal that we'll do the setup,
* if setup was already in progress, err this call
*/
- if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN,
- KBASE_FILE_VSN_IN_PROGRESS) != KBASE_FILE_NEED_VSN)
+ if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN, KBASE_FILE_VSN_IN_PROGRESS) !=
+ KBASE_FILE_NEED_VSN)
return -EPERM;
/* save the proposed version number for later use */
@@ -311,8 +310,7 @@ static unsigned long kbase_file_get_api_version(struct kbase_file *const kfile)
*
* Return: 0 if a new context was created, otherwise a negative error code.
*/
-static int kbase_file_create_kctx(struct kbase_file *kfile,
- base_context_create_flags flags);
+static int kbase_file_create_kctx(struct kbase_file *kfile, base_context_create_flags flags);
/**
* kbase_file_inc_fops_count_if_allowed - Increment the kfile::fops_count value if the file
@@ -356,12 +354,10 @@ static bool kbase_file_inc_fops_count_if_allowed(struct kbase_file *const kfile)
*
* Note: This function shall always be called in Userspace context.
*/
-static struct kbase_context *kbase_file_get_kctx_if_setup_complete(
- struct kbase_file *const kfile)
+static struct kbase_context *kbase_file_get_kctx_if_setup_complete(struct kbase_file *const kfile)
{
- if (WARN_ON(!kfile) ||
- atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
- WARN_ON(!kfile->kctx))
+ if (WARN_ON(!kfile) || atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
+ WARN_ON(!kfile->kctx))
return NULL;
return kfile->kctx;
@@ -374,8 +370,8 @@ static struct kbase_context *kbase_file_get_kctx_if_setup_complete(
*/
static void kbase_file_destroy_kctx(struct kbase_file *const kfile)
{
- if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_COMPLETE,
- KBASE_FILE_DESTROY_CTX) != KBASE_FILE_COMPLETE)
+ if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_COMPLETE, KBASE_FILE_DESTROY_CTX) !=
+ KBASE_FILE_COMPLETE)
return;
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -397,8 +393,7 @@ static void kbase_file_destroy_kctx(struct kbase_file *const kfile)
*/
static void kbase_file_destroy_kctx_worker(struct work_struct *work)
{
- struct kbase_file *kfile =
- container_of(work, struct kbase_file, destroy_kctx_work);
+ struct kbase_file *kfile = container_of(work, struct kbase_file, destroy_kctx_work);
WARN_ON_ONCE(kfile->owner);
WARN_ON_ONCE(kfile->map_count);
@@ -468,16 +463,14 @@ static void kbase_file_delete(struct kbase_file *const kfile)
kfree(kfile);
}
-static int kbase_api_handshake(struct kbase_file *kfile,
- struct kbase_ioctl_version_check *version)
+static int kbase_api_handshake(struct kbase_file *kfile, struct kbase_ioctl_version_check *version)
{
int err = 0;
switch (version->major) {
case BASE_UK_VERSION_MAJOR:
/* set minor to be the lowest common */
- version->minor = min_t(int, BASE_UK_VERSION_MINOR,
- (int)version->minor);
+ version->minor = min_t(int, BASE_UK_VERSION_MINOR, (int)version->minor);
break;
default:
/* We return our actual version regardless if it
@@ -500,32 +493,31 @@ static int kbase_api_handshake(struct kbase_file *kfile,
* (with job submission disabled) but we don't support that usage.
*/
if (!mali_kbase_supports_system_monitor(kbase_file_get_api_version(kfile)))
- err = kbase_file_create_kctx(kfile,
- BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
+ err = kbase_file_create_kctx(kfile, BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
return err;
}
static int kbase_api_handshake_dummy(struct kbase_file *kfile,
- struct kbase_ioctl_version_check *version)
+ struct kbase_ioctl_version_check *version)
{
+ CSTD_UNUSED(kfile);
+ CSTD_UNUSED(version);
+
return -EPERM;
}
-static int kbase_api_kinstr_prfcnt_enum_info(
- struct kbase_file *kfile,
- struct kbase_ioctl_kinstr_prfcnt_enum_info *prfcnt_enum_info)
+static int
+kbase_api_kinstr_prfcnt_enum_info(struct kbase_file *kfile,
+ struct kbase_ioctl_kinstr_prfcnt_enum_info *prfcnt_enum_info)
{
- return kbase_kinstr_prfcnt_enum_info(kfile->kbdev->kinstr_prfcnt_ctx,
- prfcnt_enum_info);
+ return kbase_kinstr_prfcnt_enum_info(kfile->kbdev->kinstr_prfcnt_ctx, prfcnt_enum_info);
}
-static int kbase_api_kinstr_prfcnt_setup(
- struct kbase_file *kfile,
- union kbase_ioctl_kinstr_prfcnt_setup *prfcnt_setup)
+static int kbase_api_kinstr_prfcnt_setup(struct kbase_file *kfile,
+ union kbase_ioctl_kinstr_prfcnt_setup *prfcnt_setup)
{
- return kbase_kinstr_prfcnt_setup(kfile->kbdev->kinstr_prfcnt_ctx,
- prfcnt_setup);
+ return kbase_kinstr_prfcnt_setup(kfile->kbdev->kinstr_prfcnt_ctx, prfcnt_setup);
}
static struct kbase_device *to_kbase_device(struct device *dev)
@@ -533,26 +525,28 @@ static struct kbase_device *to_kbase_device(struct device *dev)
return dev_get_drvdata(dev);
}
-int assign_irqs(struct kbase_device *kbdev)
+/**
+ * get_irqs - Get interrupts information from the device tree.
+ *
+ * @kbdev: Kbase device.
+ * @pdev: Platform device of the kbase device
+ *
+ * Read interrupt number and flag for 'JOB', 'MMU' and 'GPU' interrupts
+ * from the device tree and fill them into the struct of the kbase device.
+ *
+ * Return: 0 on successful reading of all the entries JOB, MMU and GPU interrupts.
+ * -EINVAL on failure for all other cases.
+ *
+ */
+static int get_irqs(struct kbase_device *kbdev, struct platform_device *pdev)
{
- static const char *const irq_names_caps[] = { "JOB", "MMU", "GPU" };
-
-#if IS_ENABLED(CONFIG_OF)
- static const char *const irq_names[] = { "job", "mmu", "gpu" };
-#endif
-
- struct platform_device *pdev;
int i;
-
- if (!kbdev)
- return -ENODEV;
-
- pdev = to_platform_device(kbdev->dev);
+ static const char *const irq_names_caps[] = { "JOB", "MMU", "GPU" };
for (i = 0; i < ARRAY_SIZE(irq_names_caps); i++) {
+ struct irq_data *irqdata;
int irq;
-#if IS_ENABLED(CONFIG_OF)
/* We recommend using Upper case for the irq names in dts, but if
* there are devices in the world using Lower case then we should
* avoid breaking support for them. So try using names in Upper case
@@ -560,24 +554,42 @@ int assign_irqs(struct kbase_device *kbdev)
* we assume there is no IRQ resource specified for the GPU.
*/
irq = platform_get_irq_byname(pdev, irq_names_caps[i]);
- if (irq < 0)
+ if (irq < 0) {
+ static const char *const irq_names[] = { "job", "mmu", "gpu" };
+
irq = platform_get_irq_byname(pdev, irq_names[i]);
-#else
- irq = platform_get_irq(pdev, i);
-#endif /* CONFIG_OF */
+ }
- if (irq < 0) {
- dev_err(kbdev->dev, "No IRQ resource '%s'\n", irq_names_caps[i]);
+ if (irq < 0)
return irq;
- }
- kbdev->irqs[i].irq = irq;
- kbdev->irqs[i].flags = irqd_get_trigger_type(irq_get_irq_data(irq));
+ kbdev->irqs[i].irq = (u32)irq;
+ irqdata = irq_get_irq_data((unsigned int)irq);
+ if (likely(irqdata))
+ kbdev->irqs[i].flags = irqd_get_trigger_type(irqdata);
+ else
+ return -EINVAL;
+
+ kbdev->nr_irqs++;
}
return 0;
}
+
+int kbase_get_irqs(struct kbase_device *kbdev)
+{
+ int result;
+ struct platform_device *pdev = to_platform_device(kbdev->dev);
+
+ kbdev->nr_irqs = 0;
+ result = get_irqs(kbdev, pdev);
+ if (result)
+ dev_err(kbdev->dev, "Invalid or No interrupt resources");
+
+ return result;
+}
+
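Sketch (not part of the patch) of how the new kbase_get_irqs() helper is intended to be used at probe time. Only kbase_get_irqs(), kbdev->irqs[] and kbdev->nr_irqs come from the change above; the wrapper function and its error handling are simplified assumptions:

static int example_probe_irqs(struct kbase_device *kbdev)
{
	/* Expected device-tree naming (upper case preferred, lower case
	 * accepted for backwards compatibility):
	 *
	 *   interrupt-names = "JOB", "MMU", "GPU";
	 */
	int err = kbase_get_irqs(kbdev);

	if (err)
		return err; /* "Invalid or No interrupt resources" already logged */

	/* kbdev->irqs[0..kbdev->nr_irqs - 1] now hold the IRQ numbers and
	 * trigger flags read from the device tree.
	 */
	return 0;
}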
/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
@@ -608,12 +620,15 @@ void kbase_release_device(struct kbase_device *kbdev)
EXPORT_SYMBOL(kbase_release_device);
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
+static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size,
+ loff_t *off)
{
struct kbase_context *kctx = f->private_data;
int err;
bool value;
+ CSTD_UNUSED(off);
+
err = kstrtobool_from_user(ubuf, size, &value);
if (err)
return err;
@@ -623,19 +638,19 @@ static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf,
else
kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
- return size;
+ return (ssize_t)size;
}
static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
{
struct kbase_context *kctx = f->private_data;
char buf[32];
- int count;
+ size_t count;
bool value;
value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
- count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+ count = (size_t)scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
return simple_read_from_buffer(ubuf, size, off, buf, count);
}
@@ -647,13 +662,15 @@ static const struct file_operations kbase_infinite_cache_fops = {
.read = read_ctx_infinite_cache,
};
-static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
- size_t size, loff_t *off)
+static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf, size_t size,
+ loff_t *off)
{
struct kbase_context *kctx = f->private_data;
int err;
bool value;
+ CSTD_UNUSED(off);
+
err = kstrtobool_from_user(ubuf, size, &value);
if (err)
return err;
@@ -672,20 +689,19 @@ static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
kbase_ctx_flag_clear(kctx, KCTX_FORCE_SAME_VA);
}
- return size;
+ return (ssize_t)size;
}
-static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf,
- size_t size, loff_t *off)
+static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf, size_t size, loff_t *off)
{
struct kbase_context *kctx = f->private_data;
char buf[32];
- int count;
+ size_t count;
bool value;
value = kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA);
- count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+ count = (size_t)scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
return simple_read_from_buffer(ubuf, size, off, buf, count);
}
@@ -699,7 +715,7 @@ static const struct file_operations kbase_force_same_va_fops = {
#endif /* CONFIG_DEBUG_FS */
static int kbase_file_create_kctx(struct kbase_file *const kfile,
- base_context_create_flags const flags)
+ base_context_create_flags const flags)
{
struct kbase_device *kbdev = NULL;
struct kbase_context *kctx = NULL;
@@ -713,14 +729,13 @@ static int kbase_file_create_kctx(struct kbase_file *const kfile,
/* setup pending, try to signal that we'll do the setup,
* if setup was already in progress, err this call
*/
- if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX,
- KBASE_FILE_CTX_IN_PROGRESS) != KBASE_FILE_NEED_CTX)
+ if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX, KBASE_FILE_CTX_IN_PROGRESS) !=
+ KBASE_FILE_NEED_CTX)
return -EPERM;
kbdev = kfile->kbdev;
- kctx = kbase_create_context(kbdev, in_compat_syscall(),
- flags, kfile->api_version, kfile);
+ kctx = kbase_create_context(kbdev, in_compat_syscall(), flags, kfile->api_version, kfile);
/* if bad flags, will stay stuck in setup mode */
if (!kctx)
@@ -735,17 +750,16 @@ static int kbase_file_create_kctx(struct kbase_file *const kfile,
mutex_init(&kctx->mem_profile_lock);
- kctx->kctx_dentry = debugfs_create_dir(kctx_name,
- kbdev->debugfs_ctx_directory);
+ kctx->kctx_dentry = debugfs_create_dir(kctx_name, kbdev->debugfs_ctx_directory);
if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
/* we don't treat this as a fail - just warn about it */
dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
} else {
- debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
- kctx, &kbase_infinite_cache_fops);
- debugfs_create_file("force_same_va", 0600, kctx->kctx_dentry,
- kctx, &kbase_force_same_va_fops);
+ debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry, kctx,
+ &kbase_infinite_cache_fops);
+ debugfs_create_file("force_same_va", 0600, kctx->kctx_dentry, kctx,
+ &kbase_force_same_va_fops);
kbase_context_debugfs_init(kctx);
}
@@ -765,7 +779,7 @@ static int kbase_open(struct inode *inode, struct file *filp)
struct kbase_file *kfile;
int ret = 0;
- kbdev = kbase_find_device(iminor(inode));
+ kbdev = kbase_find_device((int)iminor(inode));
if (!kbdev)
return -ENODEV;
@@ -802,6 +816,8 @@ static int kbase_release(struct inode *inode, struct file *filp)
{
struct kbase_file *const kfile = filp->private_data;
+ CSTD_UNUSED(inode);
+
kbase_file_delete(kfile);
return 0;
}
@@ -836,16 +852,14 @@ static int kbase_flush(struct file *filp, fl_owner_t id)
return 0;
}
-static int kbase_api_set_flags(struct kbase_file *kfile,
- struct kbase_ioctl_set_flags *flags)
+static int kbase_api_set_flags(struct kbase_file *kfile, struct kbase_ioctl_set_flags *flags)
{
int err = 0;
unsigned long const api_version = kbase_file_get_api_version(kfile);
struct kbase_context *kctx = NULL;
/* Validate flags */
- if (flags->create_flags !=
- (flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS))
+ if (flags->create_flags != (flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS))
return -EINVAL;
/* For backward compatibility, the context may have been created before
@@ -867,7 +881,7 @@ static int kbase_api_set_flags(struct kbase_file *kfile,
return -EPERM;
#if MALI_USE_CSF
- /* On CSF GPUs Job Manager interface isn't used to submit jobs
+ /* On CSF GPUs Job Manager interface isn't used to submit jobs
* (there are no job slots). So the legacy job manager path to
* submit jobs needs to remain disabled for CSF GPUs.
*/
@@ -876,11 +890,9 @@ static int kbase_api_set_flags(struct kbase_file *kfile,
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
/* Translate the flags */
- if ((flags->create_flags &
- BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+ if ((flags->create_flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
-
spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
#endif
@@ -890,17 +902,15 @@ static int kbase_api_set_flags(struct kbase_file *kfile,
}
#if !MALI_USE_CSF
-static int kbase_api_job_submit(struct kbase_context *kctx,
- struct kbase_ioctl_job_submit *submit)
+static int kbase_api_job_submit(struct kbase_context *kctx, struct kbase_ioctl_job_submit *submit)
{
- return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
- submit->nr_atoms,
- submit->stride, false);
+ return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr), submit->nr_atoms,
+ submit->stride, false);
}
#endif /* !MALI_USE_CSF */
static int kbase_api_get_gpuprops(struct kbase_file *kfile,
- struct kbase_ioctl_get_gpuprops *get_props)
+ struct kbase_ioctl_get_gpuprops *get_props)
{
struct kbase_gpu_props *kprops = &kfile->kbdev->gpu_props;
int err;
@@ -911,16 +921,15 @@ static int kbase_api_get_gpuprops(struct kbase_file *kfile,
}
if (get_props->size == 0)
- return kprops->prop_buffer_size;
+ return (int)kprops->prop_buffer_size;
if (get_props->size < kprops->prop_buffer_size)
return -EINVAL;
- err = copy_to_user(u64_to_user_ptr(get_props->buffer),
- kprops->prop_buffer,
- kprops->prop_buffer_size);
+ err = copy_to_user(u64_to_user_ptr(get_props->buffer), kprops->prop_buffer,
+ kprops->prop_buffer_size);
if (err)
return -EFAULT;
- return kprops->prop_buffer_size;
+ return (int)kprops->prop_buffer_size;
}
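Sketch (not part of the patch) of the two-step user-space protocol implied by kbase_api_get_gpuprops(): a first call with size == 0 returns the required buffer size, and a second call with a large-enough buffer copies the properties out. The struct layout and helper below are assumptions for illustration only; the real KBASE_IOCTL_GET_GPUPROPS request number is passed in via 'request':

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Assumed field layout, mirroring how the handler uses buffer/size. */
struct example_get_gpuprops {
	uint64_t buffer; /* user pointer passed as u64 */
	uint32_t size;
	uint32_t flags;
};

static void *example_read_gpuprops(int fd, unsigned long request, int *len_out)
{
	struct example_get_gpuprops args = { 0 };
	void *buf;
	int len;

	/* Step 1: size == 0 asks the driver for prop_buffer_size. */
	len = ioctl(fd, request, &args);
	if (len <= 0)
		return NULL;

	buf = malloc((size_t)len);
	if (!buf)
		return NULL;

	/* Step 2: provide a buffer at least prop_buffer_size bytes long. */
	args.buffer = (uint64_t)(uintptr_t)buf;
	args.size = (uint32_t)len;
	if (ioctl(fd, request, &args) < 0) {
		free(buf);
		return NULL;
	}

	*len_out = len;
	return buf;
}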
#if !MALI_USE_CSF
@@ -992,8 +1001,7 @@ static int kbase_api_mem_alloc_ex(struct kbase_context *kctx,
* In both cases, the executable and fixed/fixable zones, and
* the executable+fixed/fixable zone, are all above this range.
*/
- if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
- kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
+ if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) && kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
if (!gpu_executable && !fixed_or_fixable)
flags |= BASE_MEM_SAME_VA;
}
@@ -1010,9 +1018,7 @@ static int kbase_api_mem_alloc_ex(struct kbase_context *kctx,
if (gpu_executable || fixed_or_fixable)
return -ENOMEM;
- flags |= (BASE_MEM_SAME_VA |
- BASE_MEM_CACHED_CPU |
- BASE_MEM_COHERENT_SYSTEM);
+ flags |= (BASE_MEM_SAME_VA | BASE_MEM_CACHED_CPU | BASE_MEM_COHERENT_SYSTEM);
}
reg = kbase_mem_alloc(kctx, alloc_ex->in.va_pages, alloc_ex->in.commit_pages,
@@ -1086,35 +1092,25 @@ static int kbase_api_mem_alloc(struct kbase_context *kctx, union kbase_ioctl_mem
}
#endif /* MALI_USE_CSF */
-static int kbase_api_mem_query(struct kbase_context *kctx,
- union kbase_ioctl_mem_query *query)
+static int kbase_api_mem_query(struct kbase_context *kctx, union kbase_ioctl_mem_query *query)
{
- return kbase_mem_query(kctx, query->in.gpu_addr,
- query->in.query, &query->out.value);
+ return kbase_mem_query(kctx, query->in.gpu_addr, query->in.query, &query->out.value);
}
-static int kbase_api_mem_free(struct kbase_context *kctx,
- struct kbase_ioctl_mem_free *free)
+static int kbase_api_mem_free(struct kbase_context *kctx, struct kbase_ioctl_mem_free *free)
{
return kbase_mem_free(kctx, free->gpu_addr);
}
#if !MALI_USE_CSF
-static int kbase_api_kinstr_jm_fd(struct kbase_context *kctx,
- union kbase_kinstr_jm_fd *arg)
+static int kbase_api_kinstr_jm_fd(struct kbase_context *kctx, union kbase_kinstr_jm_fd *arg)
{
return kbase_kinstr_jm_get_fd(kctx->kinstr_jm, arg);
}
#endif
-static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
- struct kbase_ioctl_hwcnt_reader_setup *setup)
-{
- return kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
-}
-
static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
- union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
+ union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
{
u32 flags = timeinfo->in.request_flags;
struct timespec64 ts = { 0 };
@@ -1124,9 +1120,9 @@ static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
kbase_pm_context_active(kctx->kbdev);
kbase_backend_get_gpu_time(kctx->kbdev,
- (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
- (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
- (flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
+ (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
+ (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
+ (flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
if (flags & BASE_TIMEINFO_TIMESTAMP_FLAG)
timeinfo->out.timestamp = timestamp;
@@ -1135,8 +1131,8 @@ static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
timeinfo->out.cycle_counter = cycle_cnt;
if (flags & BASE_TIMEINFO_MONOTONIC_FLAG) {
- timeinfo->out.sec = ts.tv_sec;
- timeinfo->out.nsec = ts.tv_nsec;
+ timeinfo->out.sec = (u64)ts.tv_sec;
+ timeinfo->out.nsec = (u32)ts.tv_nsec;
}
kbase_pm_context_idle(kctx->kbdev);
@@ -1145,15 +1141,14 @@ static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
}
#if IS_ENABLED(CONFIG_MALI_NO_MALI)
-static int kbase_api_hwcnt_set(struct kbase_context *kctx,
- struct kbase_ioctl_hwcnt_values *values)
+static int kbase_api_hwcnt_set(struct kbase_context *kctx, struct kbase_ioctl_hwcnt_values *values)
{
return gpu_model_set_dummy_prfcnt_user_sample(u64_to_user_ptr(values->data), values->size);
}
#endif /* CONFIG_MALI_NO_MALI */
static int kbase_api_disjoint_query(struct kbase_context *kctx,
- struct kbase_ioctl_disjoint_query *query)
+ struct kbase_ioctl_disjoint_query *query)
{
query->counter = kbase_disjoint_event_get(kctx->kbdev);
@@ -1161,20 +1156,21 @@ static int kbase_api_disjoint_query(struct kbase_context *kctx,
}
static int kbase_api_get_ddk_version(struct kbase_context *kctx,
- struct kbase_ioctl_get_ddk_version *version)
+ struct kbase_ioctl_get_ddk_version *version)
{
int ret;
int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+ CSTD_UNUSED(kctx);
+
if (version->version_buffer == 0)
return len;
- if (version->size < len)
+ if (version->size < (u32)len)
return -EOVERFLOW;
- ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
- KERNEL_SIDE_DDK_VERSION_STRING,
- sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
+ ret = copy_to_user(u64_to_user_ptr(version->version_buffer), KERNEL_SIDE_DDK_VERSION_STRING,
+ sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
if (ret)
return -EFAULT;
@@ -1183,9 +1179,9 @@ static int kbase_api_get_ddk_version(struct kbase_context *kctx,
}
static int kbase_api_mem_jit_init(struct kbase_context *kctx,
- struct kbase_ioctl_mem_jit_init *jit_init)
+ struct kbase_ioctl_mem_jit_init *jit_init)
{
- int i;
+ size_t i;
for (i = 0; i < sizeof(jit_init->padding); i++) {
/* Ensure all padding bytes are 0 for potential future
@@ -1195,53 +1191,44 @@ static int kbase_api_mem_jit_init(struct kbase_context *kctx,
return -EINVAL;
}
- return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
- jit_init->max_allocations, jit_init->trim_level,
- jit_init->group_id, jit_init->phys_pages);
+ return kbase_region_tracker_init_jit(kctx, jit_init->va_pages, jit_init->max_allocations,
+ jit_init->trim_level, jit_init->group_id,
+ jit_init->phys_pages);
}
static int kbase_api_mem_exec_init(struct kbase_context *kctx,
- struct kbase_ioctl_mem_exec_init *exec_init)
+ struct kbase_ioctl_mem_exec_init *exec_init)
{
return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);
}
-static int kbase_api_mem_sync(struct kbase_context *kctx,
- struct kbase_ioctl_mem_sync *sync)
+static int kbase_api_mem_sync(struct kbase_context *kctx, struct kbase_ioctl_mem_sync *sync)
{
- struct basep_syncset sset = {
- .mem_handle.basep.handle = sync->handle,
- .user_addr = sync->user_addr,
- .size = sync->size,
- .type = sync->type
- };
+ struct basep_syncset sset = { .mem_handle.basep.handle = sync->handle,
+ .user_addr = sync->user_addr,
+ .size = sync->size,
+ .type = sync->type };
return kbase_sync_now(kctx, &sset);
}
static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
- union kbase_ioctl_mem_find_cpu_offset *find)
+ union kbase_ioctl_mem_find_cpu_offset *find)
{
- return kbasep_find_enclosing_cpu_mapping_offset(
- kctx,
- find->in.cpu_addr,
- find->in.size,
- &find->out.offset);
+ return kbasep_find_enclosing_cpu_mapping_offset(kctx, find->in.cpu_addr, find->in.size,
+ &find->out.offset);
}
-static int kbase_api_mem_find_gpu_start_and_offset(struct kbase_context *kctx,
- union kbase_ioctl_mem_find_gpu_start_and_offset *find)
+static int
+kbase_api_mem_find_gpu_start_and_offset(struct kbase_context *kctx,
+ union kbase_ioctl_mem_find_gpu_start_and_offset *find)
{
return kbasep_find_enclosing_gpu_mapping_start_and_offset(
- kctx,
- find->in.gpu_addr,
- find->in.size,
- &find->out.start,
- &find->out.offset);
+ kctx, find->in.gpu_addr, find->in.size, &find->out.start, &find->out.offset);
}
static int kbase_api_get_context_id(struct kbase_context *kctx,
- struct kbase_ioctl_get_context_id *info)
+ struct kbase_ioctl_get_context_id *info)
{
info->id = kctx->id;
@@ -1249,7 +1236,7 @@ static int kbase_api_get_context_id(struct kbase_context *kctx,
}
static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
- struct kbase_ioctl_tlstream_acquire *acquire)
+ struct kbase_ioctl_tlstream_acquire *acquire)
{
return kbase_timeline_io_acquire(kctx->kbdev, acquire->flags);
}
@@ -1261,14 +1248,12 @@ static int kbase_api_tlstream_flush(struct kbase_context *kctx)
return 0;
}
-static int kbase_api_mem_commit(struct kbase_context *kctx,
- struct kbase_ioctl_mem_commit *commit)
+static int kbase_api_mem_commit(struct kbase_context *kctx, struct kbase_ioctl_mem_commit *commit)
{
return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
}
-static int kbase_api_mem_alias(struct kbase_context *kctx,
- union kbase_ioctl_mem_alias *alias)
+static int kbase_api_mem_alias(struct kbase_context *kctx, union kbase_ioctl_mem_alias *alias)
{
struct base_mem_aliasing_info *ai;
u64 flags;
@@ -1281,9 +1266,8 @@ static int kbase_api_mem_alias(struct kbase_context *kctx,
if (!ai)
return -ENOMEM;
- err = copy_from_user(ai,
- u64_to_user_ptr(alias->in.aliasing_info),
- sizeof(*ai) * alias->in.nents);
+ err = copy_from_user(ai, u64_to_user_ptr(alias->in.aliasing_info),
+ sizeof(*ai) * alias->in.nents);
if (err) {
vfree(ai);
return -EFAULT;
@@ -1295,9 +1279,8 @@ static int kbase_api_mem_alias(struct kbase_context *kctx,
return -EINVAL;
}
- alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
- alias->in.stride, alias->in.nents,
- ai, &alias->out.va_pages);
+ alias->out.gpu_va = kbase_mem_alias(kctx, &flags, alias->in.stride, alias->in.nents, ai,
+ &alias->out.va_pages);
alias->out.flags = flags;
@@ -1309,8 +1292,7 @@ static int kbase_api_mem_alias(struct kbase_context *kctx,
return 0;
}
-static int kbase_api_mem_import(struct kbase_context *kctx,
- union kbase_ioctl_mem_import *import)
+static int kbase_api_mem_import(struct kbase_context *kctx, union kbase_ioctl_mem_import *import)
{
int ret;
u64 flags = import->in.flags;
@@ -1318,13 +1300,9 @@ static int kbase_api_mem_import(struct kbase_context *kctx,
if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
return -ENOMEM;
- ret = kbase_mem_import(kctx,
- import->in.type,
- u64_to_user_ptr(import->in.phandle),
- import->in.padding,
- &import->out.gpu_va,
- &import->out.va_pages,
- &flags);
+ ret = kbase_mem_import(kctx, import->in.type, u64_to_user_ptr(import->in.phandle),
+ import->in.padding, &import->out.gpu_va, &import->out.va_pages,
+ &flags);
import->out.flags = flags;
@@ -1332,25 +1310,26 @@ static int kbase_api_mem_import(struct kbase_context *kctx,
}
static int kbase_api_mem_flags_change(struct kbase_context *kctx,
- struct kbase_ioctl_mem_flags_change *change)
+ struct kbase_ioctl_mem_flags_change *change)
{
if (change->flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
return -ENOMEM;
- return kbase_mem_flags_change(kctx, change->gpu_va,
- change->flags, change->mask);
+ return kbase_mem_flags_change(kctx, change->gpu_va, change->flags, change->mask);
}
static int kbase_api_stream_create(struct kbase_context *kctx,
- struct kbase_ioctl_stream_create *stream)
+ struct kbase_ioctl_stream_create *stream)
{
#if IS_ENABLED(CONFIG_SYNC_FILE)
int fd, ret;
+ CSTD_UNUSED(kctx);
+
/* Name must be NULL-terminated and padded with NULLs, so check last
* character is NULL
*/
- if (stream->name[sizeof(stream->name)-1] != 0)
+ if (stream->name[sizeof(stream->name) - 1] != 0)
return -EINVAL;
ret = kbase_sync_fence_stream_create(stream->name, &fd);
@@ -1359,13 +1338,17 @@ static int kbase_api_stream_create(struct kbase_context *kctx,
return ret;
return fd;
#else
+ CSTD_UNUSED(kctx);
+ CSTD_UNUSED(stream);
+
return -ENOENT;
#endif
}
static int kbase_api_fence_validate(struct kbase_context *kctx,
- struct kbase_ioctl_fence_validate *validate)
+ struct kbase_ioctl_fence_validate *validate)
{
+ CSTD_UNUSED(kctx);
#if IS_ENABLED(CONFIG_SYNC_FILE)
return kbase_sync_fence_validate(validate->fd);
#else
@@ -1374,7 +1357,7 @@ static int kbase_api_fence_validate(struct kbase_context *kctx,
}
static int kbase_api_mem_profile_add(struct kbase_context *kctx,
- struct kbase_ioctl_mem_profile_add *data)
+ struct kbase_ioctl_mem_profile_add *data)
{
char *buf;
int err;
@@ -1394,8 +1377,7 @@ static int kbase_api_mem_profile_add(struct kbase_context *kctx,
if (!buf)
return -ENOMEM;
- err = copy_from_user(buf, u64_to_user_ptr(data->buffer),
- data->len);
+ err = copy_from_user(buf, u64_to_user_ptr(data->buffer), data->len);
if (err) {
kfree(buf);
return -EFAULT;
@@ -1406,7 +1388,7 @@ static int kbase_api_mem_profile_add(struct kbase_context *kctx,
#if !MALI_USE_CSF
static int kbase_api_soft_event_update(struct kbase_context *kctx,
- struct kbase_ioctl_soft_event_update *update)
+ struct kbase_ioctl_soft_event_update *update)
{
if (update->flags != 0)
return -EINVAL;
@@ -1416,7 +1398,7 @@ static int kbase_api_soft_event_update(struct kbase_context *kctx,
#endif /* !MALI_USE_CSF */
static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
- struct kbase_ioctl_sticky_resource_map *map)
+ struct kbase_ioctl_sticky_resource_map *map)
{
int ret;
u64 i;
@@ -1425,8 +1407,7 @@ static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
if (!map->count || map->count > BASE_EXT_RES_COUNT_MAX)
return -EOVERFLOW;
- ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address),
- sizeof(u64) * map->count);
+ ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address), sizeof(u64) * map->count);
if (ret != 0)
return -EFAULT;
@@ -1454,7 +1435,7 @@ static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
}
static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
- struct kbase_ioctl_sticky_resource_unmap *unmap)
+ struct kbase_ioctl_sticky_resource_unmap *unmap)
{
int ret;
u64 i;
@@ -1463,8 +1444,7 @@ static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
if (!unmap->count || unmap->count > BASE_EXT_RES_COUNT_MAX)
return -EOVERFLOW;
- ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address),
- sizeof(u64) * unmap->count);
+ ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address), sizeof(u64) * unmap->count);
if (ret != 0)
return -EFAULT;
@@ -1486,11 +1466,10 @@ static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
#if MALI_UNIT_TEST
static int kbase_api_tlstream_stats(struct kbase_context *kctx,
- struct kbase_ioctl_tlstream_stats *stats)
+ struct kbase_ioctl_tlstream_stats *stats)
{
- kbase_timeline_stats(kctx->kbdev->timeline,
- &stats->bytes_collected,
- &stats->bytes_generated);
+ kbase_timeline_stats(kctx->kbdev->timeline, &stats->bytes_collected,
+ &stats->bytes_generated);
return 0;
}
@@ -1504,7 +1483,7 @@ static int kbasep_cs_event_signal(struct kbase_context *kctx)
}
static int kbasep_cs_queue_register(struct kbase_context *kctx,
- struct kbase_ioctl_cs_queue_register *reg)
+ struct kbase_ioctl_cs_queue_register *reg)
{
kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
@@ -1512,7 +1491,7 @@ static int kbasep_cs_queue_register(struct kbase_context *kctx,
}
static int kbasep_cs_queue_register_ex(struct kbase_context *kctx,
- struct kbase_ioctl_cs_queue_register_ex *reg)
+ struct kbase_ioctl_cs_queue_register_ex *reg)
{
kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
@@ -1520,35 +1499,32 @@ static int kbasep_cs_queue_register_ex(struct kbase_context *kctx,
}
static int kbasep_cs_queue_terminate(struct kbase_context *kctx,
- struct kbase_ioctl_cs_queue_terminate *term)
+ struct kbase_ioctl_cs_queue_terminate *term)
{
kbase_csf_queue_terminate(kctx, term);
return 0;
}
-static int kbasep_cs_queue_bind(struct kbase_context *kctx,
- union kbase_ioctl_cs_queue_bind *bind)
+static int kbasep_cs_queue_bind(struct kbase_context *kctx, union kbase_ioctl_cs_queue_bind *bind)
{
return kbase_csf_queue_bind(kctx, bind);
}
-static int kbasep_cs_queue_kick(struct kbase_context *kctx,
- struct kbase_ioctl_cs_queue_kick *kick)
+static int kbasep_cs_queue_kick(struct kbase_context *kctx, struct kbase_ioctl_cs_queue_kick *kick)
{
return kbase_csf_queue_kick(kctx, kick);
}
-static int kbasep_cs_queue_group_create_1_6(
- struct kbase_context *kctx,
- union kbase_ioctl_cs_queue_group_create_1_6 *create)
+static int kbasep_cs_queue_group_create_1_6(struct kbase_context *kctx,
+ union kbase_ioctl_cs_queue_group_create_1_6 *create)
{
- int ret, i;
+ int ret;
+ size_t i;
union kbase_ioctl_cs_queue_group_create
new_create = { .in = {
.tiler_mask = create->in.tiler_mask,
- .fragment_mask =
- create->in.fragment_mask,
+ .fragment_mask = create->in.fragment_mask,
.compute_mask = create->in.compute_mask,
.cs_min = create->in.cs_min,
.priority = create->in.priority,
@@ -1575,7 +1551,8 @@ static int kbasep_cs_queue_group_create_1_6(
static int kbasep_cs_queue_group_create_1_18(struct kbase_context *kctx,
union kbase_ioctl_cs_queue_group_create_1_18 *create)
{
- int ret, i;
+ int ret;
+ size_t i;
union kbase_ioctl_cs_queue_group_create
new_create = { .in = {
.tiler_mask = create->in.tiler_mask,
@@ -1606,7 +1583,7 @@ static int kbasep_cs_queue_group_create_1_18(struct kbase_context *kctx,
}
static int kbasep_cs_queue_group_create(struct kbase_context *kctx,
- union kbase_ioctl_cs_queue_group_create *create)
+ union kbase_ioctl_cs_queue_group_create *create)
{
if (create->in.reserved != 0) {
dev_warn(kctx->kbdev->dev, "Invalid reserved field not 0 in queue group create\n");
@@ -1616,33 +1593,32 @@ static int kbasep_cs_queue_group_create(struct kbase_context *kctx,
}
static int kbasep_cs_queue_group_terminate(struct kbase_context *kctx,
- struct kbase_ioctl_cs_queue_group_term *term)
+ struct kbase_ioctl_cs_queue_group_term *term)
{
kbase_csf_queue_group_terminate(kctx, term->group_handle);
return 0;
}
-static int kbasep_kcpu_queue_new(struct kbase_context *kctx,
- struct kbase_ioctl_kcpu_queue_new *new)
+static int kbasep_kcpu_queue_new(struct kbase_context *kctx, struct kbase_ioctl_kcpu_queue_new *new)
{
return kbase_csf_kcpu_queue_new(kctx, new);
}
static int kbasep_kcpu_queue_delete(struct kbase_context *kctx,
- struct kbase_ioctl_kcpu_queue_delete *delete)
+ struct kbase_ioctl_kcpu_queue_delete *delete)
{
return kbase_csf_kcpu_queue_delete(kctx, delete);
}
static int kbasep_kcpu_queue_enqueue(struct kbase_context *kctx,
- struct kbase_ioctl_kcpu_queue_enqueue *enqueue)
+ struct kbase_ioctl_kcpu_queue_enqueue *enqueue)
{
return kbase_csf_kcpu_queue_enqueue(kctx, enqueue);
}
static int kbasep_cs_tiler_heap_init(struct kbase_context *kctx,
- union kbase_ioctl_cs_tiler_heap_init *heap_init)
+ union kbase_ioctl_cs_tiler_heap_init *heap_init)
{
if (heap_init->in.group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)
return -EINVAL;
@@ -1672,13 +1648,13 @@ static int kbasep_cs_tiler_heap_init_1_13(struct kbase_context *kctx,
}
static int kbasep_cs_tiler_heap_term(struct kbase_context *kctx,
- struct kbase_ioctl_cs_tiler_heap_term *heap_term)
+ struct kbase_ioctl_cs_tiler_heap_term *heap_term)
{
return kbase_csf_tiler_heap_term(kctx, heap_term->gpu_heap_va);
}
static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
- union kbase_ioctl_cs_get_glb_iface *param)
+ union kbase_ioctl_cs_get_glb_iface *param)
{
struct basep_cs_stream_control *stream_data = NULL;
struct basep_cs_group_control *group_data = NULL;
@@ -1690,8 +1666,7 @@ static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
if (max_group_num > MAX_SUPPORTED_CSGS)
return -EINVAL;
- if (max_total_stream_num >
- MAX_SUPPORTED_CSGS * MAX_SUPPORTED_STREAMS_PER_GROUP)
+ if (max_total_stream_num > MAX_SUPPORTED_CSGS * MAX_SUPPORTED_STREAMS_PER_GROUP)
return -EINVAL;
user_groups = u64_to_user_ptr(param->in.groups_ptr);
@@ -1701,8 +1676,7 @@ static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
if (!user_groups)
err = -EINVAL;
else {
- group_data = kcalloc(max_group_num,
- sizeof(*group_data), GFP_KERNEL);
+ group_data = kcalloc(max_group_num, sizeof(*group_data), GFP_KERNEL);
if (!group_data)
err = -ENOMEM;
}
@@ -1712,8 +1686,8 @@ static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
if (!user_streams)
err = -EINVAL;
else {
- stream_data = kcalloc(max_total_stream_num,
- sizeof(*stream_data), GFP_KERNEL);
+ stream_data =
+ kcalloc(max_total_stream_num, sizeof(*stream_data), GFP_KERNEL);
if (!stream_data)
err = -ENOMEM;
}
@@ -1721,21 +1695,19 @@ static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
if (!err) {
param->out.total_stream_num = kbase_csf_firmware_get_glb_iface(
- kctx->kbdev, group_data, max_group_num, stream_data,
- max_total_stream_num, &param->out.glb_version,
- &param->out.features, &param->out.group_num,
+ kctx->kbdev, group_data, max_group_num, stream_data, max_total_stream_num,
+ &param->out.glb_version, &param->out.features, &param->out.group_num,
&param->out.prfcnt_size, &param->out.instr_features);
if (copy_to_user(user_groups, group_data,
- MIN(max_group_num, param->out.group_num) *
- sizeof(*group_data)))
+ MIN(max_group_num, param->out.group_num) * sizeof(*group_data)))
err = -EFAULT;
}
if (!err)
if (copy_to_user(user_streams, stream_data,
- MIN(max_total_stream_num, param->out.total_stream_num) *
- sizeof(*stream_data)))
+ MIN(max_total_stream_num, param->out.total_stream_num) *
+ sizeof(*stream_data)))
err = -EFAULT;
kfree(group_data);
@@ -1744,10 +1716,9 @@ static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
}
static int kbasep_ioctl_cs_cpu_queue_dump(struct kbase_context *kctx,
- struct kbase_ioctl_cs_cpu_queue_info *cpu_queue_info)
+ struct kbase_ioctl_cs_cpu_queue_info *cpu_queue_info)
{
- return kbase_csf_cpu_queue_dump(kctx, cpu_queue_info->buffer,
- cpu_queue_info->size);
+ return kbase_csf_cpu_queue_dump_buffer(kctx, cpu_queue_info->buffer, cpu_queue_info->size);
}
static int kbase_ioctl_read_user_page(struct kbase_context *kctx,
@@ -1768,7 +1739,7 @@ static int kbase_ioctl_read_user_page(struct kbase_context *kctx,
if (!kbdev->pm.backend.gpu_powered)
user_page->out.val_lo = POWER_DOWN_LATEST_FLUSH_VALUE;
else
- user_page->out.val_lo = kbase_reg_read(kbdev, USER_REG(LATEST_FLUSH));
+ user_page->out.val_lo = kbase_reg_read32(kbdev, USER_ENUM(LATEST_FLUSH));
user_page->out.val_hi = 0;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -1776,8 +1747,9 @@ static int kbase_ioctl_read_user_page(struct kbase_context *kctx,
}
#endif /* MALI_USE_CSF */
-static int kbasep_ioctl_context_priority_check(struct kbase_context *kctx,
- struct kbase_ioctl_context_priority_check *priority_check)
+static int
+kbasep_ioctl_context_priority_check(struct kbase_context *kctx,
+ struct kbase_ioctl_context_priority_check *priority_check)
{
#if MALI_USE_CSF
priority_check->priority = kbase_csf_priority_check(kctx->kbdev, priority_check->priority);
@@ -1789,76 +1761,71 @@ static int kbasep_ioctl_context_priority_check(struct kbase_context *kctx,
return 0;
}
-#define KBASE_HANDLE_IOCTL(cmd, function, arg) \
- do { \
- int ret; \
- BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
- dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
- ret = function(arg); \
- dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
- #function); \
- return ret; \
+#define KBASE_HANDLE_IOCTL(cmd, function, arg) \
+ do { \
+ int ret; \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
+ dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
+ ret = function(arg); \
+ dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, #function); \
+ return ret; \
} while (0)
-#define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg) \
- do { \
- type param; \
- int ret, err; \
- dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
- BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
- BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
- err = copy_from_user(&param, uarg, sizeof(param)); \
- if (err) \
- return -EFAULT; \
- ret = function(arg, &param); \
- dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
- #function); \
- return ret; \
+#define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg) \
+ do { \
+ type param; \
+ int ret, err; \
+ dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ err = copy_from_user(&param, uarg, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ ret = function(arg, &param); \
+ dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, #function); \
+ return ret; \
} while (0)
-#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg) \
- do { \
- type param; \
- int ret, err; \
- dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
- BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
- BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
- memset(&param, 0, sizeof(param)); \
- ret = function(arg, &param); \
- err = copy_to_user(uarg, &param, sizeof(param)); \
- if (err) \
- return -EFAULT; \
- dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
- #function); \
- return ret; \
+#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg) \
+ do { \
+ type param; \
+ int ret, err; \
+ dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ memset(&param, 0, sizeof(param)); \
+ ret = function(arg, &param); \
+ err = copy_to_user(uarg, &param, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, #function); \
+ return ret; \
} while (0)
-#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg) \
- do { \
- type param; \
- int ret, err; \
- dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
- BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE | _IOC_READ)); \
- BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
- err = copy_from_user(&param, uarg, sizeof(param)); \
- if (err) \
- return -EFAULT; \
- ret = function(arg, &param); \
- err = copy_to_user(uarg, &param, sizeof(param)); \
- if (err) \
- return -EFAULT; \
- dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
- #function); \
- return ret; \
+#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg) \
+ do { \
+ type param; \
+ int ret, err; \
+ dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
+ BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE | _IOC_READ)); \
+ BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
+ err = copy_from_user(&param, uarg, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ ret = function(arg, &param); \
+ err = copy_to_user(uarg, &param, sizeof(param)); \
+ if (err) \
+ return -EFAULT; \
+ dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, #function); \
+ return ret; \
} while (0)
-static int kbasep_ioctl_set_limited_core_count(struct kbase_context *kctx,
- struct kbase_ioctl_set_limited_core_count *set_limited_core_count)
+static int kbasep_ioctl_set_limited_core_count(
+ struct kbase_context *kctx,
+ struct kbase_ioctl_set_limited_core_count *set_limited_core_count)
{
- const u64 shader_core_mask =
- kbase_pm_get_present_cores(kctx->kbdev, KBASE_PM_CORE_SHADER);
- const u64 limited_core_mask =
- ((u64)1 << (set_limited_core_count->max_core_count)) - 1;
+ const u64 shader_core_mask = kbase_pm_get_present_cores(kctx->kbdev, KBASE_PM_CORE_SHADER);
+ const u64 limited_core_mask = ((u64)1 << (set_limited_core_count->max_core_count)) - 1;
if ((shader_core_mask & limited_core_mask) == 0) {
/* At least one shader core must be available after applying the mask */
@@ -1878,38 +1845,31 @@ static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsign
/* Only these ioctls are available until setup is complete */
switch (cmd) {
case KBASE_IOCTL_VERSION_CHECK:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
- kbase_api_handshake,
- struct kbase_ioctl_version_check,
- kfile);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK, kbase_api_handshake,
+ struct kbase_ioctl_version_check, kfile);
break;
case KBASE_IOCTL_VERSION_CHECK_RESERVED:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK_RESERVED,
- kbase_api_handshake_dummy,
- struct kbase_ioctl_version_check,
- kfile);
+ kbase_api_handshake_dummy,
+ struct kbase_ioctl_version_check, kfile);
break;
case KBASE_IOCTL_SET_FLAGS:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
- kbase_api_set_flags,
- struct kbase_ioctl_set_flags,
- kfile);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS, kbase_api_set_flags,
+ struct kbase_ioctl_set_flags, kfile);
break;
case KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO:
- KBASE_HANDLE_IOCTL_INOUT(
- KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO,
- kbase_api_kinstr_prfcnt_enum_info,
- struct kbase_ioctl_kinstr_prfcnt_enum_info, kfile);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO,
+ kbase_api_kinstr_prfcnt_enum_info,
+ struct kbase_ioctl_kinstr_prfcnt_enum_info, kfile);
break;
case KBASE_IOCTL_KINSTR_PRFCNT_SETUP:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_PRFCNT_SETUP,
kbase_api_kinstr_prfcnt_setup,
- union kbase_ioctl_kinstr_prfcnt_setup,
- kfile);
+ union kbase_ioctl_kinstr_prfcnt_setup, kfile);
break;
case KBASE_IOCTL_GET_GPUPROPS:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS, kbase_api_get_gpuprops,
@@ -1925,24 +1885,18 @@ static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsign
switch (cmd) {
#if !MALI_USE_CSF
case KBASE_IOCTL_JOB_SUBMIT:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
- kbase_api_job_submit,
- struct kbase_ioctl_job_submit,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT, kbase_api_job_submit,
+ struct kbase_ioctl_job_submit, kctx);
break;
#endif /* !MALI_USE_CSF */
#if !MALI_USE_CSF
case KBASE_IOCTL_POST_TERM:
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
- kbase_api_post_term,
- kctx);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM, kbase_api_post_term, kctx);
break;
#endif /* !MALI_USE_CSF */
case KBASE_IOCTL_MEM_ALLOC:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
- kbase_api_mem_alloc,
- union kbase_ioctl_mem_alloc,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC, kbase_api_mem_alloc,
+ union kbase_ioctl_mem_alloc, kctx);
break;
#if MALI_USE_CSF
case KBASE_IOCTL_MEM_ALLOC_EX:
@@ -1951,229 +1905,159 @@ static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsign
break;
#endif
case KBASE_IOCTL_MEM_QUERY:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
- kbase_api_mem_query,
- union kbase_ioctl_mem_query,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY, kbase_api_mem_query,
+ union kbase_ioctl_mem_query, kctx);
break;
case KBASE_IOCTL_MEM_FREE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
- kbase_api_mem_free,
- struct kbase_ioctl_mem_free,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE, kbase_api_mem_free,
+ struct kbase_ioctl_mem_free, kctx);
break;
case KBASE_IOCTL_DISJOINT_QUERY:
- KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
- kbase_api_disjoint_query,
- struct kbase_ioctl_disjoint_query,
- kctx);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY, kbase_api_disjoint_query,
+ struct kbase_ioctl_disjoint_query, kctx);
break;
case KBASE_IOCTL_GET_DDK_VERSION:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
- kbase_api_get_ddk_version,
- struct kbase_ioctl_get_ddk_version,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION, kbase_api_get_ddk_version,
+ struct kbase_ioctl_get_ddk_version, kctx);
break;
case KBASE_IOCTL_MEM_JIT_INIT:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
- kbase_api_mem_jit_init,
- struct kbase_ioctl_mem_jit_init,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT, kbase_api_mem_jit_init,
+ struct kbase_ioctl_mem_jit_init, kctx);
break;
case KBASE_IOCTL_MEM_EXEC_INIT:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
- kbase_api_mem_exec_init,
- struct kbase_ioctl_mem_exec_init,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT, kbase_api_mem_exec_init,
+ struct kbase_ioctl_mem_exec_init, kctx);
break;
case KBASE_IOCTL_MEM_SYNC:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
- kbase_api_mem_sync,
- struct kbase_ioctl_mem_sync,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC, kbase_api_mem_sync,
+ struct kbase_ioctl_mem_sync, kctx);
break;
case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
- kbase_api_mem_find_cpu_offset,
- union kbase_ioctl_mem_find_cpu_offset,
- kctx);
+ kbase_api_mem_find_cpu_offset,
+ union kbase_ioctl_mem_find_cpu_offset, kctx);
break;
case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
- kbase_api_mem_find_gpu_start_and_offset,
- union kbase_ioctl_mem_find_gpu_start_and_offset,
- kctx);
+ kbase_api_mem_find_gpu_start_and_offset,
+ union kbase_ioctl_mem_find_gpu_start_and_offset, kctx);
break;
case KBASE_IOCTL_GET_CONTEXT_ID:
- KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
- kbase_api_get_context_id,
- struct kbase_ioctl_get_context_id,
- kctx);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID, kbase_api_get_context_id,
+ struct kbase_ioctl_get_context_id, kctx);
break;
case KBASE_IOCTL_TLSTREAM_ACQUIRE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
- kbase_api_tlstream_acquire,
- struct kbase_ioctl_tlstream_acquire,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE, kbase_api_tlstream_acquire,
+ struct kbase_ioctl_tlstream_acquire, kctx);
break;
case KBASE_IOCTL_TLSTREAM_FLUSH:
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
- kbase_api_tlstream_flush,
- kctx);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH, kbase_api_tlstream_flush, kctx);
break;
case KBASE_IOCTL_MEM_COMMIT:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
- kbase_api_mem_commit,
- struct kbase_ioctl_mem_commit,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT, kbase_api_mem_commit,
+ struct kbase_ioctl_mem_commit, kctx);
break;
case KBASE_IOCTL_MEM_ALIAS:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
- kbase_api_mem_alias,
- union kbase_ioctl_mem_alias,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS, kbase_api_mem_alias,
+ union kbase_ioctl_mem_alias, kctx);
break;
case KBASE_IOCTL_MEM_IMPORT:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
- kbase_api_mem_import,
- union kbase_ioctl_mem_import,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT, kbase_api_mem_import,
+ union kbase_ioctl_mem_import, kctx);
break;
case KBASE_IOCTL_MEM_FLAGS_CHANGE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
- kbase_api_mem_flags_change,
- struct kbase_ioctl_mem_flags_change,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE, kbase_api_mem_flags_change,
+ struct kbase_ioctl_mem_flags_change, kctx);
break;
case KBASE_IOCTL_STREAM_CREATE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
- kbase_api_stream_create,
- struct kbase_ioctl_stream_create,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE, kbase_api_stream_create,
+ struct kbase_ioctl_stream_create, kctx);
break;
case KBASE_IOCTL_FENCE_VALIDATE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
- kbase_api_fence_validate,
- struct kbase_ioctl_fence_validate,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE, kbase_api_fence_validate,
+ struct kbase_ioctl_fence_validate, kctx);
break;
case KBASE_IOCTL_MEM_PROFILE_ADD:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
- kbase_api_mem_profile_add,
- struct kbase_ioctl_mem_profile_add,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD, kbase_api_mem_profile_add,
+ struct kbase_ioctl_mem_profile_add, kctx);
break;
#if !MALI_USE_CSF
case KBASE_IOCTL_SOFT_EVENT_UPDATE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
- kbase_api_soft_event_update,
- struct kbase_ioctl_soft_event_update,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE, kbase_api_soft_event_update,
+ struct kbase_ioctl_soft_event_update, kctx);
break;
#endif /* !MALI_USE_CSF */
case KBASE_IOCTL_STICKY_RESOURCE_MAP:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
- kbase_api_sticky_resource_map,
- struct kbase_ioctl_sticky_resource_map,
- kctx);
+ kbase_api_sticky_resource_map,
+ struct kbase_ioctl_sticky_resource_map, kctx);
break;
case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
- kbase_api_sticky_resource_unmap,
- struct kbase_ioctl_sticky_resource_unmap,
- kctx);
+ kbase_api_sticky_resource_unmap,
+ struct kbase_ioctl_sticky_resource_unmap, kctx);
break;
- /* Instrumentation. */
+ /* Instrumentation. */
#if !MALI_USE_CSF
case KBASE_IOCTL_KINSTR_JM_FD:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_JM_FD,
- kbase_api_kinstr_jm_fd,
- union kbase_kinstr_jm_fd,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_JM_FD, kbase_api_kinstr_jm_fd,
+ union kbase_kinstr_jm_fd, kctx);
break;
#endif
- case KBASE_IOCTL_HWCNT_READER_SETUP:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
- kbase_api_hwcnt_reader_setup,
- struct kbase_ioctl_hwcnt_reader_setup,
- kctx);
- break;
case KBASE_IOCTL_GET_CPU_GPU_TIMEINFO:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_GET_CPU_GPU_TIMEINFO,
- kbase_api_get_cpu_gpu_timeinfo,
- union kbase_ioctl_get_cpu_gpu_timeinfo,
- kctx);
+ kbase_api_get_cpu_gpu_timeinfo,
+ union kbase_ioctl_get_cpu_gpu_timeinfo, kctx);
break;
#if IS_ENABLED(CONFIG_MALI_NO_MALI)
case KBASE_IOCTL_HWCNT_SET:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
- kbase_api_hwcnt_set,
- struct kbase_ioctl_hwcnt_values,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET, kbase_api_hwcnt_set,
+ struct kbase_ioctl_hwcnt_values, kctx);
break;
#endif /* CONFIG_MALI_NO_MALI */
#ifdef CONFIG_MALI_CINSTR_GWT
case KBASE_IOCTL_CINSTR_GWT_START:
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
- kbase_gpu_gwt_start,
- kctx);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START, kbase_gpu_gwt_start, kctx);
break;
case KBASE_IOCTL_CINSTR_GWT_STOP:
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
- kbase_gpu_gwt_stop,
- kctx);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP, kbase_gpu_gwt_stop, kctx);
break;
case KBASE_IOCTL_CINSTR_GWT_DUMP:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
- kbase_gpu_gwt_dump,
- union kbase_ioctl_cinstr_gwt_dump,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP, kbase_gpu_gwt_dump,
+ union kbase_ioctl_cinstr_gwt_dump, kctx);
break;
#endif
#if MALI_USE_CSF
case KBASE_IOCTL_CS_EVENT_SIGNAL:
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_CS_EVENT_SIGNAL,
- kbasep_cs_event_signal,
- kctx);
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_CS_EVENT_SIGNAL, kbasep_cs_event_signal, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_REGISTER:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER,
- kbasep_cs_queue_register,
- struct kbase_ioctl_cs_queue_register,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER, kbasep_cs_queue_register,
+ struct kbase_ioctl_cs_queue_register, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_REGISTER_EX:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER_EX,
- kbasep_cs_queue_register_ex,
- struct kbase_ioctl_cs_queue_register_ex,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER_EX, kbasep_cs_queue_register_ex,
+ struct kbase_ioctl_cs_queue_register_ex, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_TERMINATE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_TERMINATE,
- kbasep_cs_queue_terminate,
- struct kbase_ioctl_cs_queue_terminate,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_TERMINATE, kbasep_cs_queue_terminate,
+ struct kbase_ioctl_cs_queue_terminate, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_BIND:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_BIND,
- kbasep_cs_queue_bind,
- union kbase_ioctl_cs_queue_bind,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_BIND, kbasep_cs_queue_bind,
+ union kbase_ioctl_cs_queue_bind, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_KICK:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_KICK,
- kbasep_cs_queue_kick,
- struct kbase_ioctl_cs_queue_kick,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_KICK, kbasep_cs_queue_kick,
+ struct kbase_ioctl_cs_queue_kick, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6:
- KBASE_HANDLE_IOCTL_INOUT(
- KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6,
- kbasep_cs_queue_group_create_1_6,
- union kbase_ioctl_cs_queue_group_create_1_6, kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6,
+ kbasep_cs_queue_group_create_1_6,
+ union kbase_ioctl_cs_queue_group_create_1_6, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18,
@@ -2182,39 +2066,29 @@ static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsign
break;
case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_GROUP_CREATE,
- kbasep_cs_queue_group_create,
- union kbase_ioctl_cs_queue_group_create,
- kctx);
+ kbasep_cs_queue_group_create,
+ union kbase_ioctl_cs_queue_group_create, kctx);
break;
case KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE,
- kbasep_cs_queue_group_terminate,
- struct kbase_ioctl_cs_queue_group_term,
- kctx);
+ kbasep_cs_queue_group_terminate,
+ struct kbase_ioctl_cs_queue_group_term, kctx);
break;
case KBASE_IOCTL_KCPU_QUEUE_CREATE:
- KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_KCPU_QUEUE_CREATE,
- kbasep_kcpu_queue_new,
- struct kbase_ioctl_kcpu_queue_new,
- kctx);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_KCPU_QUEUE_CREATE, kbasep_kcpu_queue_new,
+ struct kbase_ioctl_kcpu_queue_new, kctx);
break;
case KBASE_IOCTL_KCPU_QUEUE_DELETE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_DELETE,
- kbasep_kcpu_queue_delete,
- struct kbase_ioctl_kcpu_queue_delete,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_DELETE, kbasep_kcpu_queue_delete,
+ struct kbase_ioctl_kcpu_queue_delete, kctx);
break;
case KBASE_IOCTL_KCPU_QUEUE_ENQUEUE:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_ENQUEUE,
- kbasep_kcpu_queue_enqueue,
- struct kbase_ioctl_kcpu_queue_enqueue,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, kbasep_kcpu_queue_enqueue,
+ struct kbase_ioctl_kcpu_queue_enqueue, kctx);
break;
case KBASE_IOCTL_CS_TILER_HEAP_INIT:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT,
- kbasep_cs_tiler_heap_init,
- union kbase_ioctl_cs_tiler_heap_init,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT, kbasep_cs_tiler_heap_init,
+ union kbase_ioctl_cs_tiler_heap_init, kctx);
break;
case KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13,
@@ -2222,22 +2096,16 @@ static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsign
union kbase_ioctl_cs_tiler_heap_init_1_13, kctx);
break;
case KBASE_IOCTL_CS_TILER_HEAP_TERM:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_TILER_HEAP_TERM,
- kbasep_cs_tiler_heap_term,
- struct kbase_ioctl_cs_tiler_heap_term,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_TILER_HEAP_TERM, kbasep_cs_tiler_heap_term,
+ struct kbase_ioctl_cs_tiler_heap_term, kctx);
break;
case KBASE_IOCTL_CS_GET_GLB_IFACE:
- KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_GET_GLB_IFACE,
- kbase_ioctl_cs_get_glb_iface,
- union kbase_ioctl_cs_get_glb_iface,
- kctx);
+ KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_GET_GLB_IFACE, kbase_ioctl_cs_get_glb_iface,
+ union kbase_ioctl_cs_get_glb_iface, kctx);
break;
case KBASE_IOCTL_CS_CPU_QUEUE_DUMP:
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_CPU_QUEUE_DUMP,
- kbasep_ioctl_cs_cpu_queue_dump,
- struct kbase_ioctl_cs_cpu_queue_info,
- kctx);
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_CPU_QUEUE_DUMP, kbasep_ioctl_cs_cpu_queue_dump,
+ struct kbase_ioctl_cs_cpu_queue_info, kctx);
break;
/* This IOCTL will be kept for backward compatibility */
case KBASE_IOCTL_READ_USER_PAGE:
@@ -2247,23 +2115,19 @@ static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsign
#endif /* MALI_USE_CSF */
#if MALI_UNIT_TEST
case KBASE_IOCTL_TLSTREAM_STATS:
- KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
- kbase_api_tlstream_stats,
- struct kbase_ioctl_tlstream_stats,
- kctx);
+ KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS, kbase_api_tlstream_stats,
+ struct kbase_ioctl_tlstream_stats, kctx);
break;
#endif /* MALI_UNIT_TEST */
case KBASE_IOCTL_CONTEXT_PRIORITY_CHECK:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CONTEXT_PRIORITY_CHECK,
- kbasep_ioctl_context_priority_check,
- struct kbase_ioctl_context_priority_check,
- kctx);
+ kbasep_ioctl_context_priority_check,
+ struct kbase_ioctl_context_priority_check, kctx);
break;
case KBASE_IOCTL_SET_LIMITED_CORE_COUNT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_LIMITED_CORE_COUNT,
- kbasep_ioctl_set_limited_core_count,
- struct kbase_ioctl_set_limited_core_count,
- kctx);
+ kbasep_ioctl_set_limited_core_count,
+ struct kbase_ioctl_set_limited_core_count, kctx);
break;
}
@@ -2291,12 +2155,13 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
{
struct kbase_file *const kfile = filp->private_data;
struct kbase_context *kctx;
- struct base_csf_notification event_data = {
- .type = BASE_CSF_NOTIFICATION_EVENT };
+ struct base_csf_notification event_data = { .type = BASE_CSF_NOTIFICATION_EVENT };
const size_t data_size = sizeof(event_data);
bool read_event = false, read_error = false;
ssize_t err = 0;
+ CSTD_UNUSED(f_pos);
+
if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
return -EPERM;
@@ -2317,8 +2182,7 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
read_error = kbase_csf_event_read_error(kctx, &event_data);
if (!read_event && !read_error) {
- bool dump = kbase_csf_cpu_queue_read_dump_req(kctx,
- &event_data);
+ bool dump = kbase_csf_cpu_queue_read_dump_req(kctx, &event_data);
/* This condition is not treated as an error.
* It is possible that event handling thread was woken up due
* to a fault/error that occurred for a queue group, but before
@@ -2326,13 +2190,11 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
* queue group was already terminated by the userspace.
*/
if (!dump)
- dev_dbg(kctx->kbdev->dev,
- "Neither event nor error signaled");
+ dev_dbg(kctx->kbdev->dev, "Neither event nor error signaled");
}
if (copy_to_user(buf, &event_data, data_size) != 0) {
- dev_warn(kctx->kbdev->dev,
- "Failed to copy data\n");
+ dev_warn(kctx->kbdev->dev, "Failed to copy data\n");
err = -EFAULT;
goto out;
}
@@ -2342,7 +2204,7 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
out:
kbase_file_dec_fops_count(kfile);
- return err ? err : data_size;
+ return err ? err : (ssize_t)data_size;
}
#else /* MALI_USE_CSF */
static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
@@ -2350,9 +2212,11 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
struct kbase_file *const kfile = filp->private_data;
struct kbase_context *kctx;
struct base_jd_event_v2 uevent;
- int out_count = 0;
+ size_t out_count = 0;
ssize_t err = 0;
+ CSTD_UNUSED(f_pos);
+
if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
return -EPERM;
@@ -2379,8 +2243,8 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
goto out;
}
- if (wait_event_interruptible(kctx->event_queue,
- kbase_event_pending(kctx)) != 0) {
+ if (wait_event_interruptible(kfile->event_queue,
+ kbase_event_pending(kctx)) != 0) {
err = -ERESTARTSYS;
goto out;
}
@@ -2405,7 +2269,7 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
out:
kbase_file_dec_fops_count(kfile);
- return err ? err : (out_count * sizeof(uevent));
+ return err ? err : (ssize_t)(out_count * sizeof(uevent));
}
#endif /* MALI_USE_CSF */
@@ -2434,7 +2298,7 @@ static __poll_t kbase_poll(struct file *filp, poll_table *wait)
goto out;
}
- poll_wait(filp, &kctx->event_queue, wait);
+ poll_wait(filp, &kfile->event_queue, wait);
if (kbase_event_pending(kctx)) {
#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
ret = POLLIN | POLLRDNORM;
@@ -2451,35 +2315,36 @@ out:
void kbase_event_wakeup(struct kbase_context *kctx)
{
KBASE_DEBUG_ASSERT(kctx);
- dev_dbg(kctx->kbdev->dev, "Waking event queue for context %pK\n",
- (void *)kctx);
- wake_up_interruptible(&kctx->event_queue);
+ dev_dbg(kctx->kbdev->dev, "Waking event queue for context %pK\n", (void *)kctx);
+#ifdef CONFIG_MALI_DEBUG
+ if (WARN_ON_ONCE(!kctx->kfile))
+ return;
+#endif
+ wake_up_interruptible(&kctx->kfile->event_queue);
}
KBASE_EXPORT_TEST_API(kbase_event_wakeup);
#if MALI_USE_CSF
-int kbase_event_pending(struct kbase_context *ctx)
+int kbase_event_pending(struct kbase_context *kctx)
{
- KBASE_DEBUG_ASSERT(ctx);
+ KBASE_DEBUG_ASSERT(kctx);
- if (unlikely(!ctx))
+ if (unlikely(!kctx))
return -EPERM;
- return (atomic_read(&ctx->event_count) != 0) ||
- kbase_csf_event_error_pending(ctx) ||
- kbase_csf_cpu_queue_dump_needed(ctx);
+ return (atomic_read(&kctx->event_count) != 0) || kbase_csf_event_error_pending(kctx) ||
+ kbase_csf_cpu_queue_dump_needed(kctx);
}
#else
-int kbase_event_pending(struct kbase_context *ctx)
+int kbase_event_pending(struct kbase_context *kctx)
{
- KBASE_DEBUG_ASSERT(ctx);
+ KBASE_DEBUG_ASSERT(kctx);
- if (unlikely(!ctx))
+ if (unlikely(!kctx))
return -EPERM;
- return (atomic_read(&ctx->event_count) != 0) ||
- (atomic_read(&ctx->event_closed) != 0);
+ return (atomic_read(&kctx->event_count) != 0) || (atomic_read(&kctx->event_closed) != 0);
}
#endif
@@ -2515,9 +2380,9 @@ static int kbase_check_flags(int flags)
return 0;
}
-static unsigned long kbase_get_unmapped_area(struct file *const filp,
- const unsigned long addr, const unsigned long len,
- const unsigned long pgoff, const unsigned long flags)
+static unsigned long kbase_get_unmapped_area(struct file *const filp, const unsigned long addr,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
{
struct kbase_file *const kfile = filp->private_data;
struct kbase_context *kctx;
@@ -2568,10 +2433,12 @@ static ssize_t power_policy_show(struct device *dev, struct device_attribute *at
struct kbase_device *kbdev;
const struct kbase_pm_policy *current_policy;
const struct kbase_pm_policy *const *policy_list;
- int policy_count;
- int i;
+ uint policy_count;
+ uint i;
ssize_t ret = 0;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
@@ -2579,17 +2446,19 @@ static ssize_t power_policy_show(struct device *dev, struct device_attribute *at
current_policy = kbase_pm_get_policy(kbdev);
- policy_count = kbase_pm_list_policies(kbdev, &policy_list);
+ policy_count = (uint)kbase_pm_list_policies(kbdev, &policy_list);
- for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+ for (i = 0; i < policy_count && ret < (ssize_t)PAGE_SIZE; i++) {
if (policy_list[i] == current_policy)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "[%s] ",
+ policy_list[i]->name);
else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "%s ",
+ policy_list[i]->name);
}
- if (ret < PAGE_SIZE - 1) {
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ if (ret < (ssize_t)PAGE_SIZE - 1) {
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "\n");
} else {
buf[PAGE_SIZE - 2] = '\n';
buf[PAGE_SIZE - 1] = '\0';
@@ -2614,20 +2483,23 @@ static ssize_t power_policy_show(struct device *dev, struct device_attribute *at
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t power_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t power_policy_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *kbdev;
const struct kbase_pm_policy *new_policy = NULL;
const struct kbase_pm_policy *const *policy_list;
- int policy_count;
- int i;
+ uint policy_count;
+ uint i;
+
+ CSTD_UNUSED(attr);
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- policy_count = kbase_pm_list_policies(kbdev, &policy_list);
+ policy_count = (uint)kbase_pm_list_policies(kbdev, &policy_list);
for (i = 0; i < policy_count; i++) {
if (sysfs_streq(policy_list[i]->name, buf)) {
@@ -2643,7 +2515,7 @@ static ssize_t power_policy_store(struct device *dev, struct device_attribute *a
kbase_pm_set_policy(kbdev, new_policy);
- return count;
+ return (ssize_t)count;
}
/*
@@ -2666,12 +2538,14 @@ static DEVICE_ATTR_RW(power_policy);
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t core_mask_show(struct device *dev, struct device_attribute *attr, char * const buf)
+static ssize_t core_mask_show(struct device *dev, struct device_attribute *attr, char *const buf)
{
struct kbase_device *kbdev;
unsigned long flags;
ssize_t ret = 0;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
@@ -2680,30 +2554,23 @@ static ssize_t core_mask_show(struct device *dev, struct device_attribute *attr,
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
#if MALI_USE_CSF
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current debug core mask : 0x%llX\n",
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "Current debug core mask : 0x%llX\n",
kbdev->pm.debug_core_mask);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current desired core mask : 0x%llX\n",
- kbase_pm_ca_get_core_mask(kbdev));
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current in use core mask : 0x%llX\n",
- kbdev->pm.backend.shaders_avail);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret),
+ "Current desired core mask : 0x%llX\n", kbase_pm_ca_get_core_mask(kbdev));
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret),
+ "Current in use core mask : 0x%llX\n", kbdev->pm.backend.shaders_avail);
#else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current core mask (JS0) : 0x%llX\n",
- kbdev->pm.debug_core_mask[0]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current core mask (JS1) : 0x%llX\n",
- kbdev->pm.debug_core_mask[1]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Current core mask (JS2) : 0x%llX\n",
- kbdev->pm.debug_core_mask[2]);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "Current core mask (JS0) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[0]);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "Current core mask (JS1) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[1]);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "Current core mask (JS2) : 0x%llX\n",
+ kbdev->pm.debug_core_mask[2]);
#endif /* MALI_USE_CSF */
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "Available core mask : 0x%llX\n",
- kbdev->gpu_props.props.raw_props.shader_present);
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "Available core mask : 0x%llX\n",
+ kbdev->gpu_props.shader_present);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -2722,22 +2589,25 @@ static ssize_t core_mask_show(struct device *dev, struct device_attribute *attr,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct kbase_device *kbdev;
#if MALI_USE_CSF
u64 new_core_mask;
#else
u64 new_core_mask[3];
- u64 group0_core_mask;
+ u64 group_core_mask;
int i;
#endif /* MALI_USE_CSF */
int items;
- ssize_t err = count;
+ ssize_t err = (ssize_t)count;
unsigned long flags;
u64 shader_present;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
@@ -2747,21 +2617,19 @@ static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr
items = sscanf(buf, "%llx", &new_core_mask);
if (items != 1) {
- dev_err(kbdev->dev,
- "Couldn't process core mask write operation.\n"
- "Use format <core_mask>\n");
+ dev_err(kbdev->dev, "Couldn't process core mask write operation.\n"
+ "Use format <core_mask>\n");
err = -EINVAL;
goto end;
}
#else
- items = sscanf(buf, "%llx %llx %llx",
- &new_core_mask[0], &new_core_mask[1],
- &new_core_mask[2]);
+ items = sscanf(buf, "%llx %llx %llx", &new_core_mask[0], &new_core_mask[1],
+ &new_core_mask[2]);
if (items != 1 && items != 3) {
dev_err(kbdev->dev, "Couldn't process core mask write operation.\n"
- "Use format <core_mask>\n"
- "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
+ "Use format <core_mask>\n"
+ "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
err = -EINVAL;
goto end;
}
@@ -2773,7 +2641,7 @@ static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr
mutex_lock(&kbdev->pm.lock);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- shader_present = kbdev->gpu_props.props.raw_props.shader_present;
+ shader_present = kbdev->gpu_props.shader_present;
#if MALI_USE_CSF
if ((new_core_mask & shader_present) != new_core_mask) {
@@ -2783,12 +2651,10 @@ static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr
err = -EINVAL;
goto unlock;
- } else if (!(new_core_mask & shader_present &
- kbdev->pm.backend.ca_cores_enabled)) {
+ } else if (!(new_core_mask & shader_present & kbdev->pm.backend.ca_cores_enabled)) {
dev_err(dev,
"Invalid core mask 0x%llX: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX\n",
- new_core_mask,
- kbdev->gpu_props.props.raw_props.shader_present,
+ new_core_mask, kbdev->gpu_props.shader_present,
kbdev->pm.backend.ca_cores_enabled);
err = -EINVAL;
goto unlock;
@@ -2797,44 +2663,44 @@ static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr
if (kbdev->pm.debug_core_mask != new_core_mask)
kbase_pm_set_debug_core_mask(kbdev, new_core_mask);
#else
- group0_core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+ group_core_mask = kbdev->gpu_props.coherency_info.group.core_mask;
for (i = 0; i < 3; ++i) {
if ((new_core_mask[i] & shader_present) != new_core_mask[i]) {
- dev_err(dev, "Invalid core mask 0x%llX for JS %d: Includes non-existent cores (present = 0x%llX)",
- new_core_mask[i], i, shader_present);
+ dev_err(dev,
+ "Invalid core mask 0x%llX for JS %d: Includes non-existent cores (present = 0x%llX)",
+ new_core_mask[i], i, shader_present);
err = -EINVAL;
goto unlock;
- } else if (!(new_core_mask[i] & shader_present & kbdev->pm.backend.ca_cores_enabled)) {
- dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX\n",
- new_core_mask[i], i,
- kbdev->gpu_props.props.raw_props.shader_present,
- kbdev->pm.backend.ca_cores_enabled);
+ } else if (!(new_core_mask[i] & shader_present &
+ kbdev->pm.backend.ca_cores_enabled)) {
+ dev_err(dev,
+ "Invalid core mask 0x%llX for JS %d: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX\n",
+ new_core_mask[i], i, kbdev->gpu_props.shader_present,
+ kbdev->pm.backend.ca_cores_enabled);
err = -EINVAL;
goto unlock;
-
- } else if (!(new_core_mask[i] & group0_core_mask)) {
- dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with group 0 core mask 0x%llX\n",
- new_core_mask[i], i, group0_core_mask);
+ } else if (!(new_core_mask[i] & group_core_mask)) {
+ dev_err(dev,
+ "Invalid core mask 0x%llX for JS %d: No intersection with group 0 core mask 0x%llX\n",
+ new_core_mask[i], i, group_core_mask);
err = -EINVAL;
goto unlock;
} else if (!(new_core_mask[i] & kbdev->gpu_props.curr_config.shader_present)) {
- dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with current core mask 0x%llX\n",
- new_core_mask[i], i, kbdev->gpu_props.curr_config.shader_present);
+ dev_err(dev,
+ "Invalid core mask 0x%llX for JS %d: No intersection with current core mask 0x%llX\n",
+ new_core_mask[i], i, kbdev->gpu_props.curr_config.shader_present);
err = -EINVAL;
goto unlock;
}
}
if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
- kbdev->pm.debug_core_mask[1] !=
- new_core_mask[1] ||
- kbdev->pm.debug_core_mask[2] !=
- new_core_mask[2]) {
-
- kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
- new_core_mask[1], new_core_mask[2]);
+ kbdev->pm.debug_core_mask[1] != new_core_mask[1] ||
+ kbdev->pm.debug_core_mask[2] != new_core_mask[2]) {
+ kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0], new_core_mask[1],
+ new_core_mask[2]);
}
#endif /* MALI_USE_CSF */
@@ -2872,25 +2738,24 @@ static DEVICE_ATTR_RW(core_mask);
*
* Return: count if the function succeeded. An error code on failure.
*/
-static ssize_t soft_job_timeout_store(struct device *dev,
- struct device_attribute *attr,
+static ssize_t soft_job_timeout_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct kbase_device *kbdev;
int soft_job_timeout_ms;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
- (soft_job_timeout_ms <= 0))
+ if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) || (soft_job_timeout_ms <= 0))
return -EINVAL;
- atomic_set(&kbdev->js_data.soft_job_timeout_ms,
- soft_job_timeout_ms);
+ atomic_set(&kbdev->js_data.soft_job_timeout_ms, soft_job_timeout_ms);
- return count;
+ return (ssize_t)count;
}
/**
@@ -2905,34 +2770,34 @@ static ssize_t soft_job_timeout_store(struct device *dev,
*
* Return: The number of bytes output to buf.
*/
-static ssize_t soft_job_timeout_show(struct device *dev,
- struct device_attribute *attr,
- char * const buf)
+static ssize_t soft_job_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- return scnprintf(buf, PAGE_SIZE, "%i\n",
- atomic_read(&kbdev->js_data.soft_job_timeout_ms));
+ return scnprintf(buf, PAGE_SIZE, "%i\n", atomic_read(&kbdev->js_data.soft_job_timeout_ms));
}
static DEVICE_ATTR_RW(soft_job_timeout);
-static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
- int default_ticks, u32 old_ticks)
+static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms, int default_ticks,
+ u32 old_ticks)
{
if (timeout_ms > 0) {
- u64 ticks = timeout_ms * 1000000ULL;
+ u64 ticks = (u64)timeout_ms * 1000000ULL;
do_div(ticks, kbdev->js_data.scheduling_period_ns);
if (!ticks)
return 1;
return ticks;
} else if (timeout_ms < 0) {
- return default_ticks;
+ return (u32)default_ticks;
} else {
return old_ticks;
}
@@ -2959,7 +2824,8 @@ static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t js_timeouts_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t js_timeouts_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct kbase_device *kbdev;
int items;
@@ -2972,15 +2838,16 @@ static ssize_t js_timeouts_store(struct device *dev, struct device_attribute *at
long js_reset_ms_cl;
long js_reset_ms_dumping;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
- &js_soft_stop_ms, &js_soft_stop_ms_cl,
- &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
- &js_hard_stop_ms_dumping, &js_reset_ms_ss,
- &js_reset_ms_cl, &js_reset_ms_dumping);
+ items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld", &js_soft_stop_ms,
+ &js_soft_stop_ms_cl, &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+ &js_hard_stop_ms_dumping, &js_reset_ms_ss, &js_reset_ms_cl,
+ &js_reset_ms_dumping);
if (items == 8) {
struct kbasep_js_device_data *js_data = &kbdev->js_data;
@@ -2988,49 +2855,43 @@ static ssize_t js_timeouts_store(struct device *dev, struct device_attribute *at
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
- js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
- default, js_data->ticks_name); \
- dev_dbg(kbdev->dev, "Overriding " #ticks_name \
- " with %lu ticks (%lu ms)\n", \
- (unsigned long)js_data->ticks_name, \
- ms_name); \
+#define UPDATE_TIMEOUT(ticks_name, ms_name, default) \
+ do { \
+ js_data->ticks_name = \
+ timeout_ms_to_ticks(kbdev, ms_name, default, js_data->ticks_name); \
+ dev_dbg(kbdev->dev, "Overriding " #ticks_name " with %lu ticks (%lu ms)\n", \
+ (unsigned long)js_data->ticks_name, ms_name); \
} while (0)
- UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
- DEFAULT_JS_SOFT_STOP_TICKS);
+ UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms, DEFAULT_JS_SOFT_STOP_TICKS);
UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
- DEFAULT_JS_SOFT_STOP_TICKS_CL);
+ DEFAULT_JS_SOFT_STOP_TICKS_CL);
UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
- DEFAULT_JS_HARD_STOP_TICKS_SS);
+ DEFAULT_JS_HARD_STOP_TICKS_SS);
UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
- DEFAULT_JS_HARD_STOP_TICKS_CL);
- UPDATE_TIMEOUT(hard_stop_ticks_dumping,
- js_hard_stop_ms_dumping,
- DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
- UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
- DEFAULT_JS_RESET_TICKS_SS);
- UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
- DEFAULT_JS_RESET_TICKS_CL);
+ DEFAULT_JS_HARD_STOP_TICKS_CL);
+ UPDATE_TIMEOUT(hard_stop_ticks_dumping, js_hard_stop_ms_dumping,
+ DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
+ UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss, DEFAULT_JS_RESET_TICKS_SS);
+ UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl, DEFAULT_JS_RESET_TICKS_CL);
UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
- DEFAULT_JS_RESET_TICKS_DUMPING);
+ DEFAULT_JS_RESET_TICKS_DUMPING);
kbase_js_set_timeouts(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- return count;
+ return (ssize_t)count;
}
- dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
- "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
- "Write 0 for no change, -1 to restore default timeout\n");
+ dev_err(kbdev->dev,
+ "Couldn't process js_timeouts write operation.\n"
+ "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
+ "Write 0 for no change, -1 to restore default timeout\n");
return -EINVAL;
}
-static unsigned long get_js_timeout_in_ms(
- u32 scheduling_period_ns,
- u32 ticks)
+static unsigned long get_js_timeout_in_ms(u32 scheduling_period_ns, u32 ticks)
{
u64 ms = (u64)ticks * scheduling_period_ns;
@@ -3052,7 +2913,7 @@ static unsigned long get_js_timeout_in_ms(
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *attr, char * const buf)
+static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *attr, char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
@@ -3066,15 +2927,15 @@ static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *att
unsigned long js_reset_ms_dumping;
u32 scheduling_period_ns;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
-#define GET_TIMEOUT(name) get_js_timeout_in_ms(\
- scheduling_period_ns, \
- kbdev->js_data.name)
+#define GET_TIMEOUT(name) get_js_timeout_in_ms(scheduling_period_ns, kbdev->js_data.name)
js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
@@ -3087,13 +2948,12 @@ static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *att
#undef GET_TIMEOUT
- ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
- js_soft_stop_ms, js_soft_stop_ms_cl,
- js_hard_stop_ms_ss, js_hard_stop_ms_cl,
- js_hard_stop_ms_dumping, js_reset_ms_ss,
- js_reset_ms_cl, js_reset_ms_dumping);
+ ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n", js_soft_stop_ms,
+ js_soft_stop_ms_cl, js_hard_stop_ms_ss, js_hard_stop_ms_cl,
+ js_hard_stop_ms_dumping, js_reset_ms_ss, js_reset_ms_cl,
+ js_reset_ms_dumping);
- if (ret >= PAGE_SIZE) {
+ if (ret >= (ssize_t)PAGE_SIZE) {
buf[PAGE_SIZE - 2] = '\n';
buf[PAGE_SIZE - 1] = '\0';
ret = PAGE_SIZE - 1;
@@ -3117,15 +2977,12 @@ static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *att
*/
static DEVICE_ATTR_RW(js_timeouts);
-static u32 get_new_js_timeout(
- u32 old_period,
- u32 old_ticks,
- u32 new_scheduling_period_ns)
+static u32 get_new_js_timeout(u32 old_period, u32 old_ticks, u32 new_scheduling_period_ns)
{
u64 ticks = (u64)old_period * (u64)old_ticks;
do_div(ticks, new_scheduling_period_ns);
- return ticks?ticks:1;
+ return ticks ? ticks : 1;
}
/**
@@ -3142,8 +2999,8 @@ static u32 get_new_js_timeout(
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t js_scheduling_period_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t js_scheduling_period_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *kbdev;
int ret;
@@ -3153,6 +3010,8 @@ static ssize_t js_scheduling_period_store(struct device *dev,
struct kbasep_js_device_data *js_data;
unsigned long flags;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3162,7 +3021,7 @@ static ssize_t js_scheduling_period_store(struct device *dev,
ret = kstrtouint(buf, 0, &js_scheduling_period);
if (ret || !js_scheduling_period) {
dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
- "Use format <js_scheduling_period_ms>\n");
+ "Use format <js_scheduling_period_ms>\n");
return -EINVAL;
}
@@ -3181,10 +3040,8 @@ static ssize_t js_scheduling_period_store(struct device *dev,
old_period = js_data->scheduling_period_ns;
#define SET_TIMEOUT(name) \
- (js_data->name = get_new_js_timeout(\
- old_period, \
- kbdev->js_data.name, \
- new_scheduling_period_ns))
+ (js_data->name = \
+ get_new_js_timeout(old_period, kbdev->js_data.name, new_scheduling_period_ns))
SET_TIMEOUT(soft_stop_ticks);
SET_TIMEOUT(soft_stop_ticks_cl);
@@ -3204,10 +3061,9 @@ static ssize_t js_scheduling_period_store(struct device *dev,
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&js_data->runpool_mutex);
- dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
- js_scheduling_period);
+ dev_dbg(kbdev->dev, "JS scheduling period: %dms\n", js_scheduling_period);
- return count;
+ return (ssize_t)count;
}
/**
@@ -3222,36 +3078,38 @@ static ssize_t js_scheduling_period_store(struct device *dev,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t js_scheduling_period_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t js_scheduling_period_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
u32 period;
ssize_t ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
period = kbdev->js_data.scheduling_period_ns;
- ret = scnprintf(buf, PAGE_SIZE, "%d\n",
- period / 1000000);
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", period / 1000000);
return ret;
}
static DEVICE_ATTR_RW(js_scheduling_period);
-
#ifdef CONFIG_MALI_DEBUG
-static ssize_t js_softstop_always_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t js_softstop_always_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *kbdev;
int ret;
int softstop_always;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3259,30 +3117,31 @@ static ssize_t js_softstop_always_store(struct device *dev,
ret = kstrtoint(buf, 0, &softstop_always);
if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
- "Use format <soft_stop_always>\n");
+ "Use format <soft_stop_always>\n");
return -EINVAL;
}
- kbdev->js_data.softstop_always = (bool) softstop_always;
+ kbdev->js_data.softstop_always = (bool)softstop_always;
dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
- (kbdev->js_data.softstop_always) ?
- "Enabled" : "Disabled");
- return count;
+ (kbdev->js_data.softstop_always) ? "Enabled" : "Disabled");
+ return (ssize_t)count;
}
-static ssize_t js_softstop_always_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t js_softstop_always_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
- if (ret >= PAGE_SIZE) {
+ if (ret >= (ssize_t)PAGE_SIZE) {
buf[PAGE_SIZE - 2] = '\n';
buf[PAGE_SIZE - 1] = '\0';
ret = PAGE_SIZE - 1;
@@ -3322,12 +3181,10 @@ static void kbasep_ktrace_dump_wrapper(struct kbase_device *kbdev)
}
/* Debug commands supported by the driver */
-static const struct kbasep_debug_command debug_commands[] = {
- {
- .str = "dumptrace",
- .func = &kbasep_ktrace_dump_wrapper,
- }
-};
+static const struct kbasep_debug_command debug_commands[] = { {
+ .str = "dumptrace",
+ .func = &kbasep_ktrace_dump_wrapper,
+} };
/**
* debug_command_show - Show callback for the debug_command sysfs file.
@@ -3341,21 +3198,25 @@ static const struct kbasep_debug_command debug_commands[] = {
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t debug_command_show(struct device *dev, struct device_attribute *attr, char * const buf)
+static ssize_t debug_command_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
- int i;
+ size_t i;
ssize_t ret = 0;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
+ for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < (ssize_t)PAGE_SIZE; i++)
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "%s\n",
+ debug_commands[i].str);
- if (ret >= PAGE_SIZE) {
+ if (ret >= (ssize_t)PAGE_SIZE) {
buf[PAGE_SIZE - 2] = '\n';
buf[PAGE_SIZE - 1] = '\0';
ret = PAGE_SIZE - 1;
@@ -3379,11 +3240,14 @@ static ssize_t debug_command_show(struct device *dev, struct device_attribute *a
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t debug_command_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t debug_command_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *kbdev;
int i;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
@@ -3392,7 +3256,7 @@ static ssize_t debug_command_store(struct device *dev, struct device_attribute *
for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
if (sysfs_streq(debug_commands[i].str, buf)) {
debug_commands[i].func(kbdev);
- return count;
+ return (ssize_t)count;
}
}
@@ -3425,77 +3289,61 @@ static DEVICE_ATTR_RW(debug_command);
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t gpuinfo_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t gpuinfo_show(struct device *dev, struct device_attribute *attr, char *buf)
{
static const struct gpu_product_id_name {
unsigned int id;
char *name;
} gpu_product_id_names[] = {
- { .id = GPU_ID2_PRODUCT_TMIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G71" },
- { .id = GPU_ID2_PRODUCT_THEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G72" },
- { .id = GPU_ID2_PRODUCT_TSIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G51" },
- { .id = GPU_ID2_PRODUCT_TNOX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G76" },
- { .id = GPU_ID2_PRODUCT_TDVX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G31" },
- { .id = GPU_ID2_PRODUCT_TGOX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G52" },
- { .id = GPU_ID2_PRODUCT_TTRX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G77" },
- { .id = GPU_ID2_PRODUCT_TBEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G78" },
- { .id = GPU_ID2_PRODUCT_TBAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G78AE" },
- { .id = GPU_ID2_PRODUCT_LBEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G68" },
- { .id = GPU_ID2_PRODUCT_TNAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G57" },
- { .id = GPU_ID2_PRODUCT_TODX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G710" },
- { .id = GPU_ID2_PRODUCT_LODX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G610" },
- { .id = GPU_ID2_PRODUCT_TGRX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G510" },
- { .id = GPU_ID2_PRODUCT_TVAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G310" },
- { .id = GPU_ID2_PRODUCT_LTIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-G620" },
+ { .id = GPU_ID_PRODUCT_TMIX, .name = "Mali-G71" },
+ { .id = GPU_ID_PRODUCT_THEX, .name = "Mali-G72" },
+ { .id = GPU_ID_PRODUCT_TSIX, .name = "Mali-G51" },
+ { .id = GPU_ID_PRODUCT_TNOX, .name = "Mali-G76" },
+ { .id = GPU_ID_PRODUCT_TDVX, .name = "Mali-G31" },
+ { .id = GPU_ID_PRODUCT_TGOX, .name = "Mali-G52" },
+ { .id = GPU_ID_PRODUCT_TTRX, .name = "Mali-G77" },
+ { .id = GPU_ID_PRODUCT_TBEX, .name = "Mali-G78" },
+ { .id = GPU_ID_PRODUCT_TBAX, .name = "Mali-G78AE" },
+ { .id = GPU_ID_PRODUCT_LBEX, .name = "Mali-G68" },
+ { .id = GPU_ID_PRODUCT_TNAX, .name = "Mali-G57" },
+ { .id = GPU_ID_PRODUCT_TODX, .name = "Mali-G710" },
+ { .id = GPU_ID_PRODUCT_LODX, .name = "Mali-G610" },
+ { .id = GPU_ID_PRODUCT_TGRX, .name = "Mali-G510" },
+ { .id = GPU_ID_PRODUCT_TVAX, .name = "Mali-G310" },
+ { .id = GPU_ID_PRODUCT_LTUX, .name = "Mali-G615" },
+ { .id = GPU_ID_PRODUCT_LTIX, .name = "Mali-G620" },
+ { .id = GPU_ID_PRODUCT_TKRX, .name = "Mali-TKRX" },
+ { .id = GPU_ID_PRODUCT_LKRX, .name = "Mali-LKRX" },
};
const char *product_name = "(Unknown Mali GPU)";
struct kbase_device *kbdev;
- u32 gpu_id;
- unsigned int product_id, product_id_mask;
+ u32 product_id;
+ u32 product_model;
unsigned int i;
struct kbase_gpu_props *gpu_props;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
gpu_props = &kbdev->gpu_props;
- gpu_id = gpu_props->props.raw_props.gpu_id;
- product_id = gpu_id >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
- product_id_mask = GPU_ID2_PRODUCT_MODEL >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ product_id = gpu_props->gpu_id.product_id;
+ product_model = gpu_props->gpu_id.product_model;
for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
const struct gpu_product_id_name *p = &gpu_product_id_names[i];
- if ((p->id & product_id_mask) ==
- (product_id & product_id_mask)) {
+ if (p->id == product_model) {
product_name = p->name;
break;
}
}
#if MALI_USE_CSF
- if ((product_id & product_id_mask) ==
- ((GPU_ID2_PRODUCT_TTUX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT) & product_id_mask)) {
- const bool rt_supported =
- GPU_FEATURES_RAY_TRACING_GET(gpu_props->props.raw_props.gpu_features);
+ if (product_model == GPU_ID_PRODUCT_TTUX) {
+ const bool rt_supported = gpu_props->gpu_features.ray_intersection;
const u8 nr_cores = gpu_props->num_cores;
		/* Mali-G715-Immortalis if 10 < number of cores with ray tracing supported.
@@ -3516,10 +3364,8 @@ static ssize_t gpuinfo_show(struct device *dev,
nr_cores);
}
- if ((product_id & product_id_mask) ==
- ((GPU_ID2_PRODUCT_TTIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT) & product_id_mask)) {
- const bool rt_supported =
- GPU_FEATURES_RAY_TRACING_GET(gpu_props->props.raw_props.gpu_features);
+ if (product_model == GPU_ID_PRODUCT_TTIX) {
+ const bool rt_supported = gpu_props->gpu_features.ray_intersection;
const u8 nr_cores = gpu_props->num_cores;
if ((nr_cores >= 10) && rt_supported)
@@ -3528,15 +3374,14 @@ static ssize_t gpuinfo_show(struct device *dev,
product_name = (nr_cores >= 6) ? "Mali-G720" : "Mali-G620";
dev_dbg(kbdev->dev, "GPU ID_Name: %s (ID: 0x%x), nr_cores(%u)\n", product_name,
- nr_cores, product_id & product_id_mask);
+ product_id, nr_cores);
}
+
#endif /* MALI_USE_CSF */
- return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n", product_name,
- kbdev->gpu_props.num_cores,
- (gpu_id & GPU_ID_VERSION_MAJOR) >> KBASE_GPU_ID_VERSION_MAJOR_SHIFT,
- (gpu_id & GPU_ID_VERSION_MINOR) >> KBASE_GPU_ID_VERSION_MINOR_SHIFT,
- product_id);
+ return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%08X\n", product_name,
+ kbdev->gpu_props.num_cores, gpu_props->gpu_id.version_major,
+ gpu_props->gpu_id.version_minor, product_id);
}
static DEVICE_ATTR_RO(gpuinfo);
@@ -3552,13 +3397,15 @@ static DEVICE_ATTR_RO(gpuinfo);
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t dvfs_period_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t dvfs_period_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct kbase_device *kbdev;
int ret;
int dvfs_period;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3566,14 +3413,14 @@ static ssize_t dvfs_period_store(struct device *dev,
ret = kstrtoint(buf, 0, &dvfs_period);
if (ret || dvfs_period <= 0) {
dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
- "Use format <dvfs_period_ms>\n");
+ "Use format <dvfs_period_ms>\n");
return -EINVAL;
}
- kbdev->pm.dvfs_period = dvfs_period;
+ kbdev->pm.dvfs_period = (u32)dvfs_period;
dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
- return count;
+ return (ssize_t)count;
}
/**
@@ -3587,12 +3434,13 @@ static ssize_t dvfs_period_store(struct device *dev,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t dvfs_period_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t dvfs_period_show(struct device *dev, struct device_attribute *attr, char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3649,7 +3497,7 @@ int kbase_pm_gpu_freq_init(struct kbase_device *kbdev)
dev_dbg(kbdev->dev, "Lowest frequency identified is %llu kHz", kbdev->lowest_gpu_freq_khz);
dev_dbg(kbdev->dev,
"Setting default highest frequency to %u kHz (pending devfreq initialization",
- kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
+ kbdev->gpu_props.gpu_freq_khz_max);
return 0;
}
@@ -3671,8 +3519,8 @@ int kbase_pm_gpu_freq_init(struct kbase_device *kbdev)
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t pm_poweroff_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pm_poweroff_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct kbase_device *kbdev;
struct kbasep_pm_tick_timer_state *stt;
@@ -3681,16 +3529,18 @@ static ssize_t pm_poweroff_store(struct device *dev,
unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
unsigned long flags;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
- &poweroff_shader_ticks,
- &poweroff_gpu_ticks);
+ items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time, &poweroff_shader_ticks,
+ &poweroff_gpu_ticks);
if (items != 3) {
- dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
- "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
+ dev_err(kbdev->dev,
+ "Couldn't process pm_poweroff write operation.\n"
+ "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
return -EINVAL;
}
@@ -3706,7 +3556,7 @@ static ssize_t pm_poweroff_store(struct device *dev,
if (poweroff_gpu_ticks != 0)
dev_warn(kbdev->dev, "Separate GPU poweroff delay no longer supported.\n");
- return count;
+ return (ssize_t)count;
}
/**
@@ -3720,14 +3570,15 @@ static ssize_t pm_poweroff_store(struct device *dev,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t pm_poweroff_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t pm_poweroff_show(struct device *dev, struct device_attribute *attr, char *const buf)
{
struct kbase_device *kbdev;
struct kbasep_pm_tick_timer_state *stt;
ssize_t ret;
unsigned long flags;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3735,8 +3586,7 @@ static ssize_t pm_poweroff_show(struct device *dev,
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
stt = &kbdev->pm.backend.shader_tick_timer;
- ret = scnprintf(buf, PAGE_SIZE, "%llu %u 0\n",
- ktime_to_ns(stt->configured_interval),
+ ret = scnprintf(buf, PAGE_SIZE, "%llu %u 0\n", ktime_to_ns(stt->configured_interval),
stt->default_ticks);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -3758,14 +3608,16 @@ static DEVICE_ATTR_RW(pm_poweroff);
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t reset_timeout_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t reset_timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *kbdev;
int ret;
u32 reset_timeout;
u32 default_reset_timeout;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3773,7 +3625,7 @@ static ssize_t reset_timeout_store(struct device *dev,
ret = kstrtou32(buf, 0, &reset_timeout);
if (ret || reset_timeout == 0) {
dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
- "Use format <reset_timeout_ms>\n");
+ "Use format <reset_timeout_ms>\n");
return -EINVAL;
}
@@ -3790,7 +3642,7 @@ static ssize_t reset_timeout_store(struct device *dev,
kbdev->reset_timeout_ms = reset_timeout;
dev_dbg(kbdev->dev, "Reset timeout: %ums\n", reset_timeout);
- return count;
+ return (ssize_t)count;
}
/**
@@ -3803,12 +3655,14 @@ static ssize_t reset_timeout_store(struct device *dev,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t reset_timeout_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t reset_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -3820,64 +3674,72 @@ static ssize_t reset_timeout_show(struct device *dev,
static DEVICE_ATTR_RW(reset_timeout);
-static ssize_t mem_pool_size_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t mem_pool_size_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
- kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_size);
+ return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE, kbdev->mem_pools.small,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_size);
}
-static ssize_t mem_pool_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t mem_pool_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
- int err;
+ ssize_t err;
+
+ CSTD_UNUSED(attr);
if (!kbdev)
return -ENODEV;
- err = kbase_debugfs_helper_set_attr_from_string(buf,
- kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_trim);
+ err = kbase_debugfs_helper_set_attr_from_string(buf, kbdev->mem_pools.small,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_trim);
- return err ? err : count;
+ return err ? err : (ssize_t)count;
}
static DEVICE_ATTR_RW(mem_pool_size);
-static ssize_t mem_pool_max_size_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t mem_pool_max_size_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
- kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_max_size);
+ return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE, kbdev->mem_pools.small,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_max_size);
}
-static ssize_t mem_pool_max_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t mem_pool_max_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
- int err;
+ ssize_t err;
+
+ CSTD_UNUSED(attr);
if (!kbdev)
return -ENODEV;
- err = kbase_debugfs_helper_set_attr_from_string(buf,
- kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_set_max_size);
+ err = kbase_debugfs_helper_set_attr_from_string(buf, kbdev->mem_pools.small,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_set_max_size);
- return err ? err : count;
+ return err ? err : (ssize_t)count;
}
static DEVICE_ATTR_RW(mem_pool_max_size);
@@ -3892,17 +3754,19 @@ static DEVICE_ATTR_RW(mem_pool_max_size);
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t lp_mem_pool_size_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t lp_mem_pool_size_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
- kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_size);
+ return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE, kbdev->mem_pools.large,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_size);
}
/**
@@ -3917,20 +3781,22 @@ static ssize_t lp_mem_pool_size_show(struct device *dev,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t lp_mem_pool_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t lp_mem_pool_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
- int err;
+ ssize_t err;
+
+ CSTD_UNUSED(attr);
if (!kbdev)
return -ENODEV;
- err = kbase_debugfs_helper_set_attr_from_string(buf,
- kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_trim);
+ err = kbase_debugfs_helper_set_attr_from_string(buf, kbdev->mem_pools.large,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_trim);
- return err ? err : count;
+ return err ? err : (ssize_t)count;
}
static DEVICE_ATTR_RW(lp_mem_pool_size);
@@ -3945,17 +3811,19 @@ static DEVICE_ATTR_RW(lp_mem_pool_size);
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t lp_mem_pool_max_size_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t lp_mem_pool_max_size_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
- kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_max_size);
+ return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE, kbdev->mem_pools.large,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_max_size);
}
/**
@@ -3969,70 +3837,76 @@ static ssize_t lp_mem_pool_max_size_show(struct device *dev,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t lp_mem_pool_max_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t lp_mem_pool_max_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
- int err;
+ ssize_t err;
+
+ CSTD_UNUSED(attr);
if (!kbdev)
return -ENODEV;
- err = kbase_debugfs_helper_set_attr_from_string(buf,
- kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_debugfs_set_max_size);
+ err = kbase_debugfs_helper_set_attr_from_string(buf, kbdev->mem_pools.large,
+ MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_debugfs_set_max_size);
- return err ? err : count;
+ return err ? err : (ssize_t)count;
}
static DEVICE_ATTR_RW(lp_mem_pool_max_size);
/**
* show_simplified_mem_pool_max_size - Show the maximum size for the memory
- * pool 0 of small (4KiB) pages.
+ * pool 0 of small (4KiB/16KiB/64KiB) pages.
* @dev: The device this sysfs file is for.
* @attr: The attributes of the sysfs file.
* @buf: The output buffer to receive the max size.
*
* This function is called to get the maximum size for the memory pool 0 of
- * small (4KiB) pages. It is assumed that the maximum size value is same for
+ * small pages. It is assumed that the maximum size value is same for
* all the pools.
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t show_simplified_mem_pool_max_size(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t show_simplified_mem_pool_max_size(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
- kbdev->mem_pools.small, 1, kbase_mem_pool_debugfs_max_size);
+ return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE, kbdev->mem_pools.small, 1,
+ kbase_mem_pool_debugfs_max_size);
}
/**
* set_simplified_mem_pool_max_size - Set the same maximum size for all the
- * memory pools of small (4KiB) pages.
+ * memory pools of small (4KiB/16KiB/64KiB) pages.
* @dev: The device with sysfs file is for
* @attr: The attributes of the sysfs file
* @buf: The value written to the sysfs file
* @count: The number of bytes written to the sysfs file
*
* This function is called to set the same maximum size for all the memory
- * pools of small (4KiB) pages.
+ * pools of small pages.
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t set_simplified_mem_pool_max_size(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_simplified_mem_pool_max_size(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
unsigned long new_size;
- int gid;
+ size_t gid;
int err;
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
@@ -4041,14 +3915,13 @@ static ssize_t set_simplified_mem_pool_max_size(struct device *dev,
return -EINVAL;
for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid)
- kbase_mem_pool_debugfs_set_max_size(
- kbdev->mem_pools.small, gid, (size_t)new_size);
+ kbase_mem_pool_debugfs_set_max_size(kbdev->mem_pools.small, gid, (size_t)new_size);
- return count;
+ return (ssize_t)count;
}
static DEVICE_ATTR(max_size, 0600, show_simplified_mem_pool_max_size,
- set_simplified_mem_pool_max_size);
+ set_simplified_mem_pool_max_size);
/**
* show_simplified_lp_mem_pool_max_size - Show the maximum size for the memory
@@ -4064,15 +3937,17 @@ static DEVICE_ATTR(max_size, 0600, show_simplified_mem_pool_max_size,
* Return: The number of bytes output to @buf.
*/
static ssize_t show_simplified_lp_mem_pool_max_size(struct device *dev,
- struct device_attribute *attr, char * const buf)
+ struct device_attribute *attr, char *const buf)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
- kbdev->mem_pools.large, 1, kbase_mem_pool_debugfs_max_size);
+ return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE, kbdev->mem_pools.large, 1,
+ kbase_mem_pool_debugfs_max_size);
}
/**
@@ -4089,13 +3964,16 @@ static ssize_t show_simplified_lp_mem_pool_max_size(struct device *dev,
 * Return: @count if the function succeeded. An error code on failure.
*/
static ssize_t set_simplified_lp_mem_pool_max_size(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct kbase_device *const kbdev = to_kbase_device(dev);
unsigned long new_size;
- int gid;
+ size_t gid;
int err;
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
@@ -4104,40 +3982,40 @@ static ssize_t set_simplified_lp_mem_pool_max_size(struct device *dev,
return -EINVAL;
for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid)
- kbase_mem_pool_debugfs_set_max_size(
- kbdev->mem_pools.large, gid, (size_t)new_size);
+ kbase_mem_pool_debugfs_set_max_size(kbdev->mem_pools.large, gid, (size_t)new_size);
- return count;
+ return (ssize_t)count;
}
static DEVICE_ATTR(lp_max_size, 0600, show_simplified_lp_mem_pool_max_size,
- set_simplified_lp_mem_pool_max_size);
+ set_simplified_lp_mem_pool_max_size);
/**
* show_simplified_ctx_default_max_size - Show the default maximum size for the
- * memory pool 0 of small (4KiB) pages.
+ * memory pool 0 of small (4KiB/16KiB/64KiB) pages.
* @dev: The device this sysfs file is for.
* @attr: The attributes of the sysfs file.
* @buf: The output buffer to receive the pool size.
*
* This function is called to get the default ctx maximum size for the memory
- * pool 0 of small (4KiB) pages. It is assumed that maximum size value is same
+ * pool 0 of small pages. It is assumed that the maximum size value is the same
* for all the pools. The maximum size for the pool of large (2MiB) pages will
- * be same as max size of the pool of small (4KiB) pages in terms of bytes.
+ * be the same as the max size of the pool of small pages in terms of bytes.
*
* Return: The number of bytes output to @buf.
*/
static ssize_t show_simplified_ctx_default_max_size(struct device *dev,
- struct device_attribute *attr, char * const buf)
+ struct device_attribute *attr, char *const buf)
{
struct kbase_device *kbdev = to_kbase_device(dev);
size_t max_size;
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- max_size = kbase_mem_pool_config_debugfs_max_size(
- kbdev->mem_pool_defaults.small, 0);
+ max_size = kbase_mem_pool_config_debugfs_max_size(kbdev->mem_pool_defaults.small, 0);
return scnprintf(buf, PAGE_SIZE, "%zu\n", max_size);
}
@@ -4160,12 +4038,15 @@ static ssize_t show_simplified_ctx_default_max_size(struct device *dev,
* Return: @count if the function succeeded. An error code on failure.
*/
static ssize_t set_simplified_ctx_default_max_size(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct kbase_device *kbdev;
unsigned long new_size;
int err;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -4174,15 +4055,13 @@ static ssize_t set_simplified_ctx_default_max_size(struct device *dev,
if (err)
return -EINVAL;
- kbase_mem_pool_group_config_set_max_size(
- &kbdev->mem_pool_defaults, (size_t)new_size);
+ kbase_mem_pool_group_config_set_max_size(&kbdev->mem_pool_defaults, (size_t)new_size);
- return count;
+ return (ssize_t)count;
}
-static DEVICE_ATTR(ctx_default_max_size, 0600,
- show_simplified_ctx_default_max_size,
- set_simplified_ctx_default_max_size);
+static DEVICE_ATTR(ctx_default_max_size, 0600, show_simplified_ctx_default_max_size,
+ set_simplified_ctx_default_max_size);
#if !MALI_USE_CSF
/**
@@ -4196,11 +4075,13 @@ static DEVICE_ATTR(ctx_default_max_size, 0600,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t js_ctx_scheduling_mode_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t js_ctx_scheduling_mode_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -4222,8 +4103,8 @@ static ssize_t js_ctx_scheduling_mode_show(struct device *dev,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t js_ctx_scheduling_mode_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t js_ctx_scheduling_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_context *kctx;
u32 new_js_ctx_scheduling_mode;
@@ -4231,6 +4112,8 @@ static ssize_t js_ctx_scheduling_mode_store(struct device *dev,
unsigned long flags;
int ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -4238,13 +4121,13 @@ static ssize_t js_ctx_scheduling_mode_store(struct device *dev,
ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode"
- " write operation.\n"
- "Use format <js_ctx_scheduling_mode>\n");
+ " write operation.\n"
+ "Use format <js_ctx_scheduling_mode>\n");
return -EINVAL;
}
if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode)
- return count;
+ return (ssize_t)count;
mutex_lock(&kbdev->kctx_list_lock);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -4261,7 +4144,7 @@ static ssize_t js_ctx_scheduling_mode_store(struct device *dev,
dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n", new_js_ctx_scheduling_mode);
- return count;
+ return (ssize_t)count;
}
static DEVICE_ATTR_RW(js_ctx_scheduling_mode);
@@ -4271,17 +4154,16 @@ static DEVICE_ATTR_RW(js_ctx_scheduling_mode);
/* Maximum string length in serialize_jobs_settings[].name */
#define MAX_SERIALIZE_JOBS_NAME_LEN 16
-static struct
-{
+static struct {
char *name;
u8 setting;
} serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
- {"none", 0},
- {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
- {"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
- {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
- {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
- KBASE_SERIALIZE_RESET}
+ { "none", 0 },
+ { "intra-slot", KBASE_SERIALIZE_INTRA_SLOT },
+ { "inter-slot", KBASE_SERIALIZE_INTER_SLOT },
+ { "full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT },
+ { "full-reset",
+ KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT | KBASE_SERIALIZE_RESET }
};
/**
@@ -4299,16 +4181,15 @@ static struct
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev,
- const char *buf, size_t count)
+static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev, const char *buf,
+ size_t count)
{
int i;
bool valid = false;
for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
- kbdev->serialize_jobs =
- serialize_jobs_settings[i].setting;
+ kbdev->serialize_jobs = serialize_jobs_settings[i].setting;
valid = true;
break;
}
@@ -4319,7 +4200,7 @@ static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev,
return -EINVAL;
}
- return count;
+ return (ssize_t)count;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -4335,8 +4216,7 @@ static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev,
*
* Return: 0 on success, or an error code on error
*/
-static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile,
- void *data)
+static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile, void *data)
{
struct kbase_device *kbdev = sfile->private;
int i;
@@ -4345,11 +4225,9 @@ static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile,
for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
- seq_printf(sfile, "[%s] ",
- serialize_jobs_settings[i].name);
+ seq_printf(sfile, "[%s] ", serialize_jobs_settings[i].name);
else
- seq_printf(sfile, "%s ",
- serialize_jobs_settings[i].name);
+ seq_printf(sfile, "%s ", serialize_jobs_settings[i].name);
}
seq_puts(sfile, "\n");
@@ -4371,8 +4249,8 @@ static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
- const char __user *ubuf, size_t count, loff_t *ppos)
+static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct kbase_device *kbdev = s->private;
@@ -4397,11 +4275,9 @@ static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
*
* Return: Zero on success, error code on failure
*/
-static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
- struct file *file)
+static int kbasep_serialize_jobs_debugfs_open(struct inode *in, struct file *file)
{
- return single_open(file, kbasep_serialize_jobs_seq_debugfs_show,
- in->i_private);
+ return single_open(file, kbasep_serialize_jobs_seq_debugfs_show, in->i_private);
}
static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
@@ -4428,26 +4304,26 @@ static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t show_serialize_jobs_sysfs(struct device *dev,
- struct device_attribute *attr,
+static ssize_t show_serialize_jobs_sysfs(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct kbase_device *kbdev = to_kbase_device(dev);
ssize_t ret = 0;
int i;
+ CSTD_UNUSED(attr);
+
for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
- if (kbdev->serialize_jobs ==
- serialize_jobs_settings[i].setting)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s]",
+ if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "[%s]",
serialize_jobs_settings[i].name);
else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ",
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "%s ",
serialize_jobs_settings[i].name);
}
- if (ret < PAGE_SIZE - 1) {
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ if (ret < (ssize_t)(PAGE_SIZE - 1)) {
+ ret += scnprintf(buf + ret, (size_t)(PAGE_SIZE - ret), "\n");
} else {
buf[PAGE_SIZE - 2] = '\n';
buf[PAGE_SIZE - 1] = '\0';
@@ -4471,21 +4347,20 @@ static ssize_t show_serialize_jobs_sysfs(struct device *dev,
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t store_serialize_jobs_sysfs(struct device *dev,
- struct device_attribute *attr,
+static ssize_t store_serialize_jobs_sysfs(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ CSTD_UNUSED(attr);
return update_serialize_jobs_setting(to_kbase_device(dev), buf, count);
}
-static DEVICE_ATTR(serialize_jobs, 0600, show_serialize_jobs_sysfs,
- store_serialize_jobs_sysfs);
+static DEVICE_ATTR(serialize_jobs, 0600, show_serialize_jobs_sysfs, store_serialize_jobs_sysfs);
#endif /* !MALI_USE_CSF */
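As a quick illustration of the serialize_jobs control defined above (exposed via sysfs on JM builds, and via debugfs when CONFIG_DEBUG_FS is enabled), the hedged userspace sketch below reads the current setting and then selects "full". The /sys path is an assumption for a typical single-GPU system and is not part of this patch.

/* Hedged userspace sketch; the sysfs path below is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/misc/mali0/device/serialize_jobs";
	char cur[128] = { 0 };
	int fd;

	/* Read back the settings list; the active one is shown in brackets. */
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		if (read(fd, cur, sizeof(cur) - 1) > 0)
			printf("%s", cur);
		close(fd);
	}

	/* Request full serialization (intra-slot and inter-slot). */
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "full", strlen("full")) < 0)
		perror("write");
	close(fd);
	return 0;
}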
static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
{
- struct kbase_device *kbdev = container_of(data, struct kbase_device,
- protected_mode_hwcnt_disable_work);
+ struct kbase_device *kbdev =
+ container_of(data, struct kbase_device, protected_mode_hwcnt_disable_work);
spinlock_t *backend_lock;
unsigned long flags;
@@ -4498,8 +4373,7 @@ static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
#endif
spin_lock_irqsave(backend_lock, flags);
- do_disable = !kbdev->protected_mode_hwcnt_desired &&
- !kbdev->protected_mode_hwcnt_disabled;
+ do_disable = !kbdev->protected_mode_hwcnt_desired && !kbdev->protected_mode_hwcnt_disabled;
spin_unlock_irqrestore(backend_lock, flags);
if (!do_disable)
@@ -4508,8 +4382,7 @@ static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
spin_lock_irqsave(backend_lock, flags);
- do_disable = !kbdev->protected_mode_hwcnt_desired &&
- !kbdev->protected_mode_hwcnt_disabled;
+ do_disable = !kbdev->protected_mode_hwcnt_desired && !kbdev->protected_mode_hwcnt_disabled;
if (do_disable) {
/* Protected mode state did not change while we were doing the
@@ -4556,16 +4429,14 @@ static const struct protected_mode_ops kbasep_native_protected_ops = {
int kbase_protected_mode_init(struct kbase_device *kbdev)
{
/* Use native protected ops */
- kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
- GFP_KERNEL);
+ kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev), GFP_KERNEL);
if (!kbdev->protected_dev)
return -ENOMEM;
kbdev->protected_dev->data = kbdev;
kbdev->protected_ops = PLATFORM_PROTECTED_CALLBACKS;
INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
- kbasep_protected_mode_hwcnt_disable_worker);
+ kbasep_protected_mode_hwcnt_disable_worker);
kbdev->protected_mode_hwcnt_desired = true;
- kbdev->protected_mode_hwcnt_disabled = false;
return 0;
}
@@ -4580,7 +4451,7 @@ static int kbase_common_reg_map(struct kbase_device *kbdev)
{
return 0;
}
-static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+static void kbase_common_reg_unmap(struct kbase_device *const kbdev)
{
}
#else /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
@@ -4609,7 +4480,7 @@ out_region:
return err;
}
-static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+static void kbase_common_reg_unmap(struct kbase_device *const kbdev)
{
if (kbdev->reg) {
iounmap(kbdev->reg);
@@ -4621,7 +4492,7 @@ static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
}
#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
-int registers_map(struct kbase_device * const kbdev)
+int registers_map(struct kbase_device *const kbdev)
{
/* the first memory resource is the physical address of the GPU
* registers.
@@ -4641,11 +4512,11 @@ int registers_map(struct kbase_device * const kbdev)
#if MALI_USE_CSF
if (kbdev->reg_size <
- (CSF_HW_DOORBELL_PAGE_OFFSET +
- CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE)) {
- dev_err(kbdev->dev, "Insufficient register space, will override to the required size\n");
- kbdev->reg_size = CSF_HW_DOORBELL_PAGE_OFFSET +
- CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE;
+ (CSF_HW_DOORBELL_PAGE_OFFSET + CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE)) {
+ dev_err(kbdev->dev,
+ "Insufficient register space, will override to the required size\n");
+ kbdev->reg_size =
+ CSF_HW_DOORBELL_PAGE_OFFSET + CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE;
}
#endif
@@ -4679,13 +4550,11 @@ static bool kbase_is_pm_enabled(const struct device_node *gpu_node)
if (power_model_node)
is_pm_enable = true;
- cooling_cells_node = of_get_property(gpu_node,
- "#cooling-cells", NULL);
+ cooling_cells_node = of_get_property(gpu_node, "#cooling-cells", NULL);
if (cooling_cells_node)
is_pm_enable = true;
- operating_point_node = of_get_property(gpu_node,
- "operating-points", NULL);
+ operating_point_node = of_get_property(gpu_node, "operating-points", NULL);
if (operating_point_node)
is_pm_enable = true;
@@ -4708,9 +4577,7 @@ static bool kbase_is_full_coherency_enabled(const struct device_node *gpu_node)
const void *coherency_dts;
u32 coherency;
- coherency_dts = of_get_property(gpu_node,
- "system-coherency",
- NULL);
+ coherency_dts = of_get_property(gpu_node, "system-coherency", NULL);
if (coherency_dts) {
coherency = be32_to_cpup(coherency_dts);
if (coherency == COHERENCY_ACE)
@@ -4726,21 +4593,20 @@ int kbase_device_pm_init(struct kbase_device *kbdev)
int err = 0;
#if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
-
- u32 gpu_id;
- u32 product_id;
- u32 gpu_model_id;
+ u32 product_model;
if (kbase_is_pv_enabled(kbdev->dev->of_node)) {
dev_info(kbdev->dev, "Arbitration interface enabled\n");
if (kbase_is_pm_enabled(kbdev->dev->of_node)) {
/* Arbitration AND power management invalid */
- dev_err(kbdev->dev, "Invalid combination of arbitration AND power management\n");
+ dev_err(kbdev->dev,
+ "Invalid combination of arbitration AND power management\n");
return -EPERM;
}
if (kbase_is_full_coherency_enabled(kbdev->dev->of_node)) {
/* Arbitration AND full coherency invalid */
- dev_err(kbdev->dev, "Invalid combination of arbitration AND full coherency\n");
+ dev_err(kbdev->dev,
+ "Invalid combination of arbitration AND full coherency\n");
return -EPERM;
}
err = kbase_arbiter_pm_early_init(kbdev);
@@ -4749,15 +4615,14 @@ int kbase_device_pm_init(struct kbase_device *kbdev)
* supported GPU platform
*/
kbase_pm_register_access_enable(kbdev);
- gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
+ kbase_gpuprops_parse_gpu_id(&kbdev->gpu_props.gpu_id,
+ kbase_reg_get_gpu_id(kbdev));
kbase_pm_register_access_disable(kbdev);
- product_id =
- KBASE_UBFX32(gpu_id, KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT, 16);
- gpu_model_id = GPU_ID2_MODEL_MATCH_VALUE(product_id);
+ product_model = kbdev->gpu_props.gpu_id.product_model;
- if (gpu_model_id != GPU_ID2_PRODUCT_TGOX
- && gpu_model_id != GPU_ID2_PRODUCT_TNOX
- && gpu_model_id != GPU_ID2_PRODUCT_TBAX) {
+ if (product_model != GPU_ID_PRODUCT_TGOX &&
+ product_model != GPU_ID_PRODUCT_TNOX &&
+ product_model != GPU_ID_PRODUCT_TBAX) {
kbase_arbiter_pm_early_term(kbdev);
dev_err(kbdev->dev, "GPU platform not suitable for arbitration\n");
return -EPERM;
@@ -4804,9 +4669,7 @@ int power_control_init(struct kbase_device *kbdev)
int err = 0;
unsigned int i;
#if defined(CONFIG_REGULATOR)
- static const char * const regulator_names[] = {
- "mali", "shadercores"
- };
+ static const char *const regulator_names[] = { "mali", "shadercores" };
BUILD_BUG_ON(ARRAY_SIZE(regulator_names) < BASE_MAX_NR_CLOCKS_REGULATORS);
#endif /* CONFIG_REGULATOR */
@@ -4824,8 +4687,7 @@ int power_control_init(struct kbase_device *kbdev)
* operating with a partial initialization of regulators.
*/
for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
- kbdev->regulators[i] = regulator_get_optional(kbdev->dev,
- regulator_names[i]);
+ kbdev->regulators[i] = regulator_get_optional(kbdev->dev, regulator_names[i]);
if (IS_ERR(kbdev->regulators[i])) {
err = PTR_ERR(kbdev->regulators[i]);
kbdev->regulators[i] = NULL;
@@ -4853,7 +4715,7 @@ int power_control_init(struct kbase_device *kbdev)
* operating with a partial initialization of clocks.
*/
for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
- kbdev->clocks[i] = of_clk_get(kbdev->dev->of_node, i);
+ kbdev->clocks[i] = of_clk_get(kbdev->dev->of_node, (int)i);
if (IS_ERR(kbdev->clocks[i])) {
err = PTR_ERR(kbdev->clocks[i]);
kbdev->clocks[i] = NULL;
@@ -4862,9 +4724,7 @@ int power_control_init(struct kbase_device *kbdev)
err = clk_prepare_enable(kbdev->clocks[i]);
if (err) {
- dev_err(kbdev->dev,
- "Failed to prepare and enable clock (%d)\n",
- err);
+ dev_err(kbdev->dev, "Failed to prepare and enable clock (%d)\n", err);
clk_put(kbdev->clocks[i]);
break;
}
@@ -4895,12 +4755,11 @@ int power_control_init(struct kbase_device *kbdev)
err = kbdev->token;
goto regulators_probe_defer;
}
-
}
#elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
if (kbdev->nr_regulators > 0) {
- kbdev->opp_table = dev_pm_opp_set_regulators(kbdev->dev,
- regulator_names, BASE_MAX_NR_CLOCKS_REGULATORS);
+ kbdev->opp_table = dev_pm_opp_set_regulators(kbdev->dev, regulator_names,
+ BASE_MAX_NR_CLOCKS_REGULATORS);
if (IS_ERR(kbdev->opp_table)) {
err = PTR_ERR(kbdev->opp_table);
@@ -4914,7 +4773,7 @@ int power_control_init(struct kbase_device *kbdev)
#endif /* CONFIG_PM_OPP */
return 0;
-#if defined(CONFIG_PM_OPP) && \
+#if defined(CONFIG_PM_OPP) && \
((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) && defined(CONFIG_REGULATOR))
regulators_probe_defer:
for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
@@ -4984,25 +4843,25 @@ static void trigger_reset(struct kbase_device *kbdev)
kbase_pm_context_idle(kbdev);
}
-#define MAKE_QUIRK_ACCESSORS(type) \
-static int type##_quirks_set(void *data, u64 val) \
-{ \
- struct kbase_device *kbdev; \
- kbdev = (struct kbase_device *)data; \
- kbdev->hw_quirks_##type = (u32)val; \
- trigger_reset(kbdev); \
- return 0; \
-} \
-\
-static int type##_quirks_get(void *data, u64 *val) \
-{ \
- struct kbase_device *kbdev; \
- kbdev = (struct kbase_device *)data; \
- *val = kbdev->hw_quirks_##type; \
- return 0; \
-} \
-DEFINE_DEBUGFS_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get, \
- type##_quirks_set, "%llu\n")
+#define MAKE_QUIRK_ACCESSORS(type) \
+ static int type##_quirks_set(void *data, u64 val) \
+ { \
+ struct kbase_device *kbdev; \
+ kbdev = (struct kbase_device *)data; \
+ kbdev->hw_quirks_##type = (u32)val; \
+ trigger_reset(kbdev); \
+ return 0; \
+ } \
+ \
+ static int type##_quirks_get(void *data, u64 *val) \
+ { \
+ struct kbase_device *kbdev; \
+ kbdev = (struct kbase_device *)data; \
+ *val = kbdev->hw_quirks_##type; \
+ return 0; \
+ } \
+ DEFINE_DEBUGFS_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get, type##_quirks_set, \
+ "%llu\n")
MAKE_QUIRK_ACCESSORS(sc);
MAKE_QUIRK_ACCESSORS(tiler);
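For readability, the reindented macro above expands (taking the sc instance as an example) to roughly the accessor pair and debugfs attribute sketched below; this is only the expansion written out for reference, not additional driver code.

/* Approximate expansion of MAKE_QUIRK_ACCESSORS(sc), for reference only. */
static int sc_quirks_set(void *data, u64 val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;

	kbdev->hw_quirks_sc = (u32)val;
	trigger_reset(kbdev); /* a quirk change only takes effect after a GPU reset */
	return 0;
}

static int sc_quirks_get(void *data, u64 *val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;

	*val = kbdev->hw_quirks_sc;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_sc_quirks, sc_quirks_get, sc_quirks_set, "%llu\n");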
@@ -5046,18 +4905,18 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_trigger_reset, NULL, &kbase_device_debugfs_reset_w
*
* Return: Number of bytes added to user buffer
*/
-static ssize_t debugfs_protected_debug_mode_read(struct file *file,
- char __user *buf, size_t len, loff_t *ppos)
+static ssize_t debugfs_protected_debug_mode_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
{
struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
u32 gpu_status;
ssize_t ret_val;
kbase_pm_context_active(kbdev);
- gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
+ gpu_status = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_STATUS));
kbase_pm_context_idle(kbdev);
- if (gpu_status & GPU_DBGEN)
+ if (gpu_status & GPU_STATUS_GPU_DBG_ENABLED)
ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
else
ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
@@ -5077,37 +4936,32 @@ static const struct file_operations fops_protected_debug_mode = {
.llseek = default_llseek,
};
-static int kbase_device_debugfs_mem_pool_max_size_show(struct seq_file *sfile,
- void *data)
+static int kbase_device_debugfs_mem_pool_max_size_show(struct seq_file *sfile, void *data)
{
CSTD_UNUSED(data);
- return kbase_debugfs_helper_seq_read(sfile,
- MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_config_debugfs_max_size);
+ return kbase_debugfs_helper_seq_read(sfile, MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_config_debugfs_max_size);
}
static ssize_t kbase_device_debugfs_mem_pool_max_size_write(struct file *file,
- const char __user *ubuf, size_t count, loff_t *ppos)
+ const char __user *ubuf, size_t count,
+ loff_t *ppos)
{
- int err = 0;
+ ssize_t err = 0;
CSTD_UNUSED(ppos);
- err = kbase_debugfs_helper_seq_write(file, ubuf, count,
- MEMORY_GROUP_MANAGER_NR_GROUPS,
- kbase_mem_pool_config_debugfs_set_max_size);
+ err = kbase_debugfs_helper_seq_write(file, ubuf, count, MEMORY_GROUP_MANAGER_NR_GROUPS,
+ kbase_mem_pool_config_debugfs_set_max_size);
- return err ? err : count;
+ return err ? err : (ssize_t)count;
}
-static int kbase_device_debugfs_mem_pool_max_size_open(struct inode *in,
- struct file *file)
+static int kbase_device_debugfs_mem_pool_max_size_open(struct inode *in, struct file *file)
{
- return single_open(file, kbase_device_debugfs_mem_pool_max_size_show,
- in->i_private);
+ return single_open(file, kbase_device_debugfs_mem_pool_max_size_show, in->i_private);
}
-static const struct file_operations
- kbase_device_debugfs_mem_pool_max_size_fops = {
+static const struct file_operations kbase_device_debugfs_mem_pool_max_size_fops = {
.owner = THIS_MODULE,
.open = kbase_device_debugfs_mem_pool_max_size_open,
.read = seq_read,
@@ -5136,21 +4990,20 @@ static struct dentry *debugfs_ctx_defaults_init(struct kbase_device *const kbdev
return dentry;
}
- debugfs_create_bool("infinite_cache", mode,
- debugfs_ctx_defaults_directory,
- &kbdev->infinite_cache_active_default);
+ debugfs_create_bool("infinite_cache", mode, debugfs_ctx_defaults_directory,
+ &kbdev->infinite_cache_active_default);
dentry = debugfs_create_file("mem_pool_max_size", mode, debugfs_ctx_defaults_directory,
- &kbdev->mem_pool_defaults.small,
- &kbase_device_debugfs_mem_pool_max_size_fops);
+ &kbdev->mem_pool_defaults.small,
+ &kbase_device_debugfs_mem_pool_max_size_fops);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create mem_pool_max_size debugfs entry\n");
return dentry;
}
dentry = debugfs_create_file("lp_mem_pool_max_size", mode, debugfs_ctx_defaults_directory,
- &kbdev->mem_pool_defaults.large,
- &kbase_device_debugfs_mem_pool_max_size_fops);
+ &kbdev->mem_pool_defaults.large,
+ &kbase_device_debugfs_mem_pool_max_size_fops);
if (IS_ERR_OR_NULL(dentry))
dev_err(kbdev->dev, "Unable to create lp_mem_pool_max_size debugfs entry\n");
@@ -5169,9 +5022,7 @@ static struct dentry *init_debugfs(struct kbase_device *kbdev)
kbdev->mali_debugfs_directory = dentry;
if (IS_ERR_OR_NULL(dentry)) {
- dev_err(kbdev->dev,
- "Couldn't create mali debugfs directory: %s\n",
- kbdev->devname);
+ dev_err(kbdev->dev, "Couldn't create mali debugfs directory: %s\n", kbdev->devname);
return dentry;
}
@@ -5207,32 +5058,29 @@ static struct dentry *init_debugfs(struct kbase_device *kbdev)
/* fops_* variables created by invocations of macro
* MAKE_QUIRK_ACCESSORS() above.
*/
- dentry = debugfs_create_file("quirks_sc", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_sc_quirks);
+ dentry = debugfs_create_file("quirks_sc", 0644, kbdev->mali_debugfs_directory, kbdev,
+ &fops_sc_quirks);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create quirks_sc debugfs entry\n");
return dentry;
}
- dentry = debugfs_create_file("quirks_tiler", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_tiler_quirks);
+ dentry = debugfs_create_file("quirks_tiler", 0644, kbdev->mali_debugfs_directory, kbdev,
+ &fops_tiler_quirks);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create quirks_tiler debugfs entry\n");
return dentry;
}
- dentry = debugfs_create_file("quirks_mmu", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_mmu_quirks);
+ dentry = debugfs_create_file("quirks_mmu", 0644, kbdev->mali_debugfs_directory, kbdev,
+ &fops_mmu_quirks);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create quirks_mmu debugfs entry\n");
return dentry;
}
- dentry = debugfs_create_file("quirks_gpu", 0644, kbdev->mali_debugfs_directory,
- kbdev, &fops_gpu_quirks);
+ dentry = debugfs_create_file("quirks_gpu", 0644, kbdev->mali_debugfs_directory, kbdev,
+ &fops_gpu_quirks);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create quirks_gpu debugfs entry\n");
return dentry;
@@ -5244,17 +5092,17 @@ static struct dentry *init_debugfs(struct kbase_device *kbdev)
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
dentry = debugfs_create_file("protected_debug_mode", 0444,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_protected_debug_mode);
+ kbdev->mali_debugfs_directory, kbdev,
+ &fops_protected_debug_mode);
if (IS_ERR_OR_NULL(dentry)) {
- dev_err(kbdev->dev, "Unable to create protected_debug_mode debugfs entry\n");
+ dev_err(kbdev->dev,
+ "Unable to create protected_debug_mode debugfs entry\n");
return dentry;
}
}
- dentry = debugfs_create_file("reset", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &fops_trigger_reset);
+ dentry = debugfs_create_file("reset", 0644, kbdev->mali_debugfs_directory, kbdev,
+ &fops_trigger_reset);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create reset debugfs entry\n");
return dentry;
@@ -5270,9 +5118,8 @@ static struct dentry *init_debugfs(struct kbase_device *kbdev)
#endif /* CONFIG_MALI_DEVFREQ */
#if !MALI_USE_CSF
- dentry = debugfs_create_file("serialize_jobs", 0644,
- kbdev->mali_debugfs_directory, kbdev,
- &kbasep_serialize_jobs_debugfs_fops);
+ dentry = debugfs_create_file("serialize_jobs", 0644, kbdev->mali_debugfs_directory, kbdev,
+ &kbasep_serialize_jobs_debugfs_fops);
if (IS_ERR_OR_NULL(dentry)) {
dev_err(kbdev->dev, "Unable to create serialize_jobs debugfs entry\n");
return dentry;
@@ -5302,91 +5149,216 @@ void kbase_device_debugfs_term(struct kbase_device *kbdev)
}
#endif /* CONFIG_DEBUG_FS */
-int kbase_device_coherency_init(struct kbase_device *kbdev)
+/**
+ * kbase_device_normalize_coherency_bitmap - Update the supported coherency
+ * bitmap for devices where the flags were incorrectly documented.
+ *
+ * @kbdev: Kbase device
+ *
+ * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly documented for tMIx,
+ * so force the correct value here.
+ *
+ * Return: u32 bitmap of the supported coherency modes.
+ */
+static u32 kbase_device_normalize_coherency_bitmap(struct kbase_device *kbdev)
{
-#if IS_ENABLED(CONFIG_OF)
- u32 supported_coherency_bitmap =
- kbdev->gpu_props.props.raw_props.coherency_mode;
- const void *coherency_override_dts;
- bool dma_coherent;
- u32 override_coherency, gpu_id;
- unsigned int prod_id;
-
- gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
- gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
- prod_id = gpu_id >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
-
- /* Only for tMIx :
- * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
- * documented for tMIx so force correct value here.
+ u32 supported_coherency_bitmap = kbdev->gpu_props.coherency_mode;
+
+ if ((kbdev->gpu_props.gpu_id.product_model == GPU_ID_PRODUCT_TMIX) &&
+ (supported_coherency_bitmap == COHERENCY_FEATURE_BIT(COHERENCY_ACE)))
+ supported_coherency_bitmap |= COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+
+ return supported_coherency_bitmap;
+}
+
+/**
+ * kbase_device_supports_coherency_mode - Check if the GPU supports a coherency mode.
+ *
+ * @kbdev: Kbase device instance.
+ * @coherency_mode: Bitmask of requested coherency modes.
+ *
+ * The coherency interfaces supported by the individual GPU vary based on the
+ * hardware revision and architecture. For instance:
+ * * JM supports both ACE and ACE-lite.
+ * * CSF supports ACE-lite only.
+ * * Some GPUs explicitly do not support coherency at all.
+ *
+ * Return: boolean indicating whether the current GPU supports the
+ * coherency mode.
+ */
+static bool kbase_device_supports_coherency_mode(struct kbase_device *kbdev, u32 coherency_mode)
+{
+ u32 supported_coherency_bitmap = kbase_device_normalize_coherency_bitmap(kbdev);
+
+ /* In the case of invalid flags specified from the DT node, we want to exit
+ * early.
*/
- if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
- GPU_ID2_PRODUCT_TMIX)
- if (supported_coherency_bitmap ==
- COHERENCY_FEATURE_BIT(COHERENCY_ACE))
- supported_coherency_bitmap |=
- COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+ if (coherency_mode > COHERENCY_NONE) {
+ dev_warn(kbdev->dev, "Ignoring unsupported coherency mode %u set from dtb",
+ coherency_mode);
+ return false;
+ }
-#endif /* CONFIG_OF */
+ /* ACE coherency is a little different, since it is explicitly not supported
+ * on CSF GPUs.
+ */
+ if (coherency_mode == COHERENCY_ACE) {
+ if (IS_ENABLED(MALI_USE_CSF) && !IS_ENABLED(CONFIG_MALI_NO_MALI)) {
+ dev_err(kbdev->dev,
+ "ACE coherency not supported on CSF, wrong DT configuration");
+ return false;
+ }
+ }
+
+ /* Finally, we need to know if the hardware supports it at all. */
+ if (!(supported_coherency_bitmap & COHERENCY_FEATURE_BIT(coherency_mode))) {
+ dev_warn(kbdev->dev, "Device does not support coherency mode %u set from dtb",
+ coherency_mode);
+ return false;
+ }
+
+ return true;
+}
+
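The early-exit check in kbase_device_supports_coherency_mode() relies on COHERENCY_NONE being the highest-numbered valid mode. The values below are quoted from the driver's base definitions as a reminder; treat them as an assumption here, they are not part of this patch.

/* Assumed coherency encoding (from mali_base_kernel.h in this driver tree):
 * the two interface modes map to low bit positions via COHERENCY_FEATURE_BIT(),
 * and COHERENCY_NONE (31) is the largest value accepted from the device tree.
 */
#define COHERENCY_ACE_LITE 0
#define COHERENCY_ACE 1
#define COHERENCY_NONE 31
#define COHERENCY_FEATURE_BIT(x) (1 << (x))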
+int kbase_device_coherency_init(struct kbase_device *kbdev)
+{
+ int err = 0;
kbdev->system_coherency = COHERENCY_NONE;
/* device tree may override the coherency */
-#if IS_ENABLED(CONFIG_OF)
- /* treat "dma-coherency" as a synonym for ACE-lite */
- dma_coherent = of_dma_is_coherent(kbdev->dev->of_node);
- coherency_override_dts = of_get_property(kbdev->dev->of_node,
- "system-coherency",
- NULL);
- if (coherency_override_dts || dma_coherent) {
- if (coherency_override_dts) {
+ if (IS_ENABLED(CONFIG_OF)) {
+ u32 override_coherency;
+ const void *coherency_override_dts;
+ bool dma_coherent;
+
+ /* treat "dma-coherency" as a synonym for ACE-lite */
+ dma_coherent = of_dma_is_coherent(kbdev->dev->of_node);
+ coherency_override_dts =
+ of_get_property(kbdev->dev->of_node, "system-coherency", NULL);
+
+ /* If there's no override, then we can skip the rest of the checks, and
+ * keep the default value of no coherency.
+ */
+ if (!coherency_override_dts && !dma_coherent)
+ goto early_exit;
+
+ if (coherency_override_dts)
override_coherency = be32_to_cpup(coherency_override_dts);
- if (dma_coherent && override_coherency != COHERENCY_ACE_LITE) {
- dev_err(kbdev->dev,
- "system-coherency needs to be 0 when dma-coherent is set\n");
- return -EINVAL;
- }
- } else {
- /* dma-coherent set and system-coherency not specified */
+ else
override_coherency = COHERENCY_ACE_LITE;
- }
-#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_NO_MALI)
- /* ACE coherency mode is not supported by Driver on CSF GPUs.
- * Return an error to signal the invalid device tree configuration.
- */
- if (override_coherency == COHERENCY_ACE) {
+ if (dma_coherent && override_coherency != COHERENCY_ACE_LITE) {
dev_err(kbdev->dev,
- "ACE coherency not supported, wrong DT configuration");
- return -EINVAL;
+ "system-coherency needs to be 0 when dma-coherent is set!");
+ err = -EINVAL;
+ goto early_exit;
}
-#endif
- if ((override_coherency <= COHERENCY_NONE) &&
- (supported_coherency_bitmap &
- COHERENCY_FEATURE_BIT(override_coherency))) {
+ if (!kbase_device_supports_coherency_mode(kbdev, override_coherency)) {
+ err = -EINVAL;
+ goto early_exit;
+ }
- kbdev->system_coherency = override_coherency;
+ kbdev->system_coherency = override_coherency;
- dev_info(kbdev->dev,
- "Using coherency mode %u set from dtb",
- override_coherency);
- } else
- dev_warn(kbdev->dev,
- "Ignoring unsupported coherency mode %u set from dtb",
- override_coherency);
+ dev_info(kbdev->dev, "Using coherency mode %u set from dtb", override_coherency);
}
+early_exit:
+ kbdev->gpu_props.coherency_mode = kbdev->system_coherency;
-#endif /* CONFIG_OF */
+ return err;
+}
- kbdev->gpu_props.props.raw_props.coherency_mode =
- kbdev->system_coherency;
- return 0;
+#if MALI_USE_CSF
+
+bool kbasep_adjust_prioritized_process(struct kbase_device *kbdev, bool add, uint32_t tgid)
+{
+ struct kbase_context *kctx;
+ bool found_contexts = false;
+
+ mutex_lock(&kbdev->kctx_list_lock);
+ list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
+ if (kctx->tgid == tgid) {
+ if (add)
+ dev_dbg(kbdev->dev,
+ "Adding context %pK of process %u to prioritized list\n",
+ (void *)kctx, tgid);
+ else
+ dev_dbg(kbdev->dev,
+ "Removing context %pK of process %u from prioritized list\n",
+ (void *)kctx, tgid);
+ atomic_set(&kctx->prioritized, add);
+ found_contexts = true;
+ }
+ }
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ if (found_contexts)
+ kbase_csf_scheduler_kick(kbdev);
+
+ return found_contexts;
}
+static ssize_t add_prioritized_process_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ unsigned int tgid;
+
+ CSTD_UNUSED(attr);
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtouint(buf, 0, &tgid);
+ if (ret || tgid == 0) {
+ dev_err(kbdev->dev, "Invalid PID specified\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(!kbasep_adjust_prioritized_process(kbdev, true, tgid))) {
+ dev_err(kbdev->dev, "Non-existent PID specified\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(add_prioritized_process);
+
+static ssize_t remove_prioritized_process_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbase_device *kbdev;
+ int ret;
+ unsigned int tgid;
+
+ CSTD_UNUSED(attr);
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtouint(buf, 0, &tgid);
+ if (ret || tgid == 0) {
+ dev_err(kbdev->dev, "Invalid PID specified\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(!kbasep_adjust_prioritized_process(kbdev, false, tgid))) {
+ dev_err(kbdev->dev, "Non-existent PID specified\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(remove_prioritized_process);
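A hedged userspace sketch of the new prioritized-process controls added above: writing a thread-group ID (the PID of a process that already holds an open kbase context) to add_prioritized_process boosts its contexts, and writing the same value to remove_prioritized_process reverts it. The /sys paths are assumptions for a typical single-GPU system.

/* Hedged userspace sketch; the sysfs paths are assumptions. */
#include <stdio.h>

static int write_tgid(const char *path, unsigned int tgid)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	/* The store callbacks reject 0 and PIDs with no open kbase context. */
	fprintf(f, "%u\n", tgid);
	return fclose(f);
}

int main(void)
{
	unsigned int tgid = 1234; /* example PID of a running GPU client */

	write_tgid("/sys/class/misc/mali0/device/add_prioritized_process", tgid);
	/* ... later, drop the priority boost again ... */
	write_tgid("/sys/class/misc/mali0/device/remove_prioritized_process", tgid);
	return 0;
}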
-#if MALI_USE_CSF
/**
* csg_scheduling_period_store - Store callback for the csg_scheduling_period
* sysfs file.
@@ -5400,14 +5372,15 @@ int kbase_device_coherency_init(struct kbase_device *kbdev)
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t csg_scheduling_period_store(struct device *dev,
- struct device_attribute *attr,
+static ssize_t csg_scheduling_period_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct kbase_device *kbdev;
int ret;
unsigned int csg_scheduling_period;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -5422,11 +5395,10 @@ static ssize_t csg_scheduling_period_store(struct device *dev,
kbase_csf_scheduler_lock(kbdev);
kbdev->csf.scheduler.csg_scheduling_period_ms = csg_scheduling_period;
- dev_dbg(kbdev->dev, "CSG scheduling period: %ums\n",
- csg_scheduling_period);
+ dev_dbg(kbdev->dev, "CSG scheduling period: %ums\n", csg_scheduling_period);
kbase_csf_scheduler_unlock(kbdev);
- return count;
+ return (ssize_t)count;
}
/**
@@ -5440,19 +5412,19 @@ static ssize_t csg_scheduling_period_store(struct device *dev,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t csg_scheduling_period_show(struct device *dev,
- struct device_attribute *attr,
+static ssize_t csg_scheduling_period_show(struct device *dev, struct device_attribute *attr,
char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%u\n",
- kbdev->csf.scheduler.csg_scheduling_period_ms);
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->csf.scheduler.csg_scheduling_period_ms);
return ret;
}
@@ -5471,14 +5443,15 @@ static DEVICE_ATTR_RW(csg_scheduling_period);
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t fw_timeout_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
+static ssize_t fw_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct kbase_device *kbdev;
int ret;
unsigned int fw_timeout;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
@@ -5489,16 +5462,16 @@ static ssize_t fw_timeout_store(struct device *dev,
"Couldn't process fw_timeout write operation.\n"
"Use format 'fw_timeout_ms', and fw_timeout_ms > 0\n"
"Default fw_timeout: %u",
- kbase_get_timeout_ms(kbdev, CSF_FIRMWARE_PING_TIMEOUT));
+ kbase_get_timeout_ms(kbdev, CSF_FIRMWARE_TIMEOUT));
return -EINVAL;
}
kbase_csf_scheduler_lock(kbdev);
- kbdev->csf.fw_timeout_ms = fw_timeout;
+ kbase_device_set_timeout_ms(kbdev, CSF_FIRMWARE_TIMEOUT, fw_timeout);
kbase_csf_scheduler_unlock(kbdev);
dev_dbg(kbdev->dev, "Firmware timeout: %ums\n", fw_timeout);
- return count;
+ return (ssize_t)count;
}
/**
@@ -5511,17 +5484,18 @@ static ssize_t fw_timeout_store(struct device *dev,
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t fw_timeout_show(struct device *dev,
- struct device_attribute *attr, char *const buf)
+static ssize_t fw_timeout_show(struct device *dev, struct device_attribute *attr, char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
+ CSTD_UNUSED(attr);
+
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- ret = scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->csf.fw_timeout_ms);
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n", kbase_get_timeout_ms(kbdev, CSF_FIRMWARE_TIMEOUT));
return ret;
}
@@ -5543,28 +5517,30 @@ static DEVICE_ATTR_RW(fw_timeout);
*
* Return: @count if the function succeeded. An error code on failure.
*/
-static ssize_t idle_hysteresis_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t idle_hysteresis_time_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct kbase_device *kbdev;
- u32 dur = 0;
+ u32 dur_us = 0;
+
+ CSTD_UNUSED(attr);
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- if (kstrtou32(buf, 0, &dur)) {
+ if (kstrtou32(buf, 0, &dur_us)) {
dev_err(kbdev->dev, "Couldn't process idle_hysteresis_time write operation.\n"
- "Use format <idle_hysteresis_time>\n");
+ "Use format <idle_hysteresis_time>\n");
return -EINVAL;
}
 	/* In sysfs, the unit of the input value of idle_hysteresis_time is us,
 	 * but the unit of the input parameter of this function is ns, so multiply by 1000.
*/
- kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, dur * NSEC_PER_USEC);
+ kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, (u64)dur_us * NSEC_PER_USEC);
- return count;
+ return (ssize_t)count;
}
/**
@@ -5574,24 +5550,26 @@ static ssize_t idle_hysteresis_time_store(struct device *dev,
* @attr: The attributes of the sysfs file.
* @buf: The output buffer to receive the GPU information.
*
- * This function is called to get the current idle hysteresis duration in ms.
+ * This function is called to get the current idle hysteresis duration in us.
*
* Return: The number of bytes output to @buf.
*/
-static ssize_t idle_hysteresis_time_show(struct device *dev,
- struct device_attribute *attr, char * const buf)
+static ssize_t idle_hysteresis_time_show(struct device *dev, struct device_attribute *attr,
+ char *const buf)
{
struct kbase_device *kbdev;
ssize_t ret;
- u32 dur;
+ u64 dur_us;
+
+ CSTD_UNUSED(attr);
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- /* The unit of return value of idle_hysteresis_time_show is us, So divide by 1000.*/
- dur = kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev) / NSEC_PER_USEC;
- ret = scnprintf(buf, PAGE_SIZE, "%u\n", dur);
+	/* The unit of the return value of idle_hysteresis_time_show is us, so divide by 1000 */
+ dur_us = div_u64(kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev), NSEC_PER_USEC);
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n", (u32)dur_us);
return ret;
}
@@ -5618,19 +5596,19 @@ static ssize_t idle_hysteresis_time_ns_store(struct device *dev, struct device_a
const char *buf, size_t count)
{
struct kbase_device *kbdev;
- u32 dur = 0;
+ u64 dur_ns = 0;
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- if (kstrtou32(buf, 0, &dur)) {
+ if (kstrtou64(buf, 0, &dur_ns)) {
dev_err(kbdev->dev, "Couldn't process idle_hysteresis_time_ns write operation.\n"
"Use format <idle_hysteresis_time_ns>\n");
return -EINVAL;
}
- kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, dur);
+ kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, dur_ns);
return count;
}
@@ -5652,14 +5630,14 @@ static ssize_t idle_hysteresis_time_ns_show(struct device *dev, struct device_at
{
struct kbase_device *kbdev;
ssize_t ret;
- u32 dur;
+ u64 dur_ns;
kbdev = to_kbase_device(dev);
if (!kbdev)
return -ENODEV;
- dur = kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev);
- ret = scnprintf(buf, PAGE_SIZE, "%u\n", dur);
+ dur_ns = kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev);
+ ret = scnprintf(buf, PAGE_SIZE, "%llu\n", dur_ns);
return ret;
}
@@ -5683,14 +5661,16 @@ static ssize_t mcu_shader_pwroff_timeout_show(struct device *dev, struct device_
char *const buf)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
- u32 pwroff;
+ u64 pwroff_us;
+
+ CSTD_UNUSED(attr);
if (!kbdev)
return -ENODEV;
- /* The unit of return value of the function is us, So divide by 1000.*/
- pwroff = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev) / NSEC_PER_USEC;
- return scnprintf(buf, PAGE_SIZE, "%u\n", pwroff);
+	/* The unit of the return value of the function is us, so divide by 1000 */
+ pwroff_us = div_u64(kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev), NSEC_PER_USEC);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", (u32)pwroff_us);
}
/**
@@ -5711,28 +5691,30 @@ static ssize_t mcu_shader_pwroff_timeout_store(struct device *dev, struct device
const char *buf, size_t count)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
- u32 dur;
+ u32 dur_us;
const struct kbase_pm_policy *current_policy;
bool always_on;
+ CSTD_UNUSED(attr);
+
if (!kbdev)
return -ENODEV;
- if (kstrtouint(buf, 0, &dur))
+ if (kstrtou32(buf, 0, &dur_us))
return -EINVAL;
current_policy = kbase_pm_get_policy(kbdev);
always_on = current_policy == &kbase_pm_always_on_policy_ops;
- if (dur == 0 && !always_on)
+ if (dur_us == 0 && !always_on)
return -EINVAL;
 	/* In sysfs, the unit of the input value of mcu_shader_pwroff_timeout is us,
 	 * but the unit of the input parameter of this function is ns, so multiply by 1000.
*/
- kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, dur * NSEC_PER_USEC);
+ kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, (u64)dur_us * NSEC_PER_USEC);
- return count;
+ return (ssize_t)count;
}
static DEVICE_ATTR_RW(mcu_shader_pwroff_timeout);
@@ -5754,13 +5736,13 @@ static ssize_t mcu_shader_pwroff_timeout_ns_show(struct device *dev, struct devi
char *const buf)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
- u32 pwroff;
+ u64 pwroff_ns;
if (!kbdev)
return -ENODEV;
- pwroff = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", pwroff);
+ pwroff_ns = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", pwroff_ns);
}
/**
@@ -5781,7 +5763,7 @@ static ssize_t mcu_shader_pwroff_timeout_ns_store(struct device *dev, struct dev
const char *buf, size_t count)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
- u32 dur;
+ u64 dur_ns;
const struct kbase_pm_policy *current_policy;
bool always_on;
@@ -5789,15 +5771,15 @@ static ssize_t mcu_shader_pwroff_timeout_ns_store(struct device *dev, struct dev
if (!kbdev)
return -ENODEV;
- if (kstrtouint(buf, 0, &dur))
+ if (kstrtou64(buf, 0, &dur_ns))
return -EINVAL;
current_policy = kbase_pm_get_policy(kbdev);
always_on = current_policy == &kbase_pm_always_on_policy_ops;
- if (dur == 0 && !always_on)
+ if (dur_ns == 0 && !always_on)
return -EINVAL;
- kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, dur);
+ kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, dur_ns);
return count;
}
@@ -5832,6 +5814,8 @@ static struct attribute *kbase_attrs[] = {
&dev_attr_js_scheduling_period.attr,
#else
&dev_attr_csg_scheduling_period.attr,
+ &dev_attr_add_prioritized_process.attr,
+ &dev_attr_remove_prioritized_process.attr,
&dev_attr_fw_timeout.attr,
&dev_attr_idle_hysteresis_time.attr,
&dev_attr_idle_hysteresis_time_ns.attr,
@@ -5850,12 +5834,9 @@ static struct attribute *kbase_attrs[] = {
NULL
};
-static struct attribute *kbase_mempool_attrs[] = {
- &dev_attr_max_size.attr,
- &dev_attr_lp_max_size.attr,
- &dev_attr_ctx_default_max_size.attr,
- NULL
-};
+static struct attribute *kbase_mempool_attrs[] = { &dev_attr_max_size.attr,
+ &dev_attr_lp_max_size.attr,
+ &dev_attr_ctx_default_max_size.attr, NULL };
#define SYSFS_SCHEDULING_GROUP "scheduling"
static const struct attribute_group kbase_scheduling_attr_group = {
@@ -5887,25 +5868,18 @@ int kbase_sysfs_init(struct kbase_device *kbdev)
if (err)
return err;
- err = sysfs_create_group(&kbdev->dev->kobj,
- &kbase_scheduling_attr_group);
+ err = sysfs_create_group(&kbdev->dev->kobj, &kbase_scheduling_attr_group);
if (err) {
- dev_err(kbdev->dev, "Creation of %s sysfs group failed",
- SYSFS_SCHEDULING_GROUP);
- sysfs_remove_group(&kbdev->dev->kobj,
- &kbase_attr_group);
+ dev_err(kbdev->dev, "Creation of %s sysfs group failed", SYSFS_SCHEDULING_GROUP);
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
return err;
}
- err = sysfs_create_group(&kbdev->dev->kobj,
- &kbase_mempool_attr_group);
+ err = sysfs_create_group(&kbdev->dev->kobj, &kbase_mempool_attr_group);
if (err) {
- dev_err(kbdev->dev, "Creation of %s sysfs group failed",
- SYSFS_MEMPOOL_GROUP);
- sysfs_remove_group(&kbdev->dev->kobj,
- &kbase_scheduling_attr_group);
- sysfs_remove_group(&kbdev->dev->kobj,
- &kbase_attr_group);
+ dev_err(kbdev->dev, "Creation of %s sysfs group failed", SYSFS_MEMPOOL_GROUP);
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_scheduling_attr_group);
+ sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
}
return err;
@@ -5980,8 +5954,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
if (err) {
if (err == -EPROBE_DEFER)
- dev_info(kbdev->dev,
- "Device initialization Deferred\n");
+ dev_info(kbdev->dev, "Device initialization Deferred\n");
else
dev_err(kbdev->dev, "Device initialization failed\n");
@@ -5999,8 +5972,11 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
dev_info(kbdev->dev, KBASE_COMPILED_MMAP_MIN_ADDR_MSG);
#endif
- dev_info(kbdev->dev,
- "Probed as %s\n", dev_name(kbdev->mdev.this_device));
+ dev_info(kbdev->dev, "Probed as %s\n", dev_name(kbdev->mdev.this_device));
+ if (PAGE_SHIFT != 12)
+ dev_warn(kbdev->dev, "Experimental feature: %s with Page Size of %luKiB",
+ dev_name(kbdev->mdev.this_device), PAGE_SIZE / 1024);
+
kbase_increment_device_id();
#if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
mutex_unlock(&kbase_probe_mutex);
@@ -6167,7 +6143,6 @@ static int kbase_device_runtime_resume(struct device *dev)
}
#endif /* KBASE_PM_RUNTIME */
-
#ifdef KBASE_PM_RUNTIME
/**
* kbase_device_runtime_idle - Runtime idle callback from the OS.
@@ -6270,9 +6245,8 @@ module_init(kbase_driver_init);
module_exit(kbase_driver_exit);
#endif
MODULE_LICENSE("GPL");
-MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
- __stringify(BASE_UK_VERSION_MAJOR) "." \
- __stringify(BASE_UK_VERSION_MINOR) ")");
+MODULE_VERSION(MALI_RELEASE_NAME " (UK version " __stringify(BASE_UK_VERSION_MAJOR) "." __stringify(
+ BASE_UK_VERSION_MINOR) ")");
MODULE_SOFTDEP("pre: memory_group_manager");
MODULE_INFO(import_ns, "DMA_BUF");
@@ -6291,12 +6265,11 @@ void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value)
trace_mali_pm_status(dev_id, event, value);
}
-void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx, u8 atom_id)
+void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx,
+ u8 atom_id)
{
- trace_mali_job_slots_event(dev_id, event,
- (kctx != NULL ? kctx->tgid : 0),
- (kctx != NULL ? kctx->pid : 0),
- atom_id);
+ trace_mali_job_slots_event(dev_id, event, (kctx != NULL ? (u32)kctx->tgid : 0U),
+ (kctx != NULL ? (u32)kctx->pid : 0U), atom_id);
}
void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value)