author     Jörg Wagner <jorwag@google.com>  2023-12-14 09:44:26 +0000
committer  Jörg Wagner <jorwag@google.com>  2023-12-14 09:44:26 +0000
commit     049a542207ed694271316782397b78b2e202086a (patch)
tree       105e9378d4d5062dc72109fdd4a77c915bd9425d /mali_kbase/tl
parent     e61eb93296e9f940b32d4ad4b0c3a5557cbeaf17 (diff)
Update KMD to r47p0
Provenance: ipdelivery@ad01e50d640910a99224382bb227e6d4de627657
Change-Id: I19ac9bce34a5c5a319c1b4a388e8b037b3dfe6e7
Diffstat (limited to 'mali_kbase/tl')
-rw-r--r--  mali_kbase/tl/backend/mali_kbase_timeline_csf.c   62
-rw-r--r--  mali_kbase/tl/backend/mali_kbase_timeline_jm.c    32
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline.c               64
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline.h                8
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline_io.c            83
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline_priv.h          20
-rw-r--r--  mali_kbase/tl/mali_kbase_tl_serialize.h           28
-rw-r--r--  mali_kbase/tl/mali_kbase_tlstream.c              118
-rw-r--r--  mali_kbase/tl/mali_kbase_tlstream.h               17
-rw-r--r--  mali_kbase/tl/mali_kbase_tracepoints.c            10
-rw-r--r--  mali_kbase/tl/mali_kbase_tracepoints.h           286
11 files changed, 319 insertions, 409 deletions
diff --git a/mali_kbase/tl/backend/mali_kbase_timeline_csf.c b/mali_kbase/tl/backend/mali_kbase_timeline_csf.c
index a6062f1..a91278d 100644
--- a/mali_kbase/tl/backend/mali_kbase_timeline_csf.c
+++ b/mali_kbase/tl/backend/mali_kbase_timeline_csf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,25 +25,14 @@
#include <mali_kbase.h>
-#define GPU_FEATURES_CROSS_STREAM_SYNC_MASK (1ull << 3ull)
-
void kbase_create_timeline_objects(struct kbase_device *kbdev)
{
- unsigned int as_nr;
+ int as_nr;
unsigned int slot_i;
struct kbase_context *kctx;
struct kbase_timeline *timeline = kbdev->timeline;
- struct kbase_tlstream *summary =
- &kbdev->timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
- u32 const kbdev_has_cross_stream_sync =
- (kbdev->gpu_props.props.raw_props.gpu_features &
- GPU_FEATURES_CROSS_STREAM_SYNC_MASK) ?
- 1 :
- 0;
- u32 const arch_maj = (kbdev->gpu_props.props.raw_props.gpu_id &
- GPU_ID2_ARCH_MAJOR) >>
- GPU_ID2_ARCH_MAJOR_SHIFT;
- u32 const num_sb_entries = arch_maj >= 11 ? 16 : 8;
+ struct kbase_tlstream *summary = &kbdev->timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
+ u32 const num_sb_entries = kbdev->gpu_props.gpu_id.arch_major >= 11 ? 16 : 8;
u32 const supports_gpu_sleep =
#ifdef KBASE_PM_RUNTIME
kbdev->pm.backend.gpu_sleep_supported;
@@ -53,26 +42,21 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
/* Summarize the Address Space objects. */
for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
+ __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], (u32)as_nr);
/* Create Legacy GPU object to track in AOM for dumping */
- __kbase_tlstream_tl_new_gpu(summary,
- kbdev,
- kbdev->gpu_props.props.raw_props.gpu_id,
- kbdev->gpu_props.num_cores);
-
+ __kbase_tlstream_tl_new_gpu(summary, kbdev, kbdev->id, kbdev->gpu_props.num_cores);
for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- __kbase_tlstream_tl_lifelink_as_gpu(summary,
- &kbdev->as[as_nr],
- kbdev);
+ __kbase_tlstream_tl_lifelink_as_gpu(summary, &kbdev->as[as_nr], kbdev);
/* Trace the creation of a new kbase device and set its properties. */
- __kbase_tlstream_tl_kbase_new_device(summary, kbdev->gpu_props.props.raw_props.gpu_id,
- kbdev->gpu_props.num_cores,
- kbdev->csf.global_iface.group_num,
- kbdev->nr_hw_address_spaces, num_sb_entries,
- kbdev_has_cross_stream_sync, supports_gpu_sleep);
+ __kbase_tlstream_tl_kbase_new_device(
+ summary, kbdev->id, kbdev->gpu_props.num_cores, kbdev->csf.global_iface.group_num,
+ (u32)kbdev->nr_hw_address_spaces, num_sb_entries,
+ kbdev->gpu_props.gpu_features.cross_stream_sync, supports_gpu_sleep,
+ 0
+ );
/* Lock the context list, to ensure no changes to the list are made
* while we're summarizing the contexts and their contents.
@@ -87,15 +71,12 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
mutex_lock(&kbdev->csf.scheduler.lock);
for (slot_i = 0; slot_i < kbdev->csf.global_iface.group_num; slot_i++) {
-
struct kbase_queue_group *group =
kbdev->csf.scheduler.csg_slots[slot_i].resident_group;
if (group)
__kbase_tlstream_tl_kbase_device_program_csg(
- summary,
- kbdev->gpu_props.props.raw_props.gpu_id,
- group->kctx->id, group->handle, slot_i, 0);
+ summary, kbdev->id, group->kctx->id, group->handle, slot_i, 0);
}
/* Reset body stream buffers while holding the kctx lock.
@@ -110,8 +91,7 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
/* For each context in the device... */
list_for_each_entry(kctx, &timeline->tl_kctx_list, tl_kctx_list_node) {
size_t i;
- struct kbase_tlstream *body =
- &timeline->streams[TL_STREAM_TYPE_OBJ];
+ struct kbase_tlstream *body = &timeline->streams[TL_STREAM_TYPE_OBJ];
/* Lock the context's KCPU queues, to ensure no KCPU-queue
* related actions can occur in this context from now on.
@@ -135,20 +115,14 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
* hasn't been traced yet. They may, however, cause benign
* errors to be emitted.
*/
- __kbase_tlstream_tl_kbase_new_ctx(body, kctx->id,
- kbdev->gpu_props.props.raw_props.gpu_id);
+ __kbase_tlstream_tl_kbase_new_ctx(body, kctx->id, kbdev->id);
/* Also trace with the legacy AOM tracepoint for dumping */
- __kbase_tlstream_tl_new_ctx(body,
- kctx,
- kctx->id,
- (u32)(kctx->tgid));
+ __kbase_tlstream_tl_new_ctx(body, kctx, kctx->id, (u32)(kctx->tgid));
/* Trace the currently assigned address space */
if (kctx->as_nr != KBASEP_AS_NR_INVALID)
- __kbase_tlstream_tl_kbase_ctx_assign_as(body, kctx->id,
- kctx->as_nr);
-
+ __kbase_tlstream_tl_kbase_ctx_assign_as(body, kctx->id, (u32)kctx->as_nr);
/* Trace all KCPU queues in the context into the body stream.
* As we acquired the KCPU lock after resetting the body stream,
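Note on mali_kbase_timeline_csf.c: the behavioural change here is how GPU feature bits reach the tracepoints. The hand-rolled GPU_FEATURES_CROSS_STREAM_SYNC_MASK test against the raw feature word is gone; callers now read a pre-decoded field from gpu_props. A minimal sketch of the old decode for comparison (only the bit position comes from the removed mask; u32/u64 are the kernel types from <linux/types.h>):

	#include <linux/types.h>

	/* r44p1-style: mask bit 3 of the raw 64-bit GPU_FEATURES word. */
	#define GPU_FEATURES_CROSS_STREAM_SYNC_MASK (1ull << 3ull)

	static u32 has_cross_stream_sync_old(u64 raw_gpu_features)
	{
		return (raw_gpu_features & GPU_FEATURES_CROSS_STREAM_SYNC_MASK) ? 1 : 0;
	}

	/* r47p0-style: gpu_props carries decoded fields, so the call site
	 * passes kbdev->gpu_props.gpu_features.cross_stream_sync directly.
	 */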
diff --git a/mali_kbase/tl/backend/mali_kbase_timeline_jm.c b/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
index 9ba89f5..3e9e6e8 100644
--- a/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
+++ b/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,39 +28,32 @@
void kbase_create_timeline_objects(struct kbase_device *kbdev)
{
unsigned int lpu_id;
- unsigned int as_nr;
+ int as_nr;
struct kbase_context *kctx;
struct kbase_timeline *timeline = kbdev->timeline;
- struct kbase_tlstream *summary =
- &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
+ struct kbase_tlstream *summary = &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
/* Summarize the LPU objects. */
for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- u32 *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
- __kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, *lpu);
+ void *lpu = &kbdev->gpu_props.js_features[lpu_id];
+
+ __kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, 0);
}
/* Summarize the Address Space objects. */
for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
+ __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], (u32)as_nr);
/* Create GPU object and make it retain all LPUs and address spaces. */
- __kbase_tlstream_tl_new_gpu(summary,
- kbdev,
- kbdev->gpu_props.props.raw_props.gpu_id,
- kbdev->gpu_props.num_cores);
+ __kbase_tlstream_tl_new_gpu(summary, kbdev, kbdev->id, kbdev->gpu_props.num_cores);
for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
- void *lpu =
- &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ void *lpu = &kbdev->gpu_props.js_features[lpu_id];
__kbase_tlstream_tl_lifelink_lpu_gpu(summary, lpu, kbdev);
}
for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
- __kbase_tlstream_tl_lifelink_as_gpu(summary,
- &kbdev->as[as_nr],
- kbdev);
+ __kbase_tlstream_tl_lifelink_as_gpu(summary, &kbdev->as[as_nr], kbdev);
/* Lock the context list, to ensure no changes to the list are made
* while we're summarizing the contexts and their contents.
@@ -70,10 +63,7 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
/* For each context in the device... */
list_for_each_entry(kctx, &timeline->tl_kctx_list, tl_kctx_list_node) {
/* Summarize the context itself */
- __kbase_tlstream_tl_new_ctx(summary,
- kctx,
- kctx->id,
- (u32)(kctx->tgid));
+ __kbase_tlstream_tl_new_ctx(summary, kctx, kctx->id, (u32)(kctx->tgid));
}
/* Reset body stream buffers while holding the kctx lock.
diff --git a/mali_kbase/tl/mali_kbase_timeline.c b/mali_kbase/tl/mali_kbase_timeline.c
index 20356d6..b4ffdaf 100644
--- a/mali_kbase/tl/mali_kbase_timeline.c
+++ b/mali_kbase/tl/mali_kbase_timeline.c
@@ -42,13 +42,11 @@
/* These values are used in mali_kbase_tracepoints.h
* to retrieve the streams from a kbase_timeline instance.
*/
-const size_t __obj_stream_offset =
- offsetof(struct kbase_timeline, streams)
- + sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_OBJ;
+const size_t __obj_stream_offset = offsetof(struct kbase_timeline, streams) +
+ sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_OBJ;
-const size_t __aux_stream_offset =
- offsetof(struct kbase_timeline, streams)
- + sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_AUX;
+const size_t __aux_stream_offset = offsetof(struct kbase_timeline, streams) +
+ sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_AUX;
/**
* kbasep_timeline_autoflush_timer_callback - autoflush timer callback
@@ -60,14 +58,13 @@ const size_t __aux_stream_offset =
static void kbasep_timeline_autoflush_timer_callback(struct timer_list *timer)
{
enum tl_stream_type stype;
- int rcode;
+ int rcode;
struct kbase_timeline *timeline =
container_of(timer, struct kbase_timeline, autoflush_timer);
CSTD_UNUSED(timer);
- for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT;
- stype++) {
+ for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT; stype++) {
struct kbase_tlstream *stream = &timeline->streams[stype];
int af_cnt = atomic_read(&stream->autoflush_counter);
@@ -77,10 +74,7 @@ static void kbasep_timeline_autoflush_timer_callback(struct timer_list *timer)
continue;
/* Check if stream should be flushed now. */
- if (af_cnt != atomic_cmpxchg(
- &stream->autoflush_counter,
- af_cnt,
- af_cnt + 1))
+ if (af_cnt != atomic_cmpxchg(&stream->autoflush_counter, af_cnt, af_cnt + 1))
continue;
if (!af_cnt)
continue;
@@ -90,18 +84,14 @@ static void kbasep_timeline_autoflush_timer_callback(struct timer_list *timer)
}
if (atomic_read(&timeline->autoflush_timer_active))
- rcode = mod_timer(
- &timeline->autoflush_timer,
- jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ rcode = mod_timer(&timeline->autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
CSTD_UNUSED(rcode);
}
-
-
/*****************************************************************************/
-int kbase_timeline_init(struct kbase_timeline **timeline,
- atomic_t *timeline_flags)
+int kbase_timeline_init(struct kbase_timeline **timeline, atomic_t *timeline_flags)
{
enum tl_stream_type i;
struct kbase_timeline *result;
@@ -121,8 +111,7 @@ int kbase_timeline_init(struct kbase_timeline **timeline,
/* Prepare stream structures. */
for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
- kbase_tlstream_init(&result->streams[i], i,
- &result->event_queue);
+ kbase_tlstream_init(&result->streams[i], i, &result->event_queue);
/* Initialize the kctx list */
mutex_init(&result->tl_kctx_list_lock);
@@ -130,8 +119,7 @@ int kbase_timeline_init(struct kbase_timeline **timeline,
/* Initialize autoflush timer. */
atomic_set(&result->autoflush_timer_active, 0);
- kbase_timer_setup(&result->autoflush_timer,
- kbasep_timeline_autoflush_timer_callback);
+ kbase_timer_setup(&result->autoflush_timer, kbasep_timeline_autoflush_timer_callback);
result->timeline_flags = timeline_flags;
#if MALI_USE_CSF
@@ -195,7 +183,7 @@ int kbase_timeline_acquire(struct kbase_device *kbdev, u32 flags)
if (WARN_ON(!timeline))
return -EFAULT;
- if (atomic_cmpxchg(timeline->timeline_flags, 0, timeline_flags))
+ if (atomic_cmpxchg(timeline->timeline_flags, 0, (int)timeline_flags))
return -EBUSY;
#if MALI_USE_CSF
@@ -271,7 +259,7 @@ void kbase_timeline_release(struct kbase_timeline *timeline)
elapsed_time = ktime_sub(ktime_get_raw(), timeline->last_acquire_time);
elapsed_time_ms = ktime_to_ms(elapsed_time);
time_to_sleep = (elapsed_time_ms < 0 ? TIMELINE_HYSTERESIS_TIMEOUT_MS :
- TIMELINE_HYSTERESIS_TIMEOUT_MS - elapsed_time_ms);
+ TIMELINE_HYSTERESIS_TIMEOUT_MS - elapsed_time_ms);
if (time_to_sleep > 0)
msleep_interruptible(time_to_sleep);
@@ -314,13 +302,10 @@ int kbase_timeline_streams_flush(struct kbase_timeline *timeline)
void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline)
{
- kbase_tlstream_reset(
- &timeline->streams[TL_STREAM_TYPE_OBJ]);
- kbase_tlstream_reset(
- &timeline->streams[TL_STREAM_TYPE_AUX]);
+ kbase_tlstream_reset(&timeline->streams[TL_STREAM_TYPE_OBJ]);
+ kbase_tlstream_reset(&timeline->streams[TL_STREAM_TYPE_AUX]);
#if MALI_USE_CSF
- kbase_tlstream_reset(
- &timeline->streams[TL_STREAM_TYPE_CSFFW]);
+ kbase_tlstream_reset(&timeline->streams[TL_STREAM_TYPE_CSFFW]);
#endif
}
@@ -364,8 +349,7 @@ void kbase_timeline_post_kbase_context_create(struct kbase_context *kctx)
* duplicate creation tracepoints.
*/
#if MALI_USE_CSF
- KBASE_TLSTREAM_TL_KBASE_NEW_CTX(
- kbdev, kctx->id, kbdev->gpu_props.props.raw_props.gpu_id);
+ KBASE_TLSTREAM_TL_KBASE_NEW_CTX(kbdev, kctx->id, kbdev->id);
#endif
/* Trace with the AOM tracepoint even in CSF for dumping */
KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx->id, 0);
@@ -393,8 +377,8 @@ void kbase_timeline_post_kbase_context_destroy(struct kbase_context *kctx)
}
#if MALI_UNIT_TEST
-void kbase_timeline_stats(struct kbase_timeline *timeline,
- u32 *bytes_collected, u32 *bytes_generated)
+void kbase_timeline_stats(struct kbase_timeline *timeline, u32 *bytes_collected,
+ u32 *bytes_generated)
{
enum tl_stream_type stype;
@@ -402,11 +386,9 @@ void kbase_timeline_stats(struct kbase_timeline *timeline,
/* Accumulate bytes generated per stream */
*bytes_generated = 0;
- for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT;
- stype++)
- *bytes_generated += atomic_read(
- &timeline->streams[stype].bytes_generated);
+ for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT; stype++)
+ *bytes_generated += (u32)atomic_read(&timeline->streams[stype].bytes_generated);
- *bytes_collected = atomic_read(&timeline->bytes_collected);
+ *bytes_collected = (u32)atomic_read(&timeline->bytes_collected);
}
#endif /* MALI_UNIT_TEST */
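For reference, the autoflush handshake the reflowed callback implements: writers reset autoflush_counter to 0 on every message (see kbase_tlstream_msgbuf_release below) and the submit path sets it to -1 once flushed, so the timer only flushes a stream that has sat idle for a full interval. A condensed sketch of the per-stream logic, using the driver's own names and kernel atomics:

	static void autoflush_tick(struct kbase_tlstream *stream)
	{
		int af_cnt = atomic_read(&stream->autoflush_counter);

		if (af_cnt < 0)
			return; /* -1: already flushed, nothing pending */

		/* Publish this tick; give up if a writer raced us meanwhile. */
		if (af_cnt != atomic_cmpxchg(&stream->autoflush_counter, af_cnt, af_cnt + 1))
			return;

		if (!af_cnt)
			return; /* 0 -> 1: fresh data, allow one more interval */

		/* Counter was already >= 1: idle for a full interval, flush. */
		kbase_tlstream_flush_stream(stream);
	}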
diff --git a/mali_kbase/tl/mali_kbase_timeline.h b/mali_kbase/tl/mali_kbase_timeline.h
index 62be6c6..47231c6 100644
--- a/mali_kbase/tl/mali_kbase_timeline.h
+++ b/mali_kbase/tl/mali_kbase_timeline.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,8 +37,7 @@ struct kbase_timeline;
* while timeline instance is valid.
* Return: zero on success, negative number on error
*/
-int kbase_timeline_init(struct kbase_timeline **timeline,
- atomic_t *timeline_flags);
+int kbase_timeline_init(struct kbase_timeline **timeline, atomic_t *timeline_flags);
/**
* kbase_timeline_term - terminate timeline infrastructure in kernel
@@ -114,7 +113,8 @@ void kbase_timeline_post_kbase_context_destroy(struct kbase_context *kctx);
* @bytes_collected: Will hold number of bytes read by the user
* @bytes_generated: Will hold number of bytes generated by trace points
*/
-void kbase_timeline_stats(struct kbase_timeline *timeline, u32 *bytes_collected, u32 *bytes_generated);
+void kbase_timeline_stats(struct kbase_timeline *timeline, u32 *bytes_collected,
+ u32 *bytes_generated);
#endif /* MALI_UNIT_TEST */
/**
diff --git a/mali_kbase/tl/mali_kbase_timeline_io.c b/mali_kbase/tl/mali_kbase_timeline_io.c
index ae57006..d98e228 100644
--- a/mali_kbase/tl/mali_kbase_timeline_io.c
+++ b/mali_kbase/tl/mali_kbase_timeline_io.c
@@ -58,6 +58,8 @@ static int kbase_unprivileged_global_profiling_set(const char *val, const struct
int new_val;
int ret = kstrtoint(val, 0, &new_val);
+ CSTD_UNUSED(kp);
+
if (ret == 0) {
if (new_val < 1)
return -EINVAL;
@@ -77,12 +79,11 @@ module_param_cb(kbase_unprivileged_global_profiling, &kbase_global_unprivileged_
&kbase_unprivileged_global_profiling, 0600);
/* The timeline stream file operations functions. */
-static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
- size_t size, loff_t *f_pos);
+static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer, size_t size,
+ loff_t *f_pos);
static __poll_t kbasep_timeline_io_poll(struct file *filp, poll_table *wait);
static int kbasep_timeline_io_release(struct inode *inode, struct file *filp);
-static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync);
+static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end, int datasync);
static bool timeline_is_permitted(void)
{
@@ -108,10 +109,9 @@ static bool timeline_is_permitted(void)
*
* Return: non-zero if any of timeline streams has at last one packet ready
*/
-static int
-kbasep_timeline_io_packet_pending(struct kbase_timeline *timeline,
- struct kbase_tlstream **ready_stream,
- unsigned int *rb_idx_raw)
+static int kbasep_timeline_io_packet_pending(struct kbase_timeline *timeline,
+ struct kbase_tlstream **ready_stream,
+ unsigned int *rb_idx_raw)
{
enum tl_stream_type i;
@@ -120,13 +120,13 @@ kbasep_timeline_io_packet_pending(struct kbase_timeline *timeline,
for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; ++i) {
struct kbase_tlstream *stream = &timeline->streams[i];
- *rb_idx_raw = atomic_read(&stream->rbi);
+ *rb_idx_raw = (unsigned int)atomic_read(&stream->rbi);
/* Read buffer index may be updated by writer in case of
* overflow. Read and write buffer indexes must be
* loaded in correct order.
*/
smp_rmb();
- if (atomic_read(&stream->wbi) != *rb_idx_raw) {
+ if ((uint)atomic_read(&stream->wbi) != *rb_idx_raw) {
*ready_stream = stream;
return 1;
}
@@ -165,12 +165,11 @@ static int kbasep_timeline_has_header_data(struct kbase_timeline *timeline)
*
* Return: 0 if success, -1 otherwise.
*/
-static inline int copy_stream_header(char __user *buffer, size_t size,
- ssize_t *copy_len, const char *hdr,
- size_t hdr_size, size_t *hdr_btc)
+static inline int copy_stream_header(char __user *buffer, size_t size, ssize_t *copy_len,
+ const char *hdr, size_t hdr_size, size_t *hdr_btc)
{
const size_t offset = hdr_size - *hdr_btc;
- const size_t copy_size = MIN(size - *copy_len, *hdr_btc);
+ const size_t copy_size = MIN((size_t)((ssize_t)size - *copy_len), *hdr_btc);
if (!*hdr_btc)
return 0;
@@ -182,7 +181,7 @@ static inline int copy_stream_header(char __user *buffer, size_t size,
return -1;
*hdr_btc -= copy_size;
- *copy_len += copy_size;
+ *copy_len += (ssize_t)copy_size;
return 0;
}
@@ -202,20 +201,18 @@ static inline int copy_stream_header(char __user *buffer, size_t size,
*
* Return: 0 if success, -1 if copy_to_user has failed.
*/
-static inline int kbasep_timeline_copy_headers(struct kbase_timeline *timeline,
- char __user *buffer, size_t size,
- ssize_t *copy_len)
+static inline int kbasep_timeline_copy_headers(struct kbase_timeline *timeline, char __user *buffer,
+ size_t size, ssize_t *copy_len)
{
- if (copy_stream_header(buffer, size, copy_len, obj_desc_header,
- obj_desc_header_size, &timeline->obj_header_btc))
+ if (copy_stream_header(buffer, size, copy_len, obj_desc_header, obj_desc_header_size,
+ &timeline->obj_header_btc))
return -1;
- if (copy_stream_header(buffer, size, copy_len, aux_desc_header,
- aux_desc_header_size, &timeline->aux_header_btc))
+ if (copy_stream_header(buffer, size, copy_len, aux_desc_header, aux_desc_header_size,
+ &timeline->aux_header_btc))
return -1;
#if MALI_USE_CSF
- if (copy_stream_header(buffer, size, copy_len,
- timeline->csf_tl_reader.tl_header.data,
+ if (copy_stream_header(buffer, size, copy_len, timeline->csf_tl_reader.tl_header.data,
timeline->csf_tl_reader.tl_header.size,
&timeline->csf_tl_reader.tl_header.btc))
return -1;
@@ -233,8 +230,8 @@ static inline int kbasep_timeline_copy_headers(struct kbase_timeline *timeline,
*
* Return: number of bytes stored in the buffer
*/
-static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
- size_t size, loff_t *f_pos)
+static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer, size_t size,
+ loff_t *f_pos)
{
ssize_t copy_len = 0;
struct kbase_timeline *timeline;
@@ -255,15 +252,14 @@ static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
mutex_lock(&timeline->reader_lock);
- while (copy_len < size) {
+ while (copy_len < (ssize_t)size) {
struct kbase_tlstream *stream = NULL;
unsigned int rb_idx_raw = 0;
unsigned int wb_idx_raw;
unsigned int rb_idx;
size_t rb_size;
- if (kbasep_timeline_copy_headers(timeline, buffer, size,
- &copy_len)) {
+ if (kbasep_timeline_copy_headers(timeline, buffer, size, &copy_len)) {
copy_len = -EFAULT;
break;
}
@@ -274,14 +270,12 @@ static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
* submitted.
*/
if (copy_len > 0) {
- if (!kbasep_timeline_io_packet_pending(
- timeline, &stream, &rb_idx_raw))
+ if (!kbasep_timeline_io_packet_pending(timeline, &stream, &rb_idx_raw))
break;
} else {
- if (wait_event_interruptible(
- timeline->event_queue,
- kbasep_timeline_io_packet_pending(
- timeline, &stream, &rb_idx_raw))) {
+ if (wait_event_interruptible(timeline->event_queue,
+ kbasep_timeline_io_packet_pending(
+ timeline, &stream, &rb_idx_raw))) {
copy_len = -ERESTARTSYS;
break;
}
@@ -296,11 +290,10 @@ static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
* If so copy its content.
*/
rb_idx = rb_idx_raw % PACKET_COUNT;
- rb_size = atomic_read(&stream->buffer[rb_idx].size);
- if (rb_size > size - copy_len)
+ rb_size = (size_t)atomic_read(&stream->buffer[rb_idx].size);
+ if (rb_size > (size_t)((ssize_t)size - copy_len))
break;
- if (copy_to_user(&buffer[copy_len], stream->buffer[rb_idx].data,
- rb_size)) {
+ if (copy_to_user(&buffer[copy_len], stream->buffer[rb_idx].data, rb_size)) {
copy_len = -EFAULT;
break;
}
@@ -311,20 +304,19 @@ static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
* that we have just sent to user.
*/
smp_rmb();
- wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx_raw = (unsigned int)atomic_read(&stream->wbi);
if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
- copy_len += rb_size;
+ copy_len += (ssize_t)rb_size;
atomic_inc(&stream->rbi);
#if MALI_UNIT_TEST
atomic_add(rb_size, &timeline->bytes_collected);
#endif /* MALI_UNIT_TEST */
} else {
- const unsigned int new_rb_idx_raw =
- wb_idx_raw - PACKET_COUNT + 1;
+ const unsigned int new_rb_idx_raw = wb_idx_raw - PACKET_COUNT + 1;
/* Adjust read buffer index to the next valid buffer */
- atomic_set(&stream->rbi, new_rb_idx_raw);
+ atomic_set(&stream->rbi, (int)new_rb_idx_raw);
}
}
@@ -454,8 +446,7 @@ static int kbasep_timeline_io_release(struct inode *inode, struct file *filp)
return 0;
}
-static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync)
+static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
CSTD_UNUSED(start);
CSTD_UNUSED(end);
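The read loop in this file relies on rbi/wbi being free-running packet counters paired with memory barriers. A stripped-down sketch of the pending-packet check and the overflow recovery shown above (driver names; PACKET_COUNT as in mali_kbase_tlstream.h):

	static int packet_pending(struct kbase_tlstream *stream, unsigned int *rb_idx_raw)
	{
		*rb_idx_raw = (unsigned int)atomic_read(&stream->rbi);
		/* Pairs with the writer's barrier: rbi must be read before wbi. */
		smp_rmb();
		return (unsigned int)atomic_read(&stream->wbi) != *rb_idx_raw;
	}

	/* After copying packet rb_idx_raw, the reader re-reads wbi: if the
	 * writer lapped the whole ring meanwhile (wbi - rbi >= PACKET_COUNT),
	 * the copied data is stale, so rbi is resynchronized to the oldest
	 * still-valid packet instead of being incremented:
	 *
	 *	atomic_set(&stream->rbi, (int)(wb_idx_raw - PACKET_COUNT + 1));
	 */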
diff --git a/mali_kbase/tl/mali_kbase_timeline_priv.h b/mali_kbase/tl/mali_kbase_timeline_priv.h
index de30bcc..ab46511 100644
--- a/mali_kbase/tl/mali_kbase_timeline_priv.h
+++ b/mali_kbase/tl/mali_kbase_timeline_priv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -59,19 +59,19 @@
*/
struct kbase_timeline {
struct kbase_tlstream streams[TL_STREAM_TYPE_COUNT];
- struct list_head tl_kctx_list;
- struct mutex tl_kctx_list_lock;
+ struct list_head tl_kctx_list;
+ struct mutex tl_kctx_list_lock;
struct timer_list autoflush_timer;
- atomic_t autoflush_timer_active;
- struct mutex reader_lock;
+ atomic_t autoflush_timer_active;
+ struct mutex reader_lock;
wait_queue_head_t event_queue;
#if MALI_UNIT_TEST
- atomic_t bytes_collected;
+ atomic_t bytes_collected;
#endif /* MALI_UNIT_TEST */
- atomic_t *timeline_flags;
- size_t obj_header_btc;
- size_t aux_header_btc;
- ktime_t last_acquire_time;
+ atomic_t *timeline_flags;
+ size_t obj_header_btc;
+ size_t aux_header_btc;
+ ktime_t last_acquire_time;
#if MALI_USE_CSF
struct kbase_csf_tl_reader csf_tl_reader;
#endif
diff --git a/mali_kbase/tl/mali_kbase_tl_serialize.h b/mali_kbase/tl/mali_kbase_tl_serialize.h
index b6aaade..cefca4c 100644
--- a/mali_kbase/tl/mali_kbase_tl_serialize.h
+++ b/mali_kbase/tl/mali_kbase_tl_serialize.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,7 +27,7 @@
#include <linux/timer.h>
/* The number of nanoseconds in a second. */
-#define NSECS_IN_SEC 1000000000ull /* ns */
+#define NSECS_IN_SEC 1000000000ull /* ns */
/**
* kbasep_serialize_bytes - serialize bytes to the message buffer
@@ -41,11 +41,7 @@
*
* Return: updated position in the buffer
*/
-static inline size_t kbasep_serialize_bytes(
- char *buffer,
- size_t pos,
- const void *bytes,
- size_t len)
+static inline size_t kbasep_serialize_bytes(char *buffer, size_t pos, const void *bytes, size_t len)
{
KBASE_DEBUG_ASSERT(buffer);
KBASE_DEBUG_ASSERT(bytes);
@@ -68,11 +64,8 @@ static inline size_t kbasep_serialize_bytes(
*
* Return: updated position in the buffer
*/
-static inline size_t kbasep_serialize_string(
- char *buffer,
- size_t pos,
- const char *string,
- size_t max_write_size)
+static inline size_t kbasep_serialize_string(char *buffer, size_t pos, const char *string,
+ size_t max_write_size)
{
u32 string_len;
@@ -84,10 +77,7 @@ static inline size_t kbasep_serialize_string(
KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
max_write_size -= sizeof(string_len);
- string_len = strscpy(
- &buffer[pos + sizeof(string_len)],
- string,
- max_write_size);
+ string_len = strscpy(&buffer[pos + sizeof(string_len)], string, max_write_size);
string_len += sizeof(char);
/* Make sure that the source string fit into the buffer. */
@@ -112,12 +102,10 @@ static inline size_t kbasep_serialize_string(
*/
static inline size_t kbasep_serialize_timestamp(void *buffer, size_t pos)
{
- u64 timestamp;
+ u64 timestamp;
timestamp = ktime_get_raw_ns();
- return kbasep_serialize_bytes(
- buffer, pos,
- &timestamp, sizeof(timestamp));
+ return kbasep_serialize_bytes(buffer, pos, &timestamp, sizeof(timestamp));
}
#endif /* _KBASE_TL_SERIALIZE_H */
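A note on the string encoder above: it reserves a u32 length prefix, copies the payload with strscpy(), and records string_len including the terminating NUL. A sketch of the resulting layout for a hypothetical call (native-endian, assuming the string fits in the buffer):

	char buffer[64];
	size_t pos = 0;

	pos = kbasep_serialize_string(buffer, pos, "hello", sizeof(buffer));

	/* Resulting layout:
	 *   buffer[0..3] = (u32)6      length, including the terminating NUL
	 *   buffer[4..9] = "hello\0"   payload copied by strscpy()
	 */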
diff --git a/mali_kbase/tl/mali_kbase_tlstream.c b/mali_kbase/tl/mali_kbase_tlstream.c
index 47059de..117417c 100644
--- a/mali_kbase/tl/mali_kbase_tlstream.c
+++ b/mali_kbase/tl/mali_kbase_tlstream.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -34,13 +34,9 @@
*
* Function sets up immutable part of packet header in the given buffer.
*/
-static void kbasep_packet_header_setup(
- char *buffer,
- enum tl_packet_family pkt_family,
- enum tl_packet_class pkt_class,
- enum tl_packet_type pkt_type,
- unsigned int stream_id,
- int numbered)
+static void kbasep_packet_header_setup(char *buffer, enum tl_packet_family pkt_family,
+ enum tl_packet_class pkt_class, enum tl_packet_type pkt_type,
+ unsigned int stream_id, int numbered)
{
u32 words[2] = {
MIPE_PACKET_HEADER_W0(pkt_family, pkt_class, pkt_type, stream_id),
@@ -58,10 +54,7 @@ static void kbasep_packet_header_setup(
* Function updates mutable part of packet header in the given buffer.
* Note that value of data_size must not include size of the header.
*/
-static void kbasep_packet_header_update(
- char *buffer,
- size_t data_size,
- int numbered)
+static void kbasep_packet_header_update(char *buffer, size_t data_size, int numbered)
{
u32 word1 = MIPE_PACKET_HEADER_W1((u32)data_size, !!numbered);
@@ -92,10 +85,8 @@ void kbase_tlstream_reset(struct kbase_tlstream *stream)
for (i = 0; i < PACKET_COUNT; i++) {
if (stream->numbered)
- atomic_set(
- &stream->buffer[i].size,
- PACKET_HEADER_SIZE +
- PACKET_NUMBER_SIZE);
+ atomic_set(&stream->buffer[i].size,
+ PACKET_HEADER_SIZE + PACKET_NUMBER_SIZE);
else
atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
}
@@ -107,9 +98,9 @@ void kbase_tlstream_reset(struct kbase_tlstream *stream)
/* Configuration of timeline streams generated by kernel. */
static const struct {
enum tl_packet_family pkt_family;
- enum tl_packet_class pkt_class;
- enum tl_packet_type pkt_type;
- enum tl_stream_id stream_id;
+ enum tl_packet_class pkt_class;
+ enum tl_packet_type pkt_type;
+ enum tl_stream_id stream_id;
} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
{
TL_PACKET_FAMILY_TL,
@@ -139,10 +130,8 @@ static const struct {
#endif
};
-void kbase_tlstream_init(
- struct kbase_tlstream *stream,
- enum tl_stream_type stream_type,
- wait_queue_head_t *ready_read)
+void kbase_tlstream_init(struct kbase_tlstream *stream, enum tl_stream_type stream_type,
+ wait_queue_head_t *ready_read)
{
unsigned int i;
@@ -158,13 +147,11 @@ void kbase_tlstream_init(
stream->numbered = 0;
for (i = 0; i < PACKET_COUNT; i++)
- kbasep_packet_header_setup(
- stream->buffer[i].data,
- tl_stream_cfg[stream_type].pkt_family,
- tl_stream_cfg[stream_type].pkt_class,
- tl_stream_cfg[stream_type].pkt_type,
- tl_stream_cfg[stream_type].stream_id,
- stream->numbered);
+ kbasep_packet_header_setup(stream->buffer[i].data,
+ tl_stream_cfg[stream_type].pkt_family,
+ tl_stream_cfg[stream_type].pkt_class,
+ tl_stream_cfg[stream_type].pkt_type,
+ tl_stream_cfg[stream_type].stream_id, stream->numbered);
#if MALI_UNIT_TEST
atomic_set(&stream->bytes_generated, 0);
@@ -193,25 +180,19 @@ void kbase_tlstream_term(struct kbase_tlstream *stream)
*
* Warning: the user must update the stream structure with returned value.
*/
-static size_t kbasep_tlstream_msgbuf_submit(
- struct kbase_tlstream *stream,
- unsigned int wb_idx_raw,
- unsigned int wb_size)
+static size_t kbasep_tlstream_msgbuf_submit(struct kbase_tlstream *stream, unsigned int wb_idx_raw,
+ unsigned int wb_size)
{
unsigned int wb_idx = wb_idx_raw % PACKET_COUNT;
/* Set stream as flushed. */
atomic_set(&stream->autoflush_counter, -1);
- kbasep_packet_header_update(
- stream->buffer[wb_idx].data,
- wb_size - PACKET_HEADER_SIZE,
- stream->numbered);
+ kbasep_packet_header_update(stream->buffer[wb_idx].data, wb_size - PACKET_HEADER_SIZE,
+ stream->numbered);
if (stream->numbered)
- kbasep_packet_number_update(
- stream->buffer[wb_idx].data,
- wb_idx_raw);
+ kbasep_packet_number_update(stream->buffer[wb_idx].data, wb_idx_raw);
/* Increasing write buffer index will expose this packet to the reader.
* As stream->lock is not taken on reader side we must make sure memory
@@ -230,30 +211,25 @@ static size_t kbasep_tlstream_msgbuf_submit(
return wb_size;
}
-char *kbase_tlstream_msgbuf_acquire(
- struct kbase_tlstream *stream,
- size_t msg_size,
- unsigned long *flags) __acquires(&stream->lock)
+char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream, size_t msg_size,
+ unsigned long *flags) __acquires(&stream->lock)
{
- unsigned int wb_idx_raw;
- unsigned int wb_idx;
- size_t wb_size;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
- KBASE_DEBUG_ASSERT(
- PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
- msg_size);
+ KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >= msg_size);
spin_lock_irqsave(&stream->lock, *flags);
- wb_idx_raw = atomic_read(&stream->wbi);
- wb_idx = wb_idx_raw % PACKET_COUNT;
- wb_size = atomic_read(&stream->buffer[wb_idx].size);
+ wb_idx_raw = (unsigned int)atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = (size_t)atomic_read(&stream->buffer[wb_idx].size);
/* Select next buffer if data will not fit into current one. */
if (wb_size + msg_size > PACKET_SIZE) {
- wb_size = kbasep_tlstream_msgbuf_submit(
- stream, wb_idx_raw, wb_size);
- wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ wb_size = kbasep_tlstream_msgbuf_submit(stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
}
/* Reserve space in selected buffer. */
@@ -266,9 +242,8 @@ char *kbase_tlstream_msgbuf_acquire(
return &stream->buffer[wb_idx].data[wb_size];
}
-void kbase_tlstream_msgbuf_release(
- struct kbase_tlstream *stream,
- unsigned long flags) __releases(&stream->lock)
+void kbase_tlstream_msgbuf_release(struct kbase_tlstream *stream, unsigned long flags)
+ __releases(&stream->lock)
{
/* Mark stream as containing unflushed data. */
atomic_set(&stream->autoflush_counter, 0);
@@ -276,28 +251,25 @@ void kbase_tlstream_msgbuf_release(
spin_unlock_irqrestore(&stream->lock, flags);
}
-size_t kbase_tlstream_flush_stream(
- struct kbase_tlstream *stream)
+size_t kbase_tlstream_flush_stream(struct kbase_tlstream *stream)
{
- unsigned long flags;
- unsigned int wb_idx_raw;
- unsigned int wb_idx;
- size_t wb_size;
- size_t min_size = PACKET_HEADER_SIZE;
-
+ unsigned long flags;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+ size_t min_size = PACKET_HEADER_SIZE;
if (stream->numbered)
min_size += PACKET_NUMBER_SIZE;
spin_lock_irqsave(&stream->lock, flags);
- wb_idx_raw = atomic_read(&stream->wbi);
- wb_idx = wb_idx_raw % PACKET_COUNT;
- wb_size = atomic_read(&stream->buffer[wb_idx].size);
+ wb_idx_raw = (unsigned int)atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = (size_t)atomic_read(&stream->buffer[wb_idx].size);
if (wb_size > min_size) {
- wb_size = kbasep_tlstream_msgbuf_submit(
- stream, wb_idx_raw, wb_size);
+ wb_size = kbasep_tlstream_msgbuf_submit(stream, wb_idx_raw, wb_size);
wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
atomic_set(&stream->buffer[wb_idx].size, wb_size);
} else {
diff --git a/mali_kbase/tl/mali_kbase_tlstream.h b/mali_kbase/tl/mali_kbase_tlstream.h
index c142849..fe3430e 100644
--- a/mali_kbase/tl/mali_kbase_tlstream.h
+++ b/mali_kbase/tl/mali_kbase_tlstream.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -80,7 +80,7 @@ struct kbase_tlstream {
atomic_t wbi;
atomic_t rbi;
- int numbered;
+ int numbered;
atomic_t autoflush_counter;
wait_queue_head_t *ready_read;
#if MALI_UNIT_TEST
@@ -107,9 +107,8 @@ enum tl_stream_type {
* @ready_read: Pointer to a wait queue to signal when
* timeline messages are ready for collection.
*/
-void kbase_tlstream_init(struct kbase_tlstream *stream,
- enum tl_stream_type stream_type,
- wait_queue_head_t *ready_read);
+void kbase_tlstream_init(struct kbase_tlstream *stream, enum tl_stream_type stream_type,
+ wait_queue_head_t *ready_read);
/**
* kbase_tlstream_term - terminate timeline stream
@@ -140,8 +139,8 @@ void kbase_tlstream_reset(struct kbase_tlstream *stream);
* Only atomic operations are allowed while the stream is locked
* (i.e. do not use any operation that may sleep).
*/
-char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream,
- size_t msg_size, unsigned long *flags) __acquires(&stream->lock);
+char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream, size_t msg_size,
+ unsigned long *flags) __acquires(&stream->lock);
/**
* kbase_tlstream_msgbuf_release - unlock selected stream
@@ -151,8 +150,8 @@ char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream,
* Release the stream that has been previously
* locked with a call to kbase_tlstream_msgbuf_acquire().
*/
-void kbase_tlstream_msgbuf_release(struct kbase_tlstream *stream,
- unsigned long flags) __releases(&stream->lock);
+void kbase_tlstream_msgbuf_release(struct kbase_tlstream *stream, unsigned long flags)
+ __releases(&stream->lock);
/**
* kbase_tlstream_flush_stream - flush stream
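Canonical usage of the msgbuf API declared in this header, exactly as the generated tracepoints in mali_kbase_tracepoints.c use it: acquire under the stream spinlock, serialize msg id, timestamp, and arguments, then release. A sketch with a hypothetical message id and field:

	static void emit_example(struct kbase_tlstream *stream, u32 value)
	{
		const u32 msg_id = 0x42; /* hypothetical id */
		const size_t msg_size = sizeof(msg_id) + sizeof(u64) /* timestamp */
				      + sizeof(value);
		unsigned long acq_flags;
		size_t pos = 0;
		char *buffer;

		buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
		pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
		pos = kbasep_serialize_timestamp(buffer, pos);
		pos = kbasep_serialize_bytes(buffer, pos, &value, sizeof(value));
		kbase_tlstream_msgbuf_release(stream, acq_flags);
	}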
diff --git a/mali_kbase/tl/mali_kbase_tracepoints.c b/mali_kbase/tl/mali_kbase_tracepoints.c
index f62c755..7427358 100644
--- a/mali_kbase/tl/mali_kbase_tracepoints.c
+++ b/mali_kbase/tl/mali_kbase_tracepoints.c
@@ -358,8 +358,8 @@ enum tl_msg_id_obj {
"atom,edit_addr,new_addr,jit_flags,mem_flags,j_id,com_pgs,extent,va_pgs") \
TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_DEVICE, \
"New KBase Device", \
- "@IIIIIII", \
- "kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs,kbase_device_as_count,kbase_device_sb_entry_count,kbase_device_has_cross_stream_sync,kbase_device_supports_gpu_sleep") \
+ "@IIIIIIII", \
+ "kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs,kbase_device_as_count,kbase_device_sb_entry_count,kbase_device_has_cross_stream_sync,kbase_device_supports_gpu_sleep,kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db") \
TRACEPOINT_DESC(KBASE_TL_KBASE_GPUCMDQUEUE_KICK, \
"Kernel receives a request to process new GPU queue instructions", \
"@IL", \
@@ -2093,7 +2093,8 @@ void __kbase_tlstream_tl_kbase_new_device(
u32 kbase_device_as_count,
u32 kbase_device_sb_entry_count,
u32 kbase_device_has_cross_stream_sync,
- u32 kbase_device_supports_gpu_sleep
+ u32 kbase_device_supports_gpu_sleep,
+ u32 kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db
)
{
const u32 msg_id = KBASE_TL_KBASE_NEW_DEVICE;
@@ -2105,6 +2106,7 @@ void __kbase_tlstream_tl_kbase_new_device(
+ sizeof(kbase_device_sb_entry_count)
+ sizeof(kbase_device_has_cross_stream_sync)
+ sizeof(kbase_device_supports_gpu_sleep)
+ + sizeof(kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db)
;
char *buffer;
unsigned long acq_flags;
@@ -2128,6 +2130,8 @@ void __kbase_tlstream_tl_kbase_new_device(
pos, &kbase_device_has_cross_stream_sync, sizeof(kbase_device_has_cross_stream_sync));
pos = kbasep_serialize_bytes(buffer,
pos, &kbase_device_supports_gpu_sleep, sizeof(kbase_device_supports_gpu_sleep));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db, sizeof(kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db));
kbase_tlstream_msgbuf_release(stream, acq_flags);
}
diff --git a/mali_kbase/tl/mali_kbase_tracepoints.h b/mali_kbase/tl/mali_kbase_tracepoints.h
index f1f4761..f5b5b39 100644
--- a/mali_kbase/tl/mali_kbase_tracepoints.h
+++ b/mali_kbase/tl/mali_kbase_tracepoints.h
@@ -77,6 +77,11 @@ extern const size_t aux_desc_header_size;
#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED
+#define TL_PM_STATE_SHADER 0x100
+#define TL_PM_STATE_TILER 0x110
+#define TL_PM_STATE_L2 0x120
+#define TL_PM_STATE_STACK 0xE00
+
#define TLSTREAM_ENABLED (1u << 31)
void __kbase_tlstream_tl_new_ctx(
@@ -393,7 +398,8 @@ void __kbase_tlstream_tl_kbase_new_device(
u32 kbase_device_as_count,
u32 kbase_device_sb_entry_count,
u32 kbase_device_has_cross_stream_sync,
- u32 kbase_device_supports_gpu_sleep
+ u32 kbase_device_supports_gpu_sleep,
+ u32 kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db
);
void __kbase_tlstream_tl_kbase_gpucmdqueue_kick(
@@ -878,7 +884,7 @@ struct kbase_tlstream;
tgid \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -903,7 +909,7 @@ struct kbase_tlstream;
core_count \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_gpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -928,7 +934,7 @@ struct kbase_tlstream;
lpu_fn \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -951,7 +957,7 @@ struct kbase_tlstream;
atom_nr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_atom( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -973,7 +979,7 @@ struct kbase_tlstream;
as_nr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -993,7 +999,7 @@ struct kbase_tlstream;
ctx \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_del_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1012,7 +1018,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_del_atom( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1033,7 +1039,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_lifelink_lpu_gpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1055,7 +1061,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_lifelink_as_gpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1077,7 +1083,7 @@ struct kbase_tlstream;
lpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_ctx_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1099,7 +1105,7 @@ struct kbase_tlstream;
ctx \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_atom_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1123,7 +1129,7 @@ struct kbase_tlstream;
attrib_match_list \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_atom_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1146,7 +1152,7 @@ struct kbase_tlstream;
lpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_ctx_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1168,7 +1174,7 @@ struct kbase_tlstream;
ctx \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_atom_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1190,7 +1196,7 @@ struct kbase_tlstream;
lpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_atom_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1212,7 +1218,7 @@ struct kbase_tlstream;
ctx \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_as_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1234,7 +1240,7 @@ struct kbase_tlstream;
ctx \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_as_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1256,7 +1262,7 @@ struct kbase_tlstream;
address_space \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_atom_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1278,7 +1284,7 @@ struct kbase_tlstream;
address_space \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_atom_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1304,7 +1310,7 @@ struct kbase_tlstream;
config \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_atom_config( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1328,7 +1334,7 @@ struct kbase_tlstream;
j_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jit_usedpages( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1364,7 +1370,7 @@ struct kbase_tlstream;
usg_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_atom_jitallocinfo( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1393,7 +1399,7 @@ struct kbase_tlstream;
j_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1419,7 +1425,7 @@ struct kbase_tlstream;
transcfg \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_as_config( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1441,7 +1447,7 @@ struct kbase_tlstream;
lpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_lpu_softstop( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1460,7 +1466,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softstop_ex( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1479,7 +1485,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softstop_issue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1498,7 +1504,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softjob_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1517,7 +1523,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softjob_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1536,7 +1542,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_granted( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1555,7 +1561,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_started( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1574,7 +1580,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_stop_requested( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1593,7 +1599,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_stopped( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1612,7 +1618,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_requested( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1631,7 +1637,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_jd_gpu_soft_reset( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1654,7 +1660,7 @@ struct kbase_tlstream;
chunk_va \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_jd_tiler_heap_chunk_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1675,7 +1681,7 @@ struct kbase_tlstream;
dummy \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_js_sched_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1694,7 +1700,7 @@ struct kbase_tlstream;
dummy \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_js_sched_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1713,7 +1719,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_submit_atom_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1732,7 +1738,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_submit_atom_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1751,7 +1757,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_no_lock_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1770,7 +1776,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_no_lock_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1789,7 +1795,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1808,7 +1814,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1827,7 +1833,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_atom_complete( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1848,7 +1854,7 @@ struct kbase_tlstream;
atom_nr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_run_atom_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1870,7 +1876,7 @@ struct kbase_tlstream;
atom_nr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_run_atom_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1892,7 +1898,7 @@ struct kbase_tlstream;
prio \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_tl_attrib_atom_priority( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1914,7 +1920,7 @@ struct kbase_tlstream;
state \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_tl_attrib_atom_state( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1934,7 +1940,7 @@ struct kbase_tlstream;
atom \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_tl_attrib_atom_prioritized( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1969,7 +1975,7 @@ struct kbase_tlstream;
va_pgs \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
__kbase_tlstream_tl_attrib_atom_jit( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -1996,6 +2002,7 @@ struct kbase_tlstream;
* @kbase_device_sb_entry_count: The number of entries each scoreboard set in the physical hardware has available
* @kbase_device_has_cross_stream_sync: Whether cross-stream synchronization is supported
* @kbase_device_supports_gpu_sleep: Whether GPU sleep is supported
+ * @kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db: Whether v34932631451e2dea4ed0fab0025a0d2767d5e427 is supported
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
@@ -2006,10 +2013,11 @@ struct kbase_tlstream;
kbase_device_as_count, \
kbase_device_sb_entry_count, \
kbase_device_has_cross_stream_sync, \
- kbase_device_supports_gpu_sleep \
+ kbase_device_supports_gpu_sleep, \
+ kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_new_device( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2019,7 +2027,8 @@ struct kbase_tlstream;
kbase_device_as_count, \
kbase_device_sb_entry_count, \
kbase_device_has_cross_stream_sync, \
- kbase_device_supports_gpu_sleep \
+ kbase_device_supports_gpu_sleep, \
+ kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db \
); \
} while (0)
#else
@@ -2031,7 +2040,8 @@ struct kbase_tlstream;
kbase_device_as_count, \
kbase_device_sb_entry_count, \
kbase_device_has_cross_stream_sync, \
- kbase_device_supports_gpu_sleep \
+ kbase_device_supports_gpu_sleep, \
+ kbase_device_has_vd54d34dbb40917c8cea48cca407a8789413be0db \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
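In the !MALI_USE_CSF branch the macro keeps the new argument in its
parameter list but expands to an empty do { } while (0), so call sites
compile unchanged on both build flavours while the gated body vanishes.
A compact illustration of the pattern, with hypothetical names in place
of the kbase machinery:

    #include <stdio.h>

    #define USE_CSF 0 /* stand-in for MALI_USE_CSF */

    #if USE_CSF
    #define TRACE_NEW_DEVICE(id, feature_flag) \
        do { \
            printf("new_device id=%u feature=%u\n", (id), (feature_flag)); \
        } while (0)
    #else
    /* Same arity as the enabled variant, so callers need no #ifdef. */
    #define TRACE_NEW_DEVICE(id, feature_flag) do { } while (0)
    #endif

    int main(void)
    {
        TRACE_NEW_DEVICE(0u, 1u); /* compiles away when USE_CSF is 0 */
        return 0;
    }

Note that the empty stub does not evaluate its arguments at all, so an
argument expression with side effects would behave differently between
the two build flavours.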
@@ -2050,7 +2060,7 @@ struct kbase_tlstream;
buffer_gpu_addr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_gpucmdqueue_kick( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2087,7 +2097,7 @@ struct kbase_tlstream;
kbase_device_csg_slot_resuming \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_program_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2124,7 +2134,7 @@ struct kbase_tlstream;
kbase_device_csg_slot_index \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_deprogram_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2157,7 +2167,7 @@ struct kbase_tlstream;
kbase_device_csg_slot_suspending \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_halting_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2190,7 +2200,7 @@ struct kbase_tlstream;
kbase_device_csg_slot_index \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_suspend_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2221,7 +2231,7 @@ struct kbase_tlstream;
kbase_device_csg_slot_index \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_csg_idle( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2252,7 +2262,7 @@ struct kbase_tlstream;
kbase_device_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_new_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2281,7 +2291,7 @@ struct kbase_tlstream;
kernel_ctx_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_del_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2310,7 +2320,7 @@ struct kbase_tlstream;
kbase_device_as_index \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_ctx_assign_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2339,7 +2349,7 @@ struct kbase_tlstream;
kernel_ctx_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_ctx_unassign_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2372,7 +2382,7 @@ struct kbase_tlstream;
kcpuq_num_pending_cmds \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_new_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2405,7 +2415,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_del_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2434,7 +2444,7 @@ struct kbase_tlstream;
fence \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2465,7 +2475,7 @@ struct kbase_tlstream;
fence \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2500,7 +2510,7 @@ struct kbase_tlstream;
inherit_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2535,7 +2545,7 @@ struct kbase_tlstream;
cqs_obj_gpu_addr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2574,7 +2584,7 @@ struct kbase_tlstream;
inherit_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait_operation( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2619,7 +2629,7 @@ struct kbase_tlstream;
data_type \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set_operation( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2656,7 +2666,7 @@ struct kbase_tlstream;
map_import_buf_gpu_addr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2687,7 +2697,7 @@ struct kbase_tlstream;
map_import_buf_gpu_addr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2718,7 +2728,7 @@ struct kbase_tlstream;
map_import_buf_gpu_addr \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2747,7 +2757,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2792,7 +2802,7 @@ struct kbase_tlstream;
jit_alloc_usage_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2837,7 +2847,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
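The jit_alloc events here (and the jit_free events below) come in
begin/item/end triplets: a variable-length array crosses the timeline
stream as one begin marker, one item event per element, and one end
marker, each expansion gated on the same CSF flag. A sketch of the
emission loop such a triplet implies, with illustrative stand-ins for
the KBASE_TLSTREAM_TL_KBASE_ARRAY_* macros:

    #include <stdio.h>

    #define TRACE_ARRAY_BEGIN(q)   printf("begin q=%p\n", (const void *)(q))
    #define TRACE_ARRAY_ITEM(q, v) printf("item  q=%p v=0x%llx\n", \
                                          (const void *)(q), \
                                          (unsigned long long)(v))
    #define TRACE_ARRAY_END(q)     printf("end   q=%p\n", (const void *)(q))

    static void trace_jit_alloc_array(const void *queue,
                                      const unsigned long long *gpu_va,
                                      int count)
    {
        int i;

        TRACE_ARRAY_BEGIN(queue);
        for (i = 0; i < count; i++)
            TRACE_ARRAY_ITEM(queue, gpu_va[i]);
        TRACE_ARRAY_END(queue);
    }

    int main(void)
    {
        const unsigned long long va[] = { 0x1000, 0x2000 };

        trace_jit_alloc_array(va, va, 2);
        return 0;
    }

Bracketing the items this way lets a consumer delimit the array from
the event stream itself, at the cost of the extra begin/end records.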
@@ -2864,7 +2874,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2893,7 +2903,7 @@ struct kbase_tlstream;
jit_alloc_jit_id \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2922,7 +2932,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2949,7 +2959,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -2980,7 +2990,7 @@ struct kbase_tlstream;
gpu_cmdq_grp_handle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3011,7 +3021,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3040,7 +3050,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3069,7 +3079,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3098,7 +3108,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3127,7 +3137,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3156,7 +3166,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3187,7 +3197,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3216,7 +3226,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3245,7 +3255,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3276,7 +3286,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set_operation( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3305,7 +3315,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3334,7 +3344,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3363,7 +3373,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3392,7 +3402,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3421,7 +3431,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3450,7 +3460,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3479,7 +3489,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3506,7 +3516,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3539,7 +3549,7 @@ struct kbase_tlstream;
jit_alloc_mmu_flags \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3572,7 +3582,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3599,7 +3609,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3626,7 +3636,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3657,7 +3667,7 @@ struct kbase_tlstream;
jit_free_pages_used \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3688,7 +3698,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3715,7 +3725,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3742,7 +3752,7 @@ struct kbase_tlstream;
kcpu_queue \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3771,7 +3781,7 @@ struct kbase_tlstream;
execute_error \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3800,7 +3810,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_reloading( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3827,7 +3837,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_enabling( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3854,7 +3864,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_request_sleep( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3881,7 +3891,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_request_wakeup( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3908,7 +3918,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_request_halt( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3935,7 +3945,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_disabling( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3962,7 +3972,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_off( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -3991,7 +4001,7 @@ struct kbase_tlstream;
csffw_cycle \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_tlstream_overflow( \
__TL_DISPATCH_STREAM(kbdev, obj), \
@@ -4021,7 +4031,7 @@ struct kbase_tlstream;
core_state_bitset \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pm_state( \
__TL_DISPATCH_STREAM(kbdev, aux), \
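From this hunk onward the macros dispatch to the aux stream rather than
the obj stream: power state, page fault and allocation statistics are
auxiliary events, while the object-model events above describe contexts,
queues and atoms. A plausible shape for the dispatch helper, under the
assumption that __TL_DISPATCH_STREAM token-pastes the stream type into
an index of a per-device stream array (the exact layout is an
assumption, not quoted from this header; the lowercase enumerators
merely match the obj/aux spelling at the call sites):

    enum tl_stream_type {
        TL_STREAM_TYPE_obj,
        TL_STREAM_TYPE_aux,
        TL_STREAM_TYPE_count
    };

    struct kbase_tlstream { int placeholder; };

    struct kbase_timeline {
        struct kbase_tlstream streams[TL_STREAM_TYPE_count];
    };

    struct kbase_device { struct kbase_timeline *timeline; };

    /* obj/aux selection resolved at compile time via token pasting. */
    #define __TL_DISPATCH_STREAM(kbdev, stype) \
        (&(kbdev)->timeline->streams[TL_STREAM_TYPE_##stype])

    int main(void)
    {
        static struct kbase_timeline timeline;
        struct kbase_device dev = { &timeline };

        return __TL_DISPATCH_STREAM(&dev, aux) ==
               &timeline.streams[TL_STREAM_TYPE_aux] ? 0 : 1;
    }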
@@ -4045,7 +4055,7 @@ struct kbase_tlstream;
page_cnt_change \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagefault( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4068,7 +4078,7 @@ struct kbase_tlstream;
page_cnt \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagesalloc( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4088,7 +4098,7 @@ struct kbase_tlstream;
target_freq \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_devfreq_target( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4117,7 +4127,7 @@ struct kbase_tlstream;
ph_pages \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_jit_stats( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4157,7 +4167,7 @@ struct kbase_tlstream;
nr_in_flight \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_tiler_heap_stats( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4190,7 +4200,7 @@ struct kbase_tlstream;
event \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_event_job_slot( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4212,7 +4222,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_protected_enter_start( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4231,7 +4241,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_protected_enter_end( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4258,7 +4268,7 @@ struct kbase_tlstream;
mmu_lock_page_num \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_mmu_command( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4281,7 +4291,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_aux_protected_leave_start( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4300,7 +4310,7 @@ struct kbase_tlstream;
gpu \
) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_aux_protected_leave_end( \
__TL_DISPATCH_STREAM(kbdev, aux), \
@@ -4322,7 +4332,7 @@ struct kbase_tlstream;
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
context, slot_nr, atom_nr, event) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
kbase_trace_mali_job_slots_event(kbdev->id, \
GATOR_MAKE_EVENT(event, slot_nr), \
context, (u8) atom_nr); \
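The hunks in this CONFIG_MALI_GATOR_SUPPORT region all follow one
scheme: the affected macro is #undef'd and redefined so the legacy
kbase_trace_mali_* gator hook fires unconditionally, while the timeline
stream write behind it stays gated on the flags snapshot (visible in
the KBASE_TLSTREAM_AUX_PM_STATE hunk just below). A reduced model of
that layering, with gator_log() and the flag plumbing as illustrative
stand-ins:

    #include <stdio.h>

    static unsigned int timeline_flags; /* runtime gate, off by default */

    static void gator_log(int event)      { printf("gator: %d\n", event); }
    static void tlstream_write(int event) { printf("tlstream: %d\n", event); }

    /* Base definition: timeline write only, gated at runtime. */
    #define TRACE_EVENT(event) \
        do { \
            if (timeline_flags) \
                tlstream_write(event); \
        } while (0)

    #define GATOR_SUPPORT 1 /* stand-in for CONFIG_MALI_GATOR_SUPPORT */

    #if GATOR_SUPPORT
    #undef TRACE_EVENT
    #define TRACE_EVENT(event) \
        do { \
            gator_log(event); /* unconditional, as in the hunks here */ \
            if (timeline_flags) \
                tlstream_write(event); \
        } while (0)
    #endif

    int main(void)
    {
        TRACE_EVENT(42); /* gator fires even though the gate is off */
        return 0;
    }

Keeping the gator call outside the flags test preserves the old
behaviour for tools that attach through gator without ever enabling the
timeline stream.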
@@ -4335,7 +4345,7 @@ struct kbase_tlstream;
#undef KBASE_TLSTREAM_AUX_PM_STATE
#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
kbase_trace_mali_pm_status(kbdev->id, \
core_type, state); \
if (enabled & TLSTREAM_ENABLED) \
@@ -4348,9 +4358,9 @@ struct kbase_tlstream;
#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
ctx_nr, as_nr, page_cnt_change) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
kbase_trace_mali_page_fault_insert_pages(kbdev->id, \
- as_nr, \
+ (int)as_nr, \
page_cnt_change); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagefault( \
@@ -4365,9 +4375,9 @@ struct kbase_tlstream;
#undef KBASE_TLSTREAM_AUX_PAGESALLOC
#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \
do { \
- int enabled = atomic_read(&kbdev->timeline_flags); \
+ u32 enabled = (u32)atomic_read(&kbdev->timeline_flags); \
u32 global_pages_count = \
- atomic_read(&kbdev->memdev.used_pages); \
+ (u32)atomic_read(&kbdev->memdev.used_pages); \
\
kbase_trace_mali_total_alloc_pages_change(kbdev->id, \
global_pages_count); \