author    Sidath Senanayake <sidaths@google.com>    2020-02-27 15:37:17 +0100
committer Sidath Senanayake <sidaths@google.com>    2020-02-27 15:37:17 +0100
commit    b2b1764ee0fe59773c1c8f621ad2955c35cd9d92 (patch)
tree      3bd90a5ac0b8bee1c0f877c3e6f219ce52b89050 /mali_kbase/tl
parent    7ed9a0b8efa3abac35833b55f8012a2a85cc67a1 (diff)
Mali Valhall DDK r23p0 KMD
Provenance: 941021020 (collaborate/EAC/v_r23p0)
VX504X08X-BU-00000-r23p0-01rel0 - Android DDK
VX504X08X-BU-60000-r23p0-01rel0 - Android Document Bundle

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: I0d87e7b3520751fd6f310e10e23498a468a7a68c
Diffstat (limited to 'mali_kbase/tl')
-rw-r--r--  mali_kbase/tl/backend/mali_kbase_timeline_jm.c  97
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline.c  272
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline.h  121
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline_io.c  314
-rw-r--r--  mali_kbase/tl/mali_kbase_timeline_priv.h  65
-rw-r--r--  mali_kbase/tl/mali_kbase_tl_serialize.h  127
-rw-r--r--  mali_kbase/tl/mali_kbase_tlstream.c  287
-rw-r--r--  mali_kbase/tl/mali_kbase_tlstream.h  167
-rw-r--r--  mali_kbase/tl/mali_kbase_trace_defs.h  261
-rw-r--r--  mali_kbase/tl/mali_kbase_tracepoints.c  2991
-rw-r--r--  mali_kbase/tl/mali_kbase_tracepoints.h  2542
11 files changed, 7244 insertions, 0 deletions
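
As context for the timeline I/O code added below, here is a minimal userspace reader sketch (not part of this commit; names are illustrative). It assumes the stream file descriptor has already been obtained from the kbase device (e.g. via the tlstream acquire ioctl): reads must supply a buffer of at least PACKET_SIZE (4096) bytes, matching kbasep_timeline_io_read(), and poll() reports POLLIN while a packet is pending.

    #include <poll.h>
    #include <unistd.h>

    /* Hypothetical helper: forward MIPE packets from an already-acquired
     * timeline fd to an output fd. The buffer size mirrors PACKET_SIZE.
     */
    static void drain_timeline(int tl_fd, int out_fd)
    {
            char buf[4096];
            struct pollfd pfd = { .fd = tl_fd, .events = POLLIN };

            while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
                    ssize_t n = read(tl_fd, buf, sizeof(buf));

                    if (n <= 0)
                            break;
                    if (write(out_fd, buf, n) != n)
                            break;
            }
    }
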
diff --git a/mali_kbase/tl/backend/mali_kbase_timeline_jm.c b/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
new file mode 100644
index 0000000..c368ac7
--- /dev/null
+++ b/mali_kbase/tl/backend/mali_kbase_timeline_jm.c
@@ -0,0 +1,97 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "../mali_kbase_tracepoints.h"
+#include "../mali_kbase_timeline.h"
+#include "../mali_kbase_timeline_priv.h"
+
+#include <mali_kbase.h>
+
+void kbase_create_timeline_objects(struct kbase_device *kbdev)
+{
+ unsigned int lpu_id;
+ unsigned int as_nr;
+ struct kbase_context *kctx;
+ struct kbase_timeline *timeline = kbdev->timeline;
+ struct kbase_tlstream *summary =
+ &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY];
+
+ /* Summarize the LPU objects. */
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ u32 *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ __kbase_tlstream_tl_new_lpu(summary, lpu, lpu_id, *lpu);
+ }
+
+ /* Summarize the Address Space objects. */
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ __kbase_tlstream_tl_new_as(summary, &kbdev->as[as_nr], as_nr);
+
+ /* Create GPU object and make it retain all LPUs and address spaces. */
+ __kbase_tlstream_tl_new_gpu(summary,
+ kbdev,
+ kbdev->gpu_props.props.raw_props.gpu_id,
+ kbdev->gpu_props.num_cores);
+
+ for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+ void *lpu =
+ &kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+ __kbase_tlstream_tl_lifelink_lpu_gpu(summary, lpu, kbdev);
+ }
+
+ for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+ __kbase_tlstream_tl_lifelink_as_gpu(summary,
+ &kbdev->as[as_nr],
+ kbdev);
+
+ /* Lock the context list, to ensure no changes to the list are made
+ * while we're summarizing the contexts and their contents.
+ */
+ mutex_lock(&kbdev->kctx_list_lock);
+
+ /* For each context in the device... */
+ list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link) {
+ /* Summarize the context itself */
+ __kbase_tlstream_tl_new_ctx(summary,
+ kctx,
+ kctx->id,
+ (u32)(kctx->tgid));
+ }
+
+ /* Reset body stream buffers while holding the kctx lock.
+ * This ensures we can't fire both summary and normal tracepoints for
+ * the same objects.
+ * If we weren't holding the lock, it's possible that the summarized
+ * objects could have been created, destroyed, or used after we
+ * constructed the summary stream tracepoints, but before we reset
+ * the body stream, resulting in losing those object event tracepoints.
+ */
+ kbase_timeline_streams_body_reset(timeline);
+
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ /* Static objects are placed into the summary packet, which needs to be
+ * transmitted first. Flush all streams to make it available to
+ * user space.
+ */
+ kbase_timeline_streams_flush(timeline);
+}
\ No newline at end of file
diff --git a/mali_kbase/tl/mali_kbase_timeline.c b/mali_kbase/tl/mali_kbase_timeline.c
new file mode 100644
index 0000000..201b30e
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_timeline.c
@@ -0,0 +1,272 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_timeline.h"
+#include "mali_kbase_timeline_priv.h"
+#include "mali_kbase_tracepoints.h"
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+
+/* The period of autoflush checker execution in milliseconds. */
+#define AUTOFLUSH_INTERVAL 1000 /* ms */
+
+/*****************************************************************************/
+
+/* These values are used in mali_kbase_tracepoints.h
+ * to retrieve the streams from a kbase_timeline instance.
+ */
+const size_t __obj_stream_offset =
+ offsetof(struct kbase_timeline, streams)
+ + sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_OBJ;
+
+const size_t __aux_stream_offset =
+ offsetof(struct kbase_timeline, streams)
+ + sizeof(struct kbase_tlstream) * TL_STREAM_TYPE_AUX;
+
+/**
+ * kbasep_timeline_autoflush_timer_callback - autoflush timer callback
+ * @timer: Timer list
+ *
+ * The timer is executed periodically to check if any of the streams
+ * contains a buffer ready to be submitted to user space.
+ */
+static void kbasep_timeline_autoflush_timer_callback(struct timer_list *timer)
+{
+ enum tl_stream_type stype;
+ int rcode;
+ struct kbase_timeline *timeline =
+ container_of(timer, struct kbase_timeline, autoflush_timer);
+
+ CSTD_UNUSED(timer);
+
+ for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT;
+ stype++) {
+ struct kbase_tlstream *stream = &timeline->streams[stype];
+
+ int af_cnt = atomic_read(&stream->autoflush_counter);
+
+ /* Check if the stream contains unflushed data. */
+ if (af_cnt < 0)
+ continue;
+
+ /* Check if stream should be flushed now. */
+ if (af_cnt != atomic_cmpxchg(
+ &stream->autoflush_counter,
+ af_cnt,
+ af_cnt + 1))
+ continue;
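+ /* A counter value of zero means the buffer was written to since the
+ * last expiry; skip the flush this time and only flush on the next
+ * expiry if the stream stays untouched.
+ */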
+ if (!af_cnt)
+ continue;
+
+ /* Autoflush this stream. */
+ kbase_tlstream_flush_stream(stream);
+ }
+
+ if (atomic_read(&timeline->autoflush_timer_active))
+ rcode = mod_timer(
+ &timeline->autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+}
+
+
+
+/*****************************************************************************/
+
+int kbase_timeline_init(struct kbase_timeline **timeline,
+ atomic_t *timeline_is_enabled)
+{
+ enum tl_stream_type i;
+ struct kbase_timeline *result;
+
+ if (!timeline || !timeline_is_enabled)
+ return -EINVAL;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ mutex_init(&result->reader_lock);
+ init_waitqueue_head(&result->event_queue);
+
+ /* Prepare stream structures. */
+ for (i = 0; i < TL_STREAM_TYPE_COUNT; i++)
+ kbase_tlstream_init(&result->streams[i], i,
+ &result->event_queue);
+
+ /* Initialize autoflush timer. */
+ atomic_set(&result->autoflush_timer_active, 0);
+ kbase_timer_setup(&result->autoflush_timer,
+ kbasep_timeline_autoflush_timer_callback);
+ result->is_enabled = timeline_is_enabled;
+
+ *timeline = result;
+ return 0;
+}
+
+void kbase_timeline_term(struct kbase_timeline *timeline)
+{
+ enum tl_stream_type i;
+
+ if (!timeline)
+ return;
+
+ for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; i++)
+ kbase_tlstream_term(&timeline->streams[i]);
+
+ kfree(timeline);
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+static void kbase_tlstream_current_devfreq_target(struct kbase_device *kbdev)
+{
+ struct devfreq *devfreq = kbdev->devfreq;
+
+ /* Devfreq initialization failure isn't a fatal error, so devfreq might
+ * be null.
+ */
+ if (devfreq) {
+ unsigned long cur_freq = 0;
+
+ mutex_lock(&devfreq->lock);
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+ cur_freq = kbdev->current_nominal_freq;
+#else
+ cur_freq = devfreq->last_status.current_frequency;
+#endif
+ KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(kbdev, (u64)cur_freq);
+ mutex_unlock(&devfreq->lock);
+ }
+}
+#endif /* CONFIG_MALI_DEVFREQ */
+
+int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags)
+{
+ int ret;
+ u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
+ struct kbase_timeline *timeline = kbdev->timeline;
+
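+ /* Only one client may own the stream descriptor at a time; atomically
+ * claim it by changing is_enabled from 0 to the requested flags.
+ */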
+ if (!atomic_cmpxchg(timeline->is_enabled, 0, tlstream_enabled)) {
+ int rcode;
+
+ ret = anon_inode_getfd(
+ "[mali_tlstream]",
+ &kbasep_tlstream_fops,
+ timeline,
+ O_RDONLY | O_CLOEXEC);
+ if (ret < 0) {
+ atomic_set(timeline->is_enabled, 0);
+ return ret;
+ }
+
+ /* Reset and initialize header streams. */
+ kbase_tlstream_reset(
+ &timeline->streams[TL_STREAM_TYPE_OBJ_SUMMARY]);
+
+ timeline->obj_header_btc = obj_desc_header_size;
+ timeline->aux_header_btc = aux_desc_header_size;
+
+ /* Start autoflush timer. */
+ atomic_set(&timeline->autoflush_timer_active, 1);
+ rcode = mod_timer(
+ &timeline->autoflush_timer,
+ jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+ CSTD_UNUSED(rcode);
+
+ /* If job dumping is enabled, readjust the software event's
+ * timeout as the default value of 3 seconds is often
+ * insufficient.
+ */
+ if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) {
+ dev_info(kbdev->dev,
+ "Job dumping is enabled, readjusting the software event's timeout\n");
+ atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+ 1800000);
+ }
+
+ /* The summary stream was cleared during acquire.
+ * Create static timeline objects that will be
+ * read by the client.
+ */
+ kbase_create_timeline_objects(kbdev);
+
+#ifdef CONFIG_MALI_DEVFREQ
+ /* Devfreq target tracepoints are only fired when the target
+ * changes, so we won't know the current target unless we
+ * send it now.
+ */
+ kbase_tlstream_current_devfreq_target(kbdev);
+#endif /* CONFIG_MALI_DEVFREQ */
+
+ } else {
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+void kbase_timeline_streams_flush(struct kbase_timeline *timeline)
+{
+ enum tl_stream_type stype;
+
+ for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+ kbase_tlstream_flush_stream(&timeline->streams[stype]);
+}
+
+void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline)
+{
+ kbase_tlstream_reset(
+ &timeline->streams[TL_STREAM_TYPE_OBJ]);
+ kbase_tlstream_reset(
+ &timeline->streams[TL_STREAM_TYPE_AUX]);
+}
+
+#if MALI_UNIT_TEST
+void kbase_timeline_stats(struct kbase_timeline *timeline,
+ u32 *bytes_collected, u32 *bytes_generated)
+{
+ enum tl_stream_type stype;
+
+ KBASE_DEBUG_ASSERT(bytes_collected);
+
+ /* Accumulate bytes generated per stream */
+ *bytes_generated = 0;
+ for (stype = (enum tl_stream_type)0; stype < TL_STREAM_TYPE_COUNT;
+ stype++)
+ *bytes_generated += atomic_read(
+ &timeline->streams[stype].bytes_generated);
+
+ *bytes_collected = atomic_read(&timeline->bytes_collected);
+}
+#endif /* MALI_UNIT_TEST */
diff --git a/mali_kbase/tl/mali_kbase_timeline.h b/mali_kbase/tl/mali_kbase_timeline.h
new file mode 100644
index 0000000..d800288
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_timeline.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TIMELINE_H)
+#define _KBASE_TIMELINE_H
+
+#include <mali_kbase.h>
+
+/*****************************************************************************/
+
+struct kbase_timeline;
+
+/**
+ * kbase_timeline_init - initialize timeline infrastructure in kernel
+ * @timeline: Newly created instance of kbase_timeline will
+ * be stored in this pointer.
+ * @timeline_is_enabled: Timeline status will be written to this variable
+ * when a client is attached/detached. The variable
+ * must be valid while timeline instance is valid.
+ * Return: zero on success, negative number on error
+ */
+int kbase_timeline_init(struct kbase_timeline **timeline,
+ atomic_t *timeline_is_enabled);
+
+/**
+ * kbase_timeline_term - terminate timeline infrastructure in kernel
+ *
+ * @timeline: Timeline instance to be terminated. It must be previously created
+ * with kbase_timeline_init().
+ */
+void kbase_timeline_term(struct kbase_timeline *timeline);
+
+/**
+ * kbase_timeline_io_acquire - acquire timeline stream file descriptor
+ * @kbdev: Kbase device
+ * @flags: Timeline stream flags
+ *
+ * This descriptor is meant to be used by userspace to gain access to the
+ * kernel timeline stream. This stream is later broadcast by user space to
+ * the timeline client.
+ * Only one entity can own the descriptor at any given time. The descriptor
+ * shall be closed if unused. If the descriptor cannot be obtained (i.e. it
+ * is already in use), a negative value is returned.
+ *
+ * Return: file descriptor on success, negative number on error
+ */
+int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags);
+
+/**
+ * kbase_timeline_streams_flush - flush timeline streams.
+ * @timeline: Timeline instance
+ *
+ * Function will flush pending data in all timeline streams.
+ */
+void kbase_timeline_streams_flush(struct kbase_timeline *timeline);
+
+/**
+ * kbase_timeline_streams_body_reset - reset timeline body streams.
+ * @timeline: Timeline instance
+ *
+ * Function will discard pending data in all timeline body streams.
+ */
+void kbase_timeline_streams_body_reset(struct kbase_timeline *timeline);
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_timeline_test - start timeline stream data generator
+ * @kbdev: Kernel common context
+ * @tpw_count: Number of trace point writers in each context
+ * @msg_delay: Time delay in milliseconds between trace points written by one
+ * writer
+ * @msg_count: Number of trace points written by one writer
+ * @aux_msg: If non-zero aux messages will be included
+ *
+ * This test starts a requested number of asynchronous writers in both IRQ and
+ * thread context. Each writer will generate the required number of test
+ * tracepoints (tracepoints with embedded information about the writer that
+ * should be verified by the user space reader). Tracepoints will be emitted
+ * in all timeline body streams. If aux_msg is non-zero, each writer will also
+ * generate non-testable tracepoints (tracepoints without information about
+ * the writer). These tracepoints are used to check the correctness of the
+ * remaining timeline message generating functions. Each writer will wait the
+ * requested time between generating consecutive sets of messages. This call
+ * blocks until all writers finish.
+ */
+void kbase_timeline_test(
+ struct kbase_device *kbdev,
+ unsigned int tpw_count,
+ unsigned int msg_delay,
+ unsigned int msg_count,
+ int aux_msg);
+
+/**
+ * kbase_timeline_stats - read timeline stream statistics
+ * @timeline: Timeline instance
+ * @bytes_collected: Will hold number of bytes read by the user
+ * @bytes_generated: Will hold number of bytes generated by trace points
+ */
+void kbase_timeline_stats(struct kbase_timeline *timeline, u32 *bytes_collected, u32 *bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* _KBASE_TIMELINE_H */
diff --git a/mali_kbase/tl/mali_kbase_timeline_io.c b/mali_kbase/tl/mali_kbase_timeline_io.c
new file mode 100644
index 0000000..9a899f2
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_timeline_io.c
@@ -0,0 +1,314 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_timeline_priv.h"
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tracepoints.h"
+
+#include <linux/poll.h>
+
+/* The timeline stream file operations functions. */
+static ssize_t kbasep_timeline_io_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos);
+static unsigned int kbasep_timeline_io_poll(struct file *filp, poll_table *wait);
+static int kbasep_timeline_io_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+const struct file_operations kbasep_tlstream_fops = {
+ .owner = THIS_MODULE,
+ .release = kbasep_timeline_io_release,
+ .read = kbasep_timeline_io_read,
+ .poll = kbasep_timeline_io_poll,
+};
+
+/**
+ * kbasep_timeline_io_packet_pending - check timeline streams for pending packets
+ * @timeline: Timeline instance
+ * @ready_stream: Pointer to variable where stream will be placed
+ * @rb_idx_raw: Pointer to variable where read buffer index will be placed
+ *
+ * Function checks all streams for pending packets. It will stop as soon as
+ * a packet ready to be submitted to user space is detected. The variables
+ * pointed to by the parameters passed to this function will be updated with
+ * values identifying the right stream and buffer.
+ *
+ * Return: non-zero if any of the timeline streams has at least one packet ready
+ */
+static int kbasep_timeline_io_packet_pending(
+ struct kbase_timeline *timeline,
+ struct kbase_tlstream **ready_stream,
+ unsigned int *rb_idx_raw)
+{
+ enum tl_stream_type i;
+
+ KBASE_DEBUG_ASSERT(ready_stream);
+ KBASE_DEBUG_ASSERT(rb_idx_raw);
+
+ for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; ++i) {
+ struct kbase_tlstream *stream = &timeline->streams[i];
+ *rb_idx_raw = atomic_read(&stream->rbi);
+ /* Read buffer index may be updated by writer in case of
+ * overflow. Read and write buffer indexes must be
+ * loaded in correct order.
+ */
+ smp_rmb();
+ if (atomic_read(&stream->wbi) != *rb_idx_raw) {
+ *ready_stream = stream;
+ return 1;
+ }
+
+ }
+
+ return 0;
+}
+
+/**
+ * kbasep_timeline_copy_header - copy timeline headers to the user
+ * @timeline: Timeline instance
+ * @buffer: Pointer to the buffer provided by user
+ * @size: Maximum amount of data that can be stored in the buffer
+ * @copy_len: Pointer to amount of bytes that has been copied already
+ * within the read system call.
+ *
+ * This helper function checks if the timeline headers have not yet been
+ * fully sent to the user and, if so, sends as much of them as fits.
+ * @copy_len is updated accordingly.
+ *
+ * Return: 0 on success, -1 if copy_to_user() has failed.
+ */
+static inline int kbasep_timeline_copy_header(
+ struct kbase_timeline *timeline,
+ char __user *buffer,
+ size_t size,
+ ssize_t *copy_len)
+{
+ if (timeline->obj_header_btc) {
+ size_t offset = obj_desc_header_size -
+ timeline->obj_header_btc;
+
+ size_t header_cp_size = MIN(
+ size - *copy_len,
+ timeline->obj_header_btc);
+
+ if (copy_to_user(
+ &buffer[*copy_len],
+ &obj_desc_header[offset],
+ header_cp_size))
+ return -1;
+
+ timeline->obj_header_btc -= header_cp_size;
+ *copy_len += header_cp_size;
+ }
+
+ if (timeline->aux_header_btc) {
+ size_t offset = aux_desc_header_size -
+ timeline->aux_header_btc;
+ size_t header_cp_size = MIN(
+ size - *copy_len,
+ timeline->aux_header_btc);
+
+ if (copy_to_user(
+ &buffer[*copy_len],
+ &aux_desc_header[offset],
+ header_cp_size))
+ return -1;
+
+ timeline->aux_header_btc -= header_cp_size;
+ *copy_len += header_cp_size;
+ }
+ return 0;
+}
+
+
+/**
+ * kbasep_timeline_io_read - copy data from streams to buffer provided by user
+ * @filp: Pointer to file structure
+ * @buffer: Pointer to the buffer provided by user
+ * @size: Maximum amount of data that can be stored in the buffer
+ * @f_pos: Pointer to file offset (unused)
+ *
+ * Return: number of bytes stored in the buffer
+ */
+static ssize_t kbasep_timeline_io_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos)
+{
+ ssize_t copy_len = 0;
+ struct kbase_timeline *timeline;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(f_pos);
+
+ if (WARN_ON(!filp->private_data))
+ return -EFAULT;
+
+ timeline = (struct kbase_timeline *) filp->private_data;
+
+ if (!buffer)
+ return -EINVAL;
+
+ if ((*f_pos < 0) || (size < PACKET_SIZE))
+ return -EINVAL;
+
+ mutex_lock(&timeline->reader_lock);
+
+ while (copy_len < size) {
+ struct kbase_tlstream *stream = NULL;
+ unsigned int rb_idx_raw = 0;
+ unsigned int wb_idx_raw;
+ unsigned int rb_idx;
+ size_t rb_size;
+
+ if (kbasep_timeline_copy_header(
+ timeline, buffer, size, &copy_len)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+ /* If we have already read some packets and there is no
+ * packet pending, then return to the user.
+ * If we don't have any data yet, wait for a packet to be
+ * submitted.
+ */
+ if (copy_len > 0) {
+ if (!kbasep_timeline_io_packet_pending(
+ timeline,
+ &stream,
+ &rb_idx_raw))
+ break;
+ } else {
+ if (wait_event_interruptible(
+ timeline->event_queue,
+ kbasep_timeline_io_packet_pending(
+ timeline,
+ &stream,
+ &rb_idx_raw))) {
+ copy_len = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ if (WARN_ON(!stream)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+ /* Check if this packet fits into the user buffer.
+ * If so copy its content.
+ */
+ rb_idx = rb_idx_raw % PACKET_COUNT;
+ rb_size = atomic_read(&stream->buffer[rb_idx].size);
+ if (rb_size > size - copy_len)
+ break;
+ if (copy_to_user(
+ &buffer[copy_len],
+ stream->buffer[rb_idx].data,
+ rb_size)) {
+ copy_len = -EFAULT;
+ break;
+ }
+
+ /* If the distance between the read buffer index and the write
+ * buffer index has reached PACKET_COUNT, then an overflow has
+ * happened and we need to ignore the last portion of bytes
+ * that we have just sent to the user.
+ */
+ smp_rmb();
+ wb_idx_raw = atomic_read(&stream->wbi);
+
+ if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
+ copy_len += rb_size;
+ atomic_inc(&stream->rbi);
+#if MALI_UNIT_TEST
+ atomic_add(rb_size, &timeline->bytes_collected);
+#endif /* MALI_UNIT_TEST */
+
+ } else {
+ const unsigned int new_rb_idx_raw =
+ wb_idx_raw - PACKET_COUNT + 1;
+ /* Adjust read buffer index to the next valid buffer */
+ atomic_set(&stream->rbi, new_rb_idx_raw);
+ }
+ }
+
+ mutex_unlock(&timeline->reader_lock);
+
+ return copy_len;
+}
+
+/**
+ * kbasep_timeline_io_poll - poll timeline stream for packets
+ * @filp: Pointer to file structure
+ * @wait: Pointer to poll table
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_timeline_io_poll(struct file *filp, poll_table *wait)
+{
+ struct kbase_tlstream *stream;
+ unsigned int rb_idx;
+ struct kbase_timeline *timeline;
+
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(wait);
+
+ if (WARN_ON(!filp->private_data))
+ return -EFAULT;
+
+ timeline = (struct kbase_timeline *) filp->private_data;
+
+ poll_wait(filp, &timeline->event_queue, wait);
+ if (kbasep_timeline_io_packet_pending(timeline, &stream, &rb_idx))
+ return POLLIN;
+ return 0;
+}
+
+/**
+ * kbasep_timeline_io_release - release timeline stream descriptor
+ * @inode: Pointer to inode structure
+ * @filp: Pointer to file structure
+ *
+ * Return: always returns zero
+ */
+static int kbasep_timeline_io_release(struct inode *inode, struct file *filp)
+{
+ struct kbase_timeline *timeline;
+
+ KBASE_DEBUG_ASSERT(inode);
+ KBASE_DEBUG_ASSERT(filp);
+ KBASE_DEBUG_ASSERT(filp->private_data);
+
+ CSTD_UNUSED(inode);
+
+ timeline = (struct kbase_timeline *) filp->private_data;
+
+ /* Stop autoflush timer before releasing access to streams. */
+ atomic_set(&timeline->autoflush_timer_active, 0);
+ del_timer_sync(&timeline->autoflush_timer);
+
+ atomic_set(timeline->is_enabled, 0);
+ return 0;
+}
diff --git a/mali_kbase/tl/mali_kbase_timeline_priv.h b/mali_kbase/tl/mali_kbase_timeline_priv.h
new file mode 100644
index 0000000..d4c4773
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_timeline_priv.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TIMELINE_PRIV_H)
+#define _KBASE_TIMELINE_PRIV_H
+
+#include <mali_kbase.h>
+#include "mali_kbase_tlstream.h"
+
+#include <linux/timer.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+
+/**
+ * struct kbase_timeline - timeline state structure
+ * @streams: The timeline streams generated by kernel
+ * @autoflush_timer: Autoflush timer
+ * @autoflush_timer_active: If non-zero autoflush timer is active
+ * @reader_lock: Reader lock. Only one reader is allowed to
+ * have access to the timeline streams at any given time.
+ * @event_queue: Timeline stream event queue
+ * @bytes_collected: Number of bytes read by user
+ * @is_enabled: Zero, if timeline is disabled. Timeline stream flags
+ * otherwise. See kbase_timeline_io_acquire().
+ * @obj_header_btc: Remaining bytes to copy for the object stream header
+ * @aux_header_btc: Remaining bytes to copy for the aux stream header
+ */
+struct kbase_timeline {
+ struct kbase_tlstream streams[TL_STREAM_TYPE_COUNT];
+ struct timer_list autoflush_timer;
+ atomic_t autoflush_timer_active;
+ struct mutex reader_lock;
+ wait_queue_head_t event_queue;
+#if MALI_UNIT_TEST
+ atomic_t bytes_collected;
+#endif /* MALI_UNIT_TEST */
+ atomic_t *is_enabled;
+ size_t obj_header_btc;
+ size_t aux_header_btc;
+};
+
+extern const struct file_operations kbasep_tlstream_fops;
+
+void kbase_create_timeline_objects(struct kbase_device *kbdev);
+
+#endif /* _KBASE_TIMELINE_PRIV_H */
diff --git a/mali_kbase/tl/mali_kbase_tl_serialize.h b/mali_kbase/tl/mali_kbase_tl_serialize.h
new file mode 100644
index 0000000..90808ce
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_tl_serialize.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TL_SERIALIZE_H)
+#define _KBASE_TL_SERIALIZE_H
+
+#include <mali_kbase.h>
+
+#include <linux/timer.h>
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC 1000000000ull /* ns */
+
+/**
+ * kbasep_serialize_bytes - serialize bytes to the message buffer
+ *
+ * Serialize bytes as is using memcpy()
+ *
+ * @buffer: Message buffer
+ * @pos: Message buffer offset
+ * @bytes: Bytes to serialize
+ * @len: Length of bytes array
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_bytes(
+ char *buffer,
+ size_t pos,
+ const void *bytes,
+ size_t len)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(bytes);
+
+ memcpy(&buffer[pos], bytes, len);
+
+ return pos + len;
+}
+
+/**
+ * kbasep_serialize_string - serialize string to the message buffer
+ *
+ * String is serialized as 4 bytes for string size,
+ * then string content and then null terminator.
+ *
+ * @buffer: Message buffer
+ * @pos: Message buffer offset
+ * @string: String to serialize
+ * @max_write_size: Number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_string(
+ char *buffer,
+ size_t pos,
+ const char *string,
+ size_t max_write_size)
+{
+ u32 string_len;
+
+ KBASE_DEBUG_ASSERT(buffer);
+ KBASE_DEBUG_ASSERT(string);
+ /* Timeline string consists of at least string length and nul
+ * terminator.
+ */
+ KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+ max_write_size -= sizeof(string_len);
+
+ string_len = strlcpy(
+ &buffer[pos + sizeof(string_len)],
+ string,
+ max_write_size);
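+ /* strlcpy() returns the length of the source string excluding the nul
+ * terminator; add one byte so the stored length accounts for the
+ * terminator that was written.
+ */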
+ string_len += sizeof(char);
+
+ /* Make sure that the source string fits into the buffer. */
+ KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+ /* Update string length. */
+ memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+ return pos + sizeof(string_len) + string_len;
+}
+
+/**
+ * kbasep_serialize_timestamp - serialize timestamp to the message buffer
+ *
+ * Get the current timestamp using getrawmonotonic()
+ * and serialize it as a 64 bit unsigned integer.
+ *
+ * @buffer: Message buffer
+ * @pos: Message buffer offset
+ *
+ * Return: updated position in the buffer
+ */
+static inline size_t kbasep_serialize_timestamp(void *buffer, size_t pos)
+{
+ struct timespec ts;
+ u64 timestamp;
+
+ getrawmonotonic(&ts);
+ timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+
+ return kbasep_serialize_bytes(
+ buffer, pos,
+ &timestamp, sizeof(timestamp));
+}
+#endif /* _KBASE_TL_SERIALIZE_H */
+
diff --git a/mali_kbase/tl/mali_kbase_tlstream.c b/mali_kbase/tl/mali_kbase_tlstream.c
new file mode 100644
index 0000000..2a76bc0
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_tlstream.c
@@ -0,0 +1,287 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tl_serialize.h"
+#include "mali_kbase_mipe_proto.h"
+
+/**
+ * kbasep_packet_header_setup - setup the packet header
+ * @buffer: pointer to the buffer
+ * @pkt_family: packet's family
+ * @pkt_type: packet's type
+ * @pkt_class: packet's class
+ * @stream_id: stream id
+ * @numbered: non-zero if this stream is numbered
+ *
+ * Function sets up immutable part of packet header in the given buffer.
+ */
+static void kbasep_packet_header_setup(
+ char *buffer,
+ enum tl_packet_family pkt_family,
+ enum tl_packet_class pkt_class,
+ enum tl_packet_type pkt_type,
+ unsigned int stream_id,
+ int numbered)
+{
+ u32 words[2] = {
+ MIPE_PACKET_HEADER_W0(pkt_family, pkt_class, pkt_type, stream_id),
+ MIPE_PACKET_HEADER_W1(0, !!numbered),
+ };
+ memcpy(buffer, words, sizeof(words));
+}
+
+/**
+ * kbasep_packet_header_update - update the packet header
+ * @buffer: pointer to the buffer
+ * @data_size: amount of data carried in this packet
+ * @numbered: non-zero if the stream is numbered
+ *
+ * Function updates mutable part of packet header in the given buffer.
+ * Note that the value of data_size must not include the size of the header.
+ */
+static void kbasep_packet_header_update(
+ char *buffer,
+ size_t data_size,
+ int numbered)
+{
+ u32 word0;
+ u32 word1 = MIPE_PACKET_HEADER_W1((u32)data_size, !!numbered);
+
+ KBASE_DEBUG_ASSERT(buffer);
+ CSTD_UNUSED(word0);
+
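+ /* Only the second header word is mutable; word0 is declared solely so
+ * its size can be used as the offset of word1 within the packet header.
+ */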
+ memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_packet_number_update - update the packet number
+ * @buffer: pointer to the buffer
+ * @counter: value of packet counter for this packet's stream
+ *
+ * Function updates packet number embedded within the packet placed in the
+ * given buffer.
+ */
+static void kbasep_packet_number_update(char *buffer, u32 counter)
+{
+ KBASE_DEBUG_ASSERT(buffer);
+
+ memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter));
+}
+
+void kbase_tlstream_reset(struct kbase_tlstream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < PACKET_COUNT; i++) {
+ if (stream->numbered)
+ atomic_set(
+ &stream->buffer[i].size,
+ PACKET_HEADER_SIZE +
+ PACKET_NUMBER_SIZE);
+ else
+ atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
+ }
+
+ atomic_set(&stream->wbi, 0);
+ atomic_set(&stream->rbi, 0);
+}
+
+/* Configuration of timeline streams generated by the kernel.
+ * The kernel emits only streams containing either timeline object events or
+ * auxiliary events. All streams have a stream id value of 1 (as opposed to
+ * user space streams, which have a value of 0).
+ */
+static const struct {
+ enum tl_packet_family pkt_family;
+ enum tl_packet_class pkt_class;
+ enum tl_packet_type pkt_type;
+ unsigned int stream_id;
+} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY, 1},
+ {TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY, 1}
+};
+
+void kbase_tlstream_init(
+ struct kbase_tlstream *stream,
+ enum tl_stream_type stream_type,
+ wait_queue_head_t *ready_read)
+{
+ unsigned int i;
+
+ KBASE_DEBUG_ASSERT(stream);
+ KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+ spin_lock_init(&stream->lock);
+
+ /* All packets carrying tracepoints shall be numbered. */
+ if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type)
+ stream->numbered = 1;
+ else
+ stream->numbered = 0;
+
+ for (i = 0; i < PACKET_COUNT; i++)
+ kbasep_packet_header_setup(
+ stream->buffer[i].data,
+ tl_stream_cfg[stream_type].pkt_family,
+ tl_stream_cfg[stream_type].pkt_class,
+ tl_stream_cfg[stream_type].pkt_type,
+ tl_stream_cfg[stream_type].stream_id,
+ stream->numbered);
+
+#if MALI_UNIT_TEST
+ atomic_set(&stream->bytes_generated, 0);
+#endif
+ stream->ready_read = ready_read;
+
+ kbase_tlstream_reset(stream);
+}
+
+void kbase_tlstream_term(struct kbase_tlstream *stream)
+{
+ KBASE_DEBUG_ASSERT(stream);
+}
+
+/**
+ * kbasep_tlstream_msgbuf_submit - submit packet to user space
+ * @stream: Pointer to the stream structure
+ * @wb_idx_raw: Write buffer index
+ * @wb_size: Length of data stored in the current buffer
+ *
+ * Updates the currently written buffer with the packet header.
+ * Then the write index is incremented and the buffer is handed to user space.
+ * Parameters of the new buffer are returned using the provided arguments.
+ *
+ * Return: length of data in the new buffer
+ *
+ * Warning: the user must update the stream structure with the returned value.
+ */
+static size_t kbasep_tlstream_msgbuf_submit(
+ struct kbase_tlstream *stream,
+ unsigned int wb_idx_raw,
+ unsigned int wb_size)
+{
+ unsigned int wb_idx = wb_idx_raw % PACKET_COUNT;
+
+ /* Set stream as flushed. */
+ atomic_set(&stream->autoflush_counter, -1);
+
+ kbasep_packet_header_update(
+ stream->buffer[wb_idx].data,
+ wb_size - PACKET_HEADER_SIZE,
+ stream->numbered);
+
+ if (stream->numbered)
+ kbasep_packet_number_update(
+ stream->buffer[wb_idx].data,
+ wb_idx_raw);
+
+ /* Increasing the write buffer index will expose this packet to the reader.
+ * As stream->lock is not taken on the reader side, we must make sure memory
+ * is updated correctly before this happens.
+ */
+ smp_wmb();
+ atomic_inc(&stream->wbi);
+
+ /* Inform user that packets are ready for reading. */
+ wake_up_interruptible(stream->ready_read);
+
+ wb_size = PACKET_HEADER_SIZE;
+ if (stream->numbered)
+ wb_size += PACKET_NUMBER_SIZE;
+
+ return wb_size;
+}
+
+char *kbase_tlstream_msgbuf_acquire(
+ struct kbase_tlstream *stream,
+ size_t msg_size,
+ unsigned long *flags) __acquires(&stream->lock)
+{
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+
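+ /* A single message must always fit into an otherwise empty packet. */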
+ KBASE_DEBUG_ASSERT(
+ PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
+ msg_size);
+
+ spin_lock_irqsave(&stream->lock, *flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
+ /* Select next buffer if data will not fit into current one. */
+ if (PACKET_SIZE < wb_size + msg_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ }
+
+ /* Reserve space in selected buffer. */
+ atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size);
+
+#if MALI_UNIT_TEST
+ atomic_add(msg_size, &stream->bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+ return &stream->buffer[wb_idx].data[wb_size];
+}
+
+void kbase_tlstream_msgbuf_release(
+ struct kbase_tlstream *stream,
+ unsigned long flags) __releases(&stream->lock)
+{
+ /* Mark stream as containing unflushed data. */
+ atomic_set(&stream->autoflush_counter, 0);
+
+ spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+void kbase_tlstream_flush_stream(
+ struct kbase_tlstream *stream)
+{
+ unsigned long flags;
+ unsigned int wb_idx_raw;
+ unsigned int wb_idx;
+ size_t wb_size;
+ size_t min_size = PACKET_HEADER_SIZE;
+
+ if (stream->numbered)
+ min_size += PACKET_NUMBER_SIZE;
+
+ spin_lock_irqsave(&stream->lock, flags);
+
+ wb_idx_raw = atomic_read(&stream->wbi);
+ wb_idx = wb_idx_raw % PACKET_COUNT;
+ wb_size = atomic_read(&stream->buffer[wb_idx].size);
+
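+ /* Only submit the packet if it carries payload beyond the bare header
+ * (and packet number, if the stream is numbered).
+ */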
+ if (wb_size > min_size) {
+ wb_size = kbasep_tlstream_msgbuf_submit(
+ stream, wb_idx_raw, wb_size);
+ wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+ atomic_set(&stream->buffer[wb_idx].size, wb_size);
+ }
+ spin_unlock_irqrestore(&stream->lock, flags);
+}
+
diff --git a/mali_kbase/tl/mali_kbase_tlstream.h b/mali_kbase/tl/mali_kbase_tlstream.h
new file mode 100644
index 0000000..5797738
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_tlstream.h
@@ -0,0 +1,167 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TLSTREAM_H)
+#define _KBASE_TLSTREAM_H
+
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+/* The maximum size of a single packet used by timeline. */
+#define PACKET_SIZE 4096 /* bytes */
+
+/* The number of packets used by one timeline stream. */
+#if defined(CONFIG_MALI_JOB_DUMP) || defined(CONFIG_MALI_VECTOR_DUMP)
+ #define PACKET_COUNT 64
+#else
+ #define PACKET_COUNT 32
+#endif
+
+/* The maximum expected length of string in tracepoint descriptor. */
+#define STRLEN_MAX 64 /* bytes */
+
+/**
+ * struct kbase_tlstream - timeline stream structure
+ * @lock: Message order lock
+ * @buffer: Array of buffers
+ * @wbi: Write buffer index
+ * @rbi: Read buffer index
+ * @numbered: If non-zero stream's packets are sequentially numbered
+ * @autoflush_counter: Counter tracking stream's autoflush state
+ * @ready_read: Pointer to a wait queue, which is signaled when
+ * timeline messages are ready for collection.
+ * @bytes_generated: Number of bytes generated by tracepoint messages
+ *
+ * This structure holds information needed to construct proper packets in the
+ * timeline stream.
+ *
+ * Each message in the sequence must bear a timestamp that is
+ * greater than the previous message in the same stream. For this reason
+ * a lock is held throughout the process of message creation.
+ *
+ * Each stream contains a set of buffers. Each buffer will hold one MIPE
+ * packet. In case there is no free space to store the incoming message,
+ * the oldest buffer is discarded. Each packet in the timeline body
+ * stream has a sequence number embedded; this value must increment
+ * monotonically and is used by the packet receiver to detect these
+ * buffer overflows.
+ *
+ * The autoflush counter is set to a negative number when there is no data
+ * pending for flush and it is set to zero on every update of the buffer. The
+ * autoflush timer will increment the counter by one on every expiry. If there
+ * is no activity on the buffer for two consecutive timer expiries, the stream
+ * buffer will be flushed.
+ */
+struct kbase_tlstream {
+ spinlock_t lock;
+
+ struct {
+ atomic_t size; /* number of bytes in buffer */
+ char data[PACKET_SIZE]; /* buffer's data */
+ } buffer[PACKET_COUNT];
+
+ atomic_t wbi;
+ atomic_t rbi;
+
+ int numbered;
+ atomic_t autoflush_counter;
+ wait_queue_head_t *ready_read;
+#if MALI_UNIT_TEST
+ atomic_t bytes_generated;
+#endif
+};
+
+/* Types of streams generated by timeline. */
+enum tl_stream_type {
+ TL_STREAM_TYPE_FIRST,
+ TL_STREAM_TYPE_OBJ_SUMMARY = TL_STREAM_TYPE_FIRST,
+ TL_STREAM_TYPE_OBJ,
+ TL_STREAM_TYPE_AUX,
+
+ TL_STREAM_TYPE_COUNT
+};
+
+/**
+ * kbase_tlstream_init - initialize timeline stream
+ * @stream: Pointer to the stream structure
+ * @stream_type: Stream type
+ * @ready_read: Pointer to a wait queue to signal when
+ * timeline messages are ready for collection.
+ */
+void kbase_tlstream_init(struct kbase_tlstream *stream,
+ enum tl_stream_type stream_type,
+ wait_queue_head_t *ready_read);
+
+/**
+ * kbase_tlstream_term - terminate timeline stream
+ * @stream: Pointer to the stream structure
+ */
+void kbase_tlstream_term(struct kbase_tlstream *stream);
+
+/**
+ * kbase_tlstream_reset - reset stream
+ * @stream: Pointer to the stream structure
+ *
+ * Function discards all pending messages and resets packet counters.
+ */
+void kbase_tlstream_reset(struct kbase_tlstream *stream);
+
+/**
+ * kbase_tlstream_msgbuf_acquire - lock selected stream and reserve a buffer
+ * @stream: Pointer to the stream structure
+ * @msg_size: Message size
+ * @flags: Pointer to store flags passed back on stream release
+ *
+ * Lock the stream and reserve the number of bytes requested
+ * in msg_size for the user.
+ *
+ * Return: pointer to the buffer where a message can be stored
+ *
+ * Warning: The stream must be released with kbase_tlstream_msgbuf_release().
+ * Only atomic operations are allowed while the stream is locked
+ * (i.e. do not use any operation that may sleep).
+ */
+char *kbase_tlstream_msgbuf_acquire(struct kbase_tlstream *stream,
+ size_t msg_size, unsigned long *flags) __acquires(&stream->lock);
+
+/**
+ * kbase_tlstream_msgbuf_release - unlock selected stream
+ * @stream: Pointer to the stream structure
+ * @flags: Value obtained during stream acquire
+ *
+ * Release the stream that has been previously
+ * locked with a call to kbase_tlstream_msgbuf_acquire().
+ */
+void kbase_tlstream_msgbuf_release(struct kbase_tlstream *stream,
+ unsigned long flags) __releases(&stream->lock);
+
+/**
+ * kbase_tlstream_flush_stream - flush stream
+ * @stream: Pointer to the stream structure
+ *
+ * Flush pending data in the timeline stream.
+ */
+void kbase_tlstream_flush_stream(struct kbase_tlstream *stream);
+
+#endif /* _KBASE_TLSTREAM_H */
+
diff --git a/mali_kbase/tl/mali_kbase_trace_defs.h b/mali_kbase/tl/mali_kbase_trace_defs.h
new file mode 100644
index 0000000..1ee6a59
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_trace_defs.h
@@ -0,0 +1,261 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2018-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE *****
+ * ***** DO NOT INCLUDE DIRECTLY *****
+ * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * The purpose of this header file is just to contain a list of trace code identifiers
+ *
+ * Each identifier is wrapped in a macro, so that its string form and enum form can be created
+ *
+ * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block.
+ *
+ * This allows automatic creation of an enum and a corresponding array of strings
+ *
+ * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE.
+ * After #including, the includer MUST #undef KBASE_TRACE_CODE_MAKE_CODE.
+ *
+ * e.g.:
+ * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X
+ * typedef enum
+ * {
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X )
+ * #include "mali_kbase_trace_defs.h"
+ * #undef KBASE_TRACE_CODE_MAKE_CODE
+ * } kbase_trace_code;
+ *
+ * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY PURPOSE OTHER THAN THE ABOVE
+ *
+ *
+ * The use of the macro here is:
+ * - KBASE_TRACE_CODE_MAKE_CODE( X )
+ *
+ * Which produces:
+ * - For an enum, KBASE_TRACE_CODE_X
+ * - For a string, "X"
+ *
+ *
+ * For example:
+ * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to:
+ * - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum
+ * - "JM_JOB_COMPLETE" for the string
+ * - To use it to trace an event, do:
+ * - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val );
+ */
+
+#if 0 /* Dummy section to avoid breaking formatting */
+int dummy_array[] = {
+#endif
+
+/*
+ * Core events
+ */
+ /* no info_val, no gpu_addr, no atom */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
+ /* no info_val, no gpu_addr, no atom */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),
+ /* info_val == GPU_IRQ_STATUS register */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),
+ /* info_val == bits cleared */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR),
+ /* info_val == GPU_IRQ_STATUS register */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR),
+ /* GPU addr==dump address */
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),
+ KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES),
+/*
+ * Job Slot management events
+ */
+ /* info_val==irq rawstat at start */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),
+ /* info_val==jobs processed */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END),
+/* In the following:
+ *
+ * - ctx is set if a corresponding job found (NULL otherwise, e.g. some soft-stop cases)
+ * - uatom==kernel-side mapped uatom address (for correlation with user-side)
+ */
+ /* info_val==exit code; gpu_addr==chain gpuaddr */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),
+ /* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT),
+ /* gpu_addr is as follows:
+ * - If JS_STATUS active after soft-stop, val==gpu addr written to
+ * JS_HEAD on submit
+ * - otherwise gpu_addr==0 */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),
+ /* gpu_addr==JS_TAIL read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),
+/* gpu_addr is as follows:
+ * - If JS_STATUS active before soft-stop, val==JS_HEAD
+ * - otherwise gpu_addr==0
+ */
+ /* gpu_addr==JS_HEAD read */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE),
+ /* info_val == is_scheduled */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),
+ /* info_val == is_scheduled */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE),
+ /* info_val == nr jobs submitted */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),
+ /* gpu_addr==JS_HEAD_NEXT last written */
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER),
+ KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER),
+/*
+ * Job dispatch events
+ */
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB),
+ /* gpu_addr==0, info_val==0, uatom==0 */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER),
+/*
+ * Scheduler Core events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB),
+ /* gpu_addr==last value written/would be written to JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),
+ /* gpu_addr==value to write into JS_HEAD */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED),
+ /* info_val == lower 32 bits of rechecked affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),
+ /* info_val == lower 32 bits of rechecked affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),
+ /* info_val == lower 32 bits of affinity */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE),
+ /* info_val == the ctx attribute now on ctx */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),
+ /* info_val == the ctx attribute now on runpool */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL),
+ /* info_val == the ctx attribute now off ctx */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),
+ /* info_val == the ctx attribute now off runpool */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),
+/*
+ * Scheduler Policy events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX),
+ /* info_val == whether it was evicted */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ),
+ /* gpu_addr==JS_HEAD to write if the job were run */
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START),
+ KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END),
+/*
+ * Power Management Events
+ */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER),
+ /* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF),
+ /* info_val == policy number, or -1 for "Already changing" */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY),
+ /* info_val == policy number */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),
+ /* info_val == policy number */
+ KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),
+/* Unused code kept only so that every real entry above can keep its
+ * trailing comma. All other codes MUST come before this one. */
+ KBASE_TRACE_CODE_MAKE_CODE(DUMMY)
+
+#if 0 /* Dummy section to avoid breaking formatting */
+};
+#endif
+
+/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
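
The header-guard note above is the tell-tale of an X-macro file: mali_kbase_trace_defs.h is meant to be included repeatedly, with the including file defining KBASE_TRACE_CODE_MAKE_CODE to whatever expansion it needs (an enumerator, a string-table entry, and so on). Below is a minimal consumer sketch of that pattern; the KBASE_TRACE_CODE wrapper macro and the enum name are assumptions made for illustration and are not taken from this patch.

/* Hypothetical consumer of the X-macro list (names assumed, not from this patch). */
#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
enum kbase_trace_code {
	/* Each KBASE_TRACE_CODE_MAKE_CODE(FOO) line in the defs file becomes
	 * the enumerator KBASE_TRACE_CODE_FOO; the trailing DUMMY entry keeps
	 * the list free of a dangling comma problem.
	 */
#include "mali_kbase_trace_defs.h"
};
#undef KBASE_TRACE_CODE_MAKE_CODE
#undef KBASE_TRACE_CODE
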
diff --git a/mali_kbase/tl/mali_kbase_tracepoints.c b/mali_kbase/tl/mali_kbase_tracepoints.c
new file mode 100644
index 0000000..bae95b4
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_tracepoints.c
@@ -0,0 +1,2991 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * DO NOT EDIT.
+ */
+
+#include "mali_kbase_tracepoints.h"
+#include "mali_kbase_tlstream.h"
+#include "mali_kbase_tl_serialize.h"
+
+/* clang-format off */
+
+/* Message ids of trace events that are recorded in the timeline stream. */
+enum tl_msg_id_obj {
+ KBASE_TL_NEW_CTX,
+ KBASE_TL_NEW_GPU,
+ KBASE_TL_NEW_LPU,
+ KBASE_TL_NEW_ATOM,
+ KBASE_TL_NEW_AS,
+ KBASE_TL_DEL_CTX,
+ KBASE_TL_DEL_ATOM,
+ KBASE_TL_LIFELINK_LPU_GPU,
+ KBASE_TL_LIFELINK_AS_GPU,
+ KBASE_TL_RET_CTX_LPU,
+ KBASE_TL_RET_ATOM_CTX,
+ KBASE_TL_RET_ATOM_LPU,
+ KBASE_TL_NRET_CTX_LPU,
+ KBASE_TL_NRET_ATOM_CTX,
+ KBASE_TL_NRET_ATOM_LPU,
+ KBASE_TL_RET_AS_CTX,
+ KBASE_TL_NRET_AS_CTX,
+ KBASE_TL_RET_ATOM_AS,
+ KBASE_TL_NRET_ATOM_AS,
+ KBASE_TL_ATTRIB_ATOM_CONFIG,
+ KBASE_TL_ATTRIB_ATOM_PRIORITY,
+ KBASE_TL_ATTRIB_ATOM_STATE,
+ KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
+ KBASE_TL_ATTRIB_ATOM_JIT,
+ KBASE_TL_JIT_USEDPAGES,
+ KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
+ KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
+ KBASE_TL_ATTRIB_AS_CONFIG,
+ KBASE_TL_EVENT_LPU_SOFTSTOP,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+ KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+ KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+ KBASE_TL_EVENT_ATOM_SOFTJOB_END,
+ KBASE_JD_GPU_SOFT_RESET,
+ KBASE_TL_KBASE_NEW_DEVICE,
+ KBASE_TL_KBASE_DEVICE_PROGRAM_CSG,
+ KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG,
+ KBASE_TL_KBASE_NEW_CTX,
+ KBASE_TL_KBASE_DEL_CTX,
+ KBASE_TL_KBASE_NEW_KCPUQUEUE,
+ KBASE_TL_KBASE_DEL_KCPUQUEUE,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
+ KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START,
+ KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END,
+ KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER,
+ KBASE_OBJ_MSG_COUNT,
+};
+
+/* Message ids of trace events that are recorded in the auxiliary stream. */
+enum tl_msg_id_aux {
+ KBASE_AUX_PM_STATE,
+ KBASE_AUX_PAGEFAULT,
+ KBASE_AUX_PAGESALLOC,
+ KBASE_AUX_DEVFREQ_TARGET,
+ KBASE_AUX_PROTECTED_ENTER_START,
+ KBASE_AUX_PROTECTED_ENTER_END,
+ KBASE_AUX_PROTECTED_LEAVE_START,
+ KBASE_AUX_PROTECTED_LEAVE_END,
+ KBASE_AUX_JIT_STATS,
+ KBASE_AUX_EVENT_JOB_SLOT,
+ KBASE_AUX_MSG_COUNT,
+};
+
+#define OBJ_TL_LIST \
+ TP_DESC(KBASE_TL_NEW_CTX, \
+ "object ctx is created", \
+ "@pII", \
+ "ctx,ctx_nr,tgid") \
+ TP_DESC(KBASE_TL_NEW_GPU, \
+ "object gpu is created", \
+ "@pII", \
+ "gpu,gpu_id,core_count") \
+ TP_DESC(KBASE_TL_NEW_LPU, \
+ "object lpu is created", \
+ "@pII", \
+ "lpu,lpu_nr,lpu_fn") \
+ TP_DESC(KBASE_TL_NEW_ATOM, \
+ "object atom is created", \
+ "@pI", \
+ "atom,atom_nr") \
+ TP_DESC(KBASE_TL_NEW_AS, \
+ "address space object is created", \
+ "@pI", \
+ "address_space,as_nr") \
+ TP_DESC(KBASE_TL_DEL_CTX, \
+ "context is destroyed", \
+ "@p", \
+ "ctx") \
+ TP_DESC(KBASE_TL_DEL_ATOM, \
+ "atom is destroyed", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_LIFELINK_LPU_GPU, \
+ "lpu is deleted with gpu", \
+ "@pp", \
+ "lpu,gpu") \
+ TP_DESC(KBASE_TL_LIFELINK_AS_GPU, \
+ "address space is deleted with gpu", \
+ "@pp", \
+ "address_space,gpu") \
+ TP_DESC(KBASE_TL_RET_CTX_LPU, \
+ "context is retained by lpu", \
+ "@pp", \
+ "ctx,lpu") \
+ TP_DESC(KBASE_TL_RET_ATOM_CTX, \
+ "atom is retained by context", \
+ "@pp", \
+ "atom,ctx") \
+ TP_DESC(KBASE_TL_RET_ATOM_LPU, \
+ "atom is retained by lpu", \
+ "@pps", \
+ "atom,lpu,attrib_match_list") \
+ TP_DESC(KBASE_TL_NRET_CTX_LPU, \
+ "context is released by lpu", \
+ "@pp", \
+ "ctx,lpu") \
+ TP_DESC(KBASE_TL_NRET_ATOM_CTX, \
+ "atom is released by context", \
+ "@pp", \
+ "atom,ctx") \
+ TP_DESC(KBASE_TL_NRET_ATOM_LPU, \
+ "atom is released by lpu", \
+ "@pp", \
+ "atom,lpu") \
+ TP_DESC(KBASE_TL_RET_AS_CTX, \
+ "address space is retained by context", \
+ "@pp", \
+ "address_space,ctx") \
+ TP_DESC(KBASE_TL_NRET_AS_CTX, \
+ "address space is released by context", \
+ "@pp", \
+ "address_space,ctx") \
+ TP_DESC(KBASE_TL_RET_ATOM_AS, \
+ "atom is retained by address space", \
+ "@pp", \
+ "atom,address_space") \
+ TP_DESC(KBASE_TL_NRET_ATOM_AS, \
+ "atom is released by address space", \
+ "@pp", \
+ "atom,address_space") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_CONFIG, \
+ "atom job slot attributes", \
+ "@pLLI", \
+ "atom,descriptor,affinity,config") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITY, \
+ "atom priority", \
+ "@pI", \
+ "atom,prio") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_STATE, \
+ "atom state", \
+ "@pI", \
+ "atom,state") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITIZED, \
+ "atom caused priority change", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_JIT, \
+ "jit done for atom", \
+ "@pLLILILLL", \
+ "atom,edit_addr,new_addr,jit_flags,mem_flags,j_id,com_pgs,extent,va_pgs") \
+ TP_DESC(KBASE_TL_JIT_USEDPAGES, \
+ "used pages for jit", \
+ "@LI", \
+ "used_pages,j_id") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_JITALLOCINFO, \
+ "Information about JIT allocations", \
+ "@pLLLIIIII", \
+ "atom,va_pgs,com_pgs,extent,j_id,bin_id,max_allocs,jit_flags,usg_id") \
+ TP_DESC(KBASE_TL_ATTRIB_ATOM_JITFREEINFO, \
+ "Information about JIT frees", \
+ "@pI", \
+ "atom,j_id") \
+ TP_DESC(KBASE_TL_ATTRIB_AS_CONFIG, \
+ "address space attributes", \
+ "@pLLL", \
+ "address_space,transtab,memattr,transcfg") \
+ TP_DESC(KBASE_TL_EVENT_LPU_SOFTSTOP, \
+ "softstop event on given lpu", \
+ "@p", \
+ "lpu") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX, \
+ "atom softstopped", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE, \
+ "atom softstop issued", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_START, \
+ "atom soft job has started", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_END, \
+ "atom soft job has completed", \
+ "@p", \
+ "atom") \
+ TP_DESC(KBASE_JD_GPU_SOFT_RESET, \
+ "gpu soft reset", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_TL_KBASE_NEW_DEVICE, \
+ "New KBase Device", \
+ "@III", \
+ "kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs") \
+ TP_DESC(KBASE_TL_KBASE_DEVICE_PROGRAM_CSG, \
+ "CSG is programmed to a slot", \
+ "@III", \
+ "kbase_device_id,gpu_cmdq_grp_handle,kbase_device_csg_slot_index") \
+ TP_DESC(KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG, \
+ "CSG is deprogrammed from a slot", \
+ "@II", \
+ "kbase_device_id,kbase_device_csg_slot_index") \
+ TP_DESC(KBASE_TL_KBASE_NEW_CTX, \
+ "New KBase Context", \
+ "@II", \
+ "kernel_ctx_id,kbase_device_id") \
+ TP_DESC(KBASE_TL_KBASE_DEL_CTX, \
+ "Delete KBase Context", \
+ "@I", \
+ "kernel_ctx_id") \
+ TP_DESC(KBASE_TL_KBASE_NEW_KCPUQUEUE, \
+ "New KCPU Queue", \
+ "@pII", \
+ "kcpu_queue,kernel_ctx_id,kcpuq_num_pending_cmds") \
+ TP_DESC(KBASE_TL_KBASE_DEL_KCPUQUEUE, \
+ "Delete KCPU Queue", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL, \
+ "KCPU Queue enqueues Signal on Fence", \
+ "@pp", \
+ "kcpu_queue,fence") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT, \
+ "KCPU Queue enqueues Wait on Fence", \
+ "@pp", \
+ "kcpu_queue,fence") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ "Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ "Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+ "@pLI", \
+ "kcpu_queue,cqs_obj_gpu_addr,cqs_obj_compare_value") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
+ "End array of KCPU Queue enqueues Wait on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ "Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ "Array item of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+ "@pL", \
+ "kcpu_queue,cqs_obj_gpu_addr") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET, \
+ "End array of KCPU Queue enqueues Set on Cross Queue Sync Object", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ "Begin array of KCPU Queue enqueues Debug Copy", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ "Array item of KCPU Queue enqueues Debug Copy", \
+ "@pL", \
+ "kcpu_queue,debugcopy_dst_size") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY, \
+ "End array of KCPU Queue enqueues Debug Copy", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
+ "KCPU Queue enqueues Map Import", \
+ "@pL", \
+ "kcpu_queue,map_import_buf_gpu_addr") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT, \
+ "KCPU Queue enqueues Unmap Import", \
+ "@pL", \
+ "kcpu_queue,map_import_buf_gpu_addr") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE, \
+ "KCPU Queue enqueues Unmap Import ignoring reference count", \
+ "@pL", \
+ "kcpu_queue,map_import_buf_gpu_addr") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ "Begin array of KCPU Queue enqueues JIT Alloc", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ "Array item of KCPU Queue enqueues JIT Alloc", \
+ "@pLLLLIIIII", \
+ "kcpu_queue,jit_alloc_gpu_alloc_addr_dest,jit_alloc_va_pages,jit_alloc_commit_pages,jit_alloc_extent,jit_alloc_jit_id,jit_alloc_bin_id,jit_alloc_max_allocations,jit_alloc_flags,jit_alloc_usage_id") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
+ "End array of KCPU Queue enqueues JIT Alloc", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ "Begin array of KCPU Queue enqueues JIT Free", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ "Array item of KCPU Queue enqueues JIT Free", \
+ "@pI", \
+ "kcpu_queue,jit_alloc_jit_id") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE, \
+ "End array of KCPU Queue enqueues JIT Free", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
+ "KCPU Queue starts a Signal on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END, \
+ "KCPU Queue ends a Signal on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START, \
+ "KCPU Queue starts a Wait on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END, \
+ "KCPU Queue ends a Wait on Fence", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
+ "KCPU Queue starts a Wait on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
+ "KCPU Queue ends a Wait on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET, \
+ "KCPU Queue executes a Set on an array of Cross Queue Sync Objects", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START, \
+		"KCPU Queue starts an array of Debug Copies", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END, \
+		"KCPU Queue ends an array of Debug Copies", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
+ "KCPU Queue starts a Map Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END, \
+ "KCPU Queue ends a Map Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START, \
+ "KCPU Queue starts an Unmap Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END, \
+ "KCPU Queue ends an Unmap Import", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START, \
+ "KCPU Queue starts an Unmap Import ignoring reference count", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END, \
+ "KCPU Queue ends an Unmap Import ignoring reference count", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START, \
+ "KCPU Queue starts an array of JIT Allocs", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ "Begin array of KCPU Queue ends an array of JIT Allocs", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ "Array item of KCPU Queue ends an array of JIT Allocs", \
+ "@pLL", \
+ "kcpu_queue,jit_alloc_gpu_alloc_addr,jit_alloc_mmu_flags") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
+ "End array of KCPU Queue ends an array of JIT Allocs", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START, \
+ "KCPU Queue starts an array of JIT Frees", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ "Begin array of KCPU Queue ends an array of JIT Frees", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ "Array item of KCPU Queue ends an array of JIT Frees", \
+ "@pL", \
+ "kcpu_queue,jit_free_pages_used") \
+ TP_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
+ "End array of KCPU Queue ends an array of JIT Frees", \
+ "@p", \
+ "kcpu_queue") \
+ TP_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER, \
+ "KCPU Queue executes an Error Barrier", \
+ "@p", \
+ "kcpu_queue") \
+
+#define MIPE_HEADER_BLOB_VAR_NAME __obj_desc_header
+#define MIPE_HEADER_TP_LIST OBJ_TL_LIST
+#define MIPE_HEADER_TP_LIST_COUNT KBASE_OBJ_MSG_COUNT
+#define MIPE_HEADER_PKT_CLASS TL_PACKET_CLASS_OBJ
+
+#include "mali_kbase_mipe_gen_header.h"
+
+const char *obj_desc_header = (const char *) &__obj_desc_header;
+const size_t obj_desc_header_size = sizeof(__obj_desc_header);
+
+#define AUX_TL_LIST \
+ TP_DESC(KBASE_AUX_PM_STATE, \
+ "PM state", \
+ "@IL", \
+ "core_type,core_state_bitset") \
+ TP_DESC(KBASE_AUX_PAGEFAULT, \
+ "Page fault", \
+ "@IIL", \
+ "ctx_nr,as_nr,page_cnt_change") \
+ TP_DESC(KBASE_AUX_PAGESALLOC, \
+ "Total alloc pages change", \
+ "@IL", \
+ "ctx_nr,page_cnt") \
+ TP_DESC(KBASE_AUX_DEVFREQ_TARGET, \
+ "New device frequency target", \
+ "@L", \
+ "target_freq") \
+ TP_DESC(KBASE_AUX_PROTECTED_ENTER_START, \
+ "enter protected mode start", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_PROTECTED_ENTER_END, \
+ "enter protected mode end", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_PROTECTED_LEAVE_START, \
+ "leave protected mode start", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_PROTECTED_LEAVE_END, \
+ "leave protected mode end", \
+ "@p", \
+ "gpu") \
+ TP_DESC(KBASE_AUX_JIT_STATS, \
+ "per-bin JIT statistics", \
+ "@IIIIII", \
+ "ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages") \
+ TP_DESC(KBASE_AUX_EVENT_JOB_SLOT, \
+ "event on a given job slot", \
+ "@pIII", \
+ "ctx,slot_nr,atom_nr,event") \
+
+#define MIPE_HEADER_BLOB_VAR_NAME __aux_desc_header
+#define MIPE_HEADER_TP_LIST AUX_TL_LIST
+#define MIPE_HEADER_TP_LIST_COUNT KBASE_AUX_MSG_COUNT
+#define MIPE_HEADER_PKT_CLASS TL_PACKET_CLASS_AUX
+
+#include "mali_kbase_mipe_gen_header.h"
+
+const char *aux_desc_header = (const char *) &__aux_desc_header;
+const size_t aux_desc_header_size = sizeof(__aux_desc_header);
+
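Each TP_DESC entry ties a message id to a human-readable description, a type-signature string and a comma-separated argument-name list; mali_kbase_mipe_gen_header.h turns the two lists into the __obj_desc_header and __aux_desc_header blobs exported above. Cross-checking the signatures against the writer functions further down suggests the letters denote serialized widths: 'p' a pointer, 'I' a 32-bit value, 'L' a 64-bit value, 's' a NUL-terminated string. The helper below is a standalone illustration of that reading, not driver code, and its name is invented.

/* Illustration only: interpret a TP_DESC signature string under the mapping
 * inferred from the writer functions in this file ('p' pointer, 'I' u32,
 * 'L' u64, 's' variable-length string contributing nothing fixed).
 */
#include <stddef.h>
#include <stdint.h>

static size_t example_fixed_payload_size(const char *sig)
{
	size_t size = 0;

	if (*sig == '@')
		sig++;	/* '@' introduces the argument type list */
	for (; *sig; sig++) {
		switch (*sig) {
		case 'p': size += sizeof(void *);   break; /* object pointer */
		case 'I': size += sizeof(uint32_t); break; /* 32-bit scalar  */
		case 'L': size += sizeof(uint64_t); break; /* 64-bit scalar  */
		case 's': break; /* string: length only known at run time */
		default:  break;
		}
	}
	return size;
}

Under this reading, example_fixed_payload_size("@pII") matches the argument bytes that __kbase_tlstream_tl_new_ctx adds on top of the message id and timestamp.
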
+void __kbase_tlstream_tl_new_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 ctx_nr,
+ u32 tgid)
+{
+ const u32 msg_id = KBASE_TL_NEW_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(ctx_nr)
+ + sizeof(tgid)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &tgid, sizeof(tgid));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
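Every writer in this file follows the same shape: msg_size counts the message id, a 64-bit timestamp and each argument verbatim; a buffer of that size is acquired under acq_flags, the fields are serialized in declaration order, and the buffer is released. In the driver these functions are normally reached through KBASE_TLSTREAM_* wrapper macros declared in mali_kbase_tracepoints.h that first check whether the timeline has been acquired. The fragment below is only a plausible sketch of such a wrapper; the timeline_flags field and the TL_STREAM_TYPE_OBJ index are assumptions for illustration, the real macros being defined by the header in this patch.

/* Plausible caller-side sketch (assumed, not copied from the header):
 * emit the tracepoint only while the timeline is being acquired.
 * timeline_flags and TL_STREAM_TYPE_OBJ are assumed names.
 */
#define EXAMPLE_TLSTREAM_TL_NEW_CTX(kbdev, ctx, ctx_nr, tgid) \
	do { \
		struct kbase_device *__kbdev = (kbdev); \
		if (atomic_read(&__kbdev->timeline_flags)) \
			__kbase_tlstream_tl_new_ctx( \
				&__kbdev->timeline->streams[TL_STREAM_TYPE_OBJ], \
				ctx, ctx_nr, tgid); \
	} while (0)
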
+void __kbase_tlstream_tl_new_gpu(
+ struct kbase_tlstream *stream,
+ const void *gpu,
+ u32 gpu_id,
+ u32 core_count)
+{
+ const u32 msg_id = KBASE_TL_NEW_GPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ + sizeof(gpu_id)
+ + sizeof(core_count)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu_id, sizeof(gpu_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &core_count, sizeof(core_count));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_lpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ u32 lpu_nr,
+ u32 lpu_fn)
+{
+ const u32 msg_id = KBASE_TL_NEW_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(lpu)
+ + sizeof(lpu_nr)
+ + sizeof(lpu_fn)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu_nr, sizeof(lpu_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu_fn, sizeof(lpu_fn));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_atom(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 atom_nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_ATOM;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(atom_nr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom_nr, sizeof(atom_nr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_new_as(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u32 as_nr)
+{
+ const u32 msg_id = KBASE_TL_NEW_AS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(as_nr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &as_nr, sizeof(as_nr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_DEL_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_del_atom(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_DEL_ATOM;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_lifelink_lpu_gpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(lpu)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_lifelink_as_gpu(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_RET_CTX_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu,
+ const char *attrib_match_list)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
+ const size_t s0 = sizeof(u32) + sizeof(char)
+ + strnlen(attrib_match_list, STRLEN_MAX);
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(lpu)
+ + s0
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+ pos = kbasep_serialize_string(buffer,
+ pos, attrib_match_list, s0);
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
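__kbase_tlstream_tl_ret_atom_lpu is the only writer in this group with a variable-length field: s0 budgets a u32 length prefix, the string bytes up to STRLEN_MAX, and one byte for the terminator, and kbasep_serialize_string (from mali_kbase_tl_serialize.h, not shown in this hunk) consumes that budget. The helper below is a rough sketch of such length-prefixed serialization, written from the size arithmetic above rather than from the real header; the name and the exact on-wire layout are assumptions.

/* Rough sketch of a length-prefixed string serializer, inferred from the
 * s0 budget above (u32 length + string bytes + NUL); not the driver's helper.
 */
#include <stdint.h>
#include <string.h>

static size_t example_serialize_string(char *buf, size_t pos,
					const char *str, size_t max_write)
{
	/* max_write mirrors s0: prefix + payload + terminator. */
	uint32_t len = (uint32_t)strnlen(str, max_write - sizeof(uint32_t) - 1);

	memcpy(&buf[pos], &len, sizeof(len));	/* length prefix           */
	pos += sizeof(len);
	memcpy(&buf[pos], str, len);		/* string payload          */
	buf[pos + len] = '\0';			/* explicit terminator     */
	return pos + len + 1;			/* new write position      */
}
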
+void __kbase_tlstream_tl_nret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_RET_AS_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx)
+{
+ const u32 msg_id = KBASE_TL_NRET_AS_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(ctx)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space)
+{
+ const u32 msg_id = KBASE_TL_RET_ATOM_AS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(address_space)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space)
+{
+ const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(address_space)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_config(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 descriptor,
+ u64 affinity,
+ u32 config)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(descriptor)
+ + sizeof(affinity)
+ + sizeof(config)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &descriptor, sizeof(descriptor));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &affinity, sizeof(affinity));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &config, sizeof(config));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 prio)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(prio)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &prio, sizeof(prio));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 state)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(state)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &state, sizeof(state));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jit(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 edit_addr,
+ u64 new_addr,
+ u32 jit_flags,
+ u64 mem_flags,
+ u32 j_id,
+ u64 com_pgs,
+ u64 extent,
+ u64 va_pgs)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(edit_addr)
+ + sizeof(new_addr)
+ + sizeof(jit_flags)
+ + sizeof(mem_flags)
+ + sizeof(j_id)
+ + sizeof(com_pgs)
+ + sizeof(extent)
+ + sizeof(va_pgs)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &edit_addr, sizeof(edit_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &new_addr, sizeof(new_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_flags, sizeof(jit_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &mem_flags, sizeof(mem_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &com_pgs, sizeof(com_pgs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &extent, sizeof(extent));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &va_pgs, sizeof(va_pgs));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jit_usedpages(
+ struct kbase_tlstream *stream,
+ u64 used_pages,
+ u32 j_id)
+{
+ const u32 msg_id = KBASE_TL_JIT_USEDPAGES;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(used_pages)
+ + sizeof(j_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &used_pages, sizeof(used_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 va_pgs,
+ u64 com_pgs,
+ u64 extent,
+ u32 j_id,
+ u32 bin_id,
+ u32 max_allocs,
+ u32 jit_flags,
+ u32 usg_id)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(va_pgs)
+ + sizeof(com_pgs)
+ + sizeof(extent)
+ + sizeof(j_id)
+ + sizeof(bin_id)
+ + sizeof(max_allocs)
+ + sizeof(jit_flags)
+ + sizeof(usg_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &va_pgs, sizeof(va_pgs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &com_pgs, sizeof(com_pgs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &extent, sizeof(extent));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &bin_id, sizeof(bin_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &max_allocs, sizeof(max_allocs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_flags, sizeof(jit_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &usg_id, sizeof(usg_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 j_id)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ + sizeof(j_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &j_id, sizeof(j_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_as_config(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u64 transtab,
+ u64 memattr,
+ u64 transcfg)
+{
+ const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(address_space)
+ + sizeof(transtab)
+ + sizeof(memattr)
+ + sizeof(transcfg)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &address_space, sizeof(address_space));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &transtab, sizeof(transtab));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &memattr, sizeof(memattr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &transcfg, sizeof(transcfg));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_lpu_softstop(
+ struct kbase_tlstream *stream,
+ const void *lpu)
+{
+ const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(lpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &lpu, sizeof(lpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_ex(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_issue(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_start(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_end(
+ struct kbase_tlstream *stream,
+ const void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(atom)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom, sizeof(atom));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_jd_gpu_soft_reset(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pm_state(
+ struct kbase_tlstream *stream,
+ u32 core_type,
+ u64 core_state_bitset)
+{
+ const u32 msg_id = KBASE_AUX_PM_STATE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(core_type)
+ + sizeof(core_state_bitset)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &core_type, sizeof(core_type));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &core_state_bitset, sizeof(core_state_bitset));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagefault(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 as_nr,
+ u64 page_cnt_change)
+{
+ const u32 msg_id = KBASE_AUX_PAGEFAULT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx_nr)
+ + sizeof(as_nr)
+ + sizeof(page_cnt_change)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &as_nr, sizeof(as_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &page_cnt_change, sizeof(page_cnt_change));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u64 page_cnt)
+{
+ const u32 msg_id = KBASE_AUX_PAGESALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx_nr)
+ + sizeof(page_cnt)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &page_cnt, sizeof(page_cnt));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(
+ struct kbase_tlstream *stream,
+ u64 target_freq)
+{
+ const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(target_freq)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &target_freq, sizeof(target_freq));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_start(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_end(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_start(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_end(
+ struct kbase_tlstream *stream,
+ const void *gpu)
+{
+ const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(gpu)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu, sizeof(gpu));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_jit_stats(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 bid,
+ u32 max_allocs,
+ u32 allocs,
+ u32 va_pages,
+ u32 ph_pages)
+{
+ const u32 msg_id = KBASE_AUX_JIT_STATS;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx_nr)
+ + sizeof(bid)
+ + sizeof(max_allocs)
+ + sizeof(allocs)
+ + sizeof(va_pages)
+ + sizeof(ph_pages)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx_nr, sizeof(ctx_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &bid, sizeof(bid));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &max_allocs, sizeof(max_allocs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &allocs, sizeof(allocs));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &va_pages, sizeof(va_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ph_pages, sizeof(ph_pages));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_event_job_slot(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 slot_nr,
+ u32 atom_nr,
+ u32 event)
+{
+ const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(ctx)
+ + sizeof(slot_nr)
+ + sizeof(atom_nr)
+ + sizeof(event)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &ctx, sizeof(ctx));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &slot_nr, sizeof(slot_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &atom_nr, sizeof(atom_nr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &event, sizeof(event));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_new_device(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_gpu_core_count,
+ u32 kbase_device_max_num_csgs)
+{
+ const u32 msg_id = KBASE_TL_KBASE_NEW_DEVICE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kbase_device_id)
+ + sizeof(kbase_device_gpu_core_count)
+ + sizeof(kbase_device_max_num_csgs)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_gpu_core_count, sizeof(kbase_device_gpu_core_count));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_max_num_csgs, sizeof(kbase_device_max_num_csgs));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_program_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 gpu_cmdq_grp_handle,
+ u32 kbase_device_csg_slot_index)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEVICE_PROGRAM_CSG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kbase_device_id)
+ + sizeof(gpu_cmdq_grp_handle)
+ + sizeof(kbase_device_csg_slot_index)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_deprogram_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_csg_slot_index)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kbase_device_id)
+ + sizeof(kbase_device_csg_slot_index)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_new_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id,
+ u32 kbase_device_id)
+{
+ const u32 msg_id = KBASE_TL_KBASE_NEW_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kernel_ctx_id)
+ + sizeof(kbase_device_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kbase_device_id, sizeof(kbase_device_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_del_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEL_CTX;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kernel_ctx_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_new_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u32 kernel_ctx_id,
+ u32 kcpuq_num_pending_cmds)
+{
+ const u32 msg_id = KBASE_TL_KBASE_NEW_KCPUQUEUE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(kernel_ctx_id)
+ + sizeof(kcpuq_num_pending_cmds)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpuq_num_pending_cmds, sizeof(kcpuq_num_pending_cmds));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_del_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_DEL_KCPUQUEUE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *fence)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(fence)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &fence, sizeof(fence));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *fence)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(fence)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &fence, sizeof(fence));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr,
+ u32 cqs_obj_compare_value)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(cqs_obj_gpu_addr)
+ + sizeof(cqs_obj_compare_value)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &cqs_obj_compare_value, sizeof(cqs_obj_compare_value));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(cqs_obj_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 debugcopy_dst_size)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(debugcopy_dst_size)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &debugcopy_dst_size, sizeof(debugcopy_dst_size));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(map_import_buf_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(map_import_buf_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(map_import_buf_gpu_addr)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr_dest,
+ u64 jit_alloc_va_pages,
+ u64 jit_alloc_commit_pages,
+ u64 jit_alloc_extent,
+ u32 jit_alloc_jit_id,
+ u32 jit_alloc_bin_id,
+ u32 jit_alloc_max_allocations,
+ u32 jit_alloc_flags,
+ u32 jit_alloc_usage_id)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_alloc_gpu_alloc_addr_dest)
+ + sizeof(jit_alloc_va_pages)
+ + sizeof(jit_alloc_commit_pages)
+ + sizeof(jit_alloc_extent)
+ + sizeof(jit_alloc_jit_id)
+ + sizeof(jit_alloc_bin_id)
+ + sizeof(jit_alloc_max_allocations)
+ + sizeof(jit_alloc_flags)
+ + sizeof(jit_alloc_usage_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_gpu_alloc_addr_dest, sizeof(jit_alloc_gpu_alloc_addr_dest));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_va_pages, sizeof(jit_alloc_va_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_commit_pages, sizeof(jit_alloc_commit_pages));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_extent, sizeof(jit_alloc_extent));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_bin_id, sizeof(jit_alloc_bin_id));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_max_allocations, sizeof(jit_alloc_max_allocations));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_flags, sizeof(jit_alloc_flags));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_usage_id, sizeof(jit_alloc_usage_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u32 jit_alloc_jit_id)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_alloc_jit_id)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr,
+ u64 jit_alloc_mmu_flags)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_alloc_gpu_alloc_addr)
+ + sizeof(jit_alloc_mmu_flags)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_gpu_alloc_addr, sizeof(jit_alloc_gpu_alloc_addr));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_alloc_mmu_flags, sizeof(jit_alloc_mmu_flags));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_free_pages_used)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ + sizeof(jit_free_pages_used)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &jit_free_pages_used, sizeof(jit_free_pages_used));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_errorbarrier(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue)
+{
+ const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER;
+ const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+ + sizeof(kcpu_queue)
+ ;
+ char *buffer;
+ unsigned long acq_flags;
+ size_t pos = 0;
+
+ buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+ pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_serialize_timestamp(buffer, pos);
+ pos = kbasep_serialize_bytes(buffer,
+ pos, &kcpu_queue, sizeof(kcpu_queue));
+
+ kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+/* clang-format on */
diff --git a/mali_kbase/tl/mali_kbase_tracepoints.h b/mali_kbase/tl/mali_kbase_tracepoints.h
new file mode 100644
index 0000000..b2c20ae
--- /dev/null
+++ b/mali_kbase/tl/mali_kbase_tracepoints.h
@@ -0,0 +1,2542 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * DO NOT EDIT.
+ */
+
+#if !defined(_KBASE_TRACEPOINTS_H)
+#define _KBASE_TRACEPOINTS_H
+
+/* Tracepoints are abstract callbacks notifying that some important
+ * software or hardware event has happened.
+ *
+ * In this particular implementation, it results in a MIPE
+ * timeline event and, in some cases, also fires an ftrace event
+ * (a.k.a. a Gator event; see details below).
+ */
+
+#include "mali_kbase.h"
+#include "mali_kbase_gator.h"
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+
+/* clang-format off */
+
+struct kbase_tlstream;
+
+extern const size_t __obj_stream_offset;
+extern const size_t __aux_stream_offset;
+
+/* This macro dispatches a kbase_tlstream from
+ * a kbase_device instance. Only AUX or OBJ
+ * streams can be dispatched. It is aware of
+ * the kbase_timeline binary representation and
+ * relies on the offset variables
+ * __obj_stream_offset and __aux_stream_offset
+ * (see the usage sketch below the definition).
+ */
+#define __TL_DISPATCH_STREAM(kbdev, stype) \
+ ((struct kbase_tlstream *) \
+ ((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
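+
+/* Usage sketch (editorial illustration, not generated code): the
+ * tracepoint macros defined later in this header pass the dispatched
+ * stream straight to the writer functions declared below, e.g.
+ *
+ *   __kbase_tlstream_tl_new_ctx(
+ *           __TL_DISPATCH_STREAM(kbdev, obj),
+ *           ctx, ctx_nr, tgid);
+ *
+ * which resolves to the OBJ stream stored __obj_stream_offset bytes
+ * from the start of kbdev->timeline.
+ */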
+
+struct tp_desc;
+
+/* Descriptors of timeline messages transmitted in object events stream. */
+extern const char *obj_desc_header;
+extern const size_t obj_desc_header_size;
+/* Descriptors of timeline messages transmitted in auxiliary events stream. */
+extern const char *aux_desc_header;
+extern const size_t aux_desc_header_size;
+
+#define TL_ATOM_STATE_IDLE 0
+#define TL_ATOM_STATE_READY 1
+#define TL_ATOM_STATE_DONE 2
+#define TL_ATOM_STATE_POSTED 3
+
+#define TL_JS_EVENT_START GATOR_JOB_SLOT_START
+#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
+#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED
+
+#define TLSTREAM_ENABLED (1 << 31)
+
+void __kbase_tlstream_tl_new_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 ctx_nr,
+ u32 tgid);
+void __kbase_tlstream_tl_new_gpu(
+ struct kbase_tlstream *stream,
+ const void *gpu,
+ u32 gpu_id,
+ u32 core_count);
+void __kbase_tlstream_tl_new_lpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ u32 lpu_nr,
+ u32 lpu_fn);
+void __kbase_tlstream_tl_new_atom(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 atom_nr);
+void __kbase_tlstream_tl_new_as(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u32 as_nr);
+void __kbase_tlstream_tl_del_ctx(
+ struct kbase_tlstream *stream,
+ const void *ctx);
+void __kbase_tlstream_tl_del_atom(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_lifelink_lpu_gpu(
+ struct kbase_tlstream *stream,
+ const void *lpu,
+ const void *gpu);
+void __kbase_tlstream_tl_lifelink_as_gpu(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *gpu);
+void __kbase_tlstream_tl_ret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu);
+void __kbase_tlstream_tl_ret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx);
+void __kbase_tlstream_tl_ret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu,
+ const char *attrib_match_list);
+void __kbase_tlstream_tl_nret_ctx_lpu(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ const void *lpu);
+void __kbase_tlstream_tl_nret_atom_ctx(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *ctx);
+void __kbase_tlstream_tl_nret_atom_lpu(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *lpu);
+void __kbase_tlstream_tl_ret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx);
+void __kbase_tlstream_tl_nret_as_ctx(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ const void *ctx);
+void __kbase_tlstream_tl_ret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space);
+void __kbase_tlstream_tl_nret_atom_as(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ const void *address_space);
+void __kbase_tlstream_tl_attrib_atom_config(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 descriptor,
+ u64 affinity,
+ u32 config);
+void __kbase_tlstream_tl_attrib_atom_priority(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 prio);
+void __kbase_tlstream_tl_attrib_atom_state(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 state);
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_attrib_atom_jit(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 edit_addr,
+ u64 new_addr,
+ u32 jit_flags,
+ u64 mem_flags,
+ u32 j_id,
+ u64 com_pgs,
+ u64 extent,
+ u64 va_pgs);
+void __kbase_tlstream_tl_jit_usedpages(
+ struct kbase_tlstream *stream,
+ u64 used_pages,
+ u32 j_id);
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u64 va_pgs,
+ u64 com_pgs,
+ u64 extent,
+ u32 j_id,
+ u32 bin_id,
+ u32 max_allocs,
+ u32 jit_flags,
+ u32 usg_id);
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
+ struct kbase_tlstream *stream,
+ const void *atom,
+ u32 j_id);
+void __kbase_tlstream_tl_attrib_as_config(
+ struct kbase_tlstream *stream,
+ const void *address_space,
+ u64 transtab,
+ u64 memattr,
+ u64 transcfg);
+void __kbase_tlstream_tl_event_lpu_softstop(
+ struct kbase_tlstream *stream,
+ const void *lpu);
+void __kbase_tlstream_tl_event_atom_softstop_ex(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_event_atom_softstop_issue(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_start(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_end(
+ struct kbase_tlstream *stream,
+ const void *atom);
+void __kbase_tlstream_jd_gpu_soft_reset(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_pm_state(
+ struct kbase_tlstream *stream,
+ u32 core_type,
+ u64 core_state_bitset);
+void __kbase_tlstream_aux_pagefault(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 as_nr,
+ u64 page_cnt_change);
+void __kbase_tlstream_aux_pagesalloc(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u64 page_cnt);
+void __kbase_tlstream_aux_devfreq_target(
+ struct kbase_tlstream *stream,
+ u64 target_freq);
+void __kbase_tlstream_aux_protected_enter_start(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_protected_enter_end(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_protected_leave_start(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_protected_leave_end(
+ struct kbase_tlstream *stream,
+ const void *gpu);
+void __kbase_tlstream_aux_jit_stats(
+ struct kbase_tlstream *stream,
+ u32 ctx_nr,
+ u32 bid,
+ u32 max_allocs,
+ u32 allocs,
+ u32 va_pages,
+ u32 ph_pages);
+void __kbase_tlstream_aux_event_job_slot(
+ struct kbase_tlstream *stream,
+ const void *ctx,
+ u32 slot_nr,
+ u32 atom_nr,
+ u32 event);
+void __kbase_tlstream_tl_kbase_new_device(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_gpu_core_count,
+ u32 kbase_device_max_num_csgs);
+void __kbase_tlstream_tl_kbase_device_program_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 gpu_cmdq_grp_handle,
+ u32 kbase_device_csg_slot_index);
+void __kbase_tlstream_tl_kbase_device_deprogram_csg(
+ struct kbase_tlstream *stream,
+ u32 kbase_device_id,
+ u32 kbase_device_csg_slot_index);
+void __kbase_tlstream_tl_kbase_new_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id,
+ u32 kbase_device_id);
+void __kbase_tlstream_tl_kbase_del_ctx(
+ struct kbase_tlstream *stream,
+ u32 kernel_ctx_id);
+void __kbase_tlstream_tl_kbase_new_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u32 kernel_ctx_id,
+ u32 kcpuq_num_pending_cmds);
+void __kbase_tlstream_tl_kbase_del_kcpuqueue(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *fence);
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ const void *fence);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr,
+ u32 cqs_obj_compare_value);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_wait(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 cqs_obj_gpu_addr);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 debugcopy_dst_size);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_debugcopy(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 map_import_buf_gpu_addr);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr_dest,
+ u64 jit_alloc_va_pages,
+ u64 jit_alloc_commit_pages,
+ u64 jit_alloc_extent,
+ u32 jit_alloc_jit_id,
+ u32 jit_alloc_bin_id,
+ u32 jit_alloc_max_allocations,
+ u32 jit_alloc_flags,
+ u32 jit_alloc_usage_id);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u32 jit_alloc_jit_id);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_alloc_gpu_alloc_addr,
+ u64 jit_alloc_mmu_flags);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue,
+ u64 jit_free_pages_used);
+void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_errorbarrier(
+ struct kbase_tlstream *stream,
+ const void *kcpu_queue);
+
+struct kbase_tlstream;
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_CTX -
+ * object ctx is created
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @ctx_nr: Kernel context number
+ * @tgid: Thread Group Id
+ */
+#define KBASE_TLSTREAM_TL_NEW_CTX( \
+ kbdev, \
+ ctx, \
+ ctx_nr, \
+ tgid \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx, ctx_nr, tgid); \
+ } while (0)
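+
+/* Example (editorial sketch, not generated code): a caller in the
+ * driver would typically emit this event when a new context is
+ * created, e.g.
+ *
+ *   KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx_id, tgid);
+ *
+ * where kctx is the new context object, kctx_id its kernel context
+ * number and tgid the creating thread group id; the names are
+ * illustrative only. The expansion above drops the event unless user
+ * space has enabled the timeline stream (TLSTREAM_ENABLED bit set in
+ * kbdev->timeline_is_enabled).
+ */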
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_GPU -
+ * object gpu is created
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ * @gpu_id: ID of the GPU
+ * @core_count: Number of cores this GPU hosts
+ */
+#define KBASE_TLSTREAM_TL_NEW_GPU( \
+ kbdev, \
+ gpu, \
+ gpu_id, \
+ core_count \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_gpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ gpu, gpu_id, core_count); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_LPU -
+ * object lpu is created
+ *
+ * @kbdev: Kbase device
+ * @lpu: Name of the Logical Processing Unit object
+ * @lpu_nr: Sequential number assigned to the newly created LPU
+ * @lpu_fn: Property describing functional abilities of this LPU
+ */
+#define KBASE_TLSTREAM_TL_NEW_LPU( \
+ kbdev, \
+ lpu, \
+ lpu_nr, \
+ lpu_fn \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ lpu, lpu_nr, lpu_fn); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_ATOM -
+ * object atom is created
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @atom_nr: Sequential number of an atom
+ */
+#define KBASE_TLSTREAM_TL_NEW_ATOM( \
+ kbdev, \
+ atom, \
+ atom_nr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_atom( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, atom_nr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_AS -
+ * address space object is created
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @as_nr: Address space number
+ */
+#define KBASE_TLSTREAM_TL_NEW_AS( \
+ kbdev, \
+ address_space, \
+ as_nr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_new_as( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, as_nr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_CTX -
+ * context is destroyed
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_DEL_CTX( \
+ kbdev, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_del_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_ATOM -
+ * atom is destroyed
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_DEL_ATOM( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_del_atom( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU -
+ * lpu is deleted with gpu
+ *
+ * @kbdev: Kbase device
+ * @lpu: Name of the Logical Processing Unit object
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU( \
+ kbdev, \
+ lpu, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_lifelink_lpu_gpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ lpu, gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_LIFELINK_AS_GPU -
+ * address space is deleted with gpu
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU( \
+ kbdev, \
+ address_space, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_lifelink_as_gpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_CTX_LPU -
+ * context is retained by lpu
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_RET_CTX_LPU( \
+ kbdev, \
+ ctx, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_ctx_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx, lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_CTX -
+ * atom is retained by context
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_CTX( \
+ kbdev, \
+ atom, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_atom_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_LPU -
+ * atom is retained by lpu
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @lpu: Name of the Logical Processing Unit object
+ * @attrib_match_list: List containing match operator attributes
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_LPU( \
+ kbdev, \
+ atom, \
+ lpu, \
+ attrib_match_list \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_atom_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, lpu, attrib_match_list); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_CTX_LPU -
+ * context is released by lpu
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_NRET_CTX_LPU( \
+ kbdev, \
+ ctx, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_ctx_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ ctx, lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_CTX -
+ * atom is released by context
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX( \
+ kbdev, \
+ atom, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_atom_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_LPU -
+ * atom is released by lpu
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU( \
+ kbdev, \
+ atom, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_atom_lpu( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_AS_CTX -
+ * address space is retained by context
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_RET_AS_CTX( \
+ kbdev, \
+ address_space, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_as_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_AS_CTX -
+ * address space is released by context
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @ctx: Name of the context object
+ */
+#define KBASE_TLSTREAM_TL_NRET_AS_CTX( \
+ kbdev, \
+ address_space, \
+ ctx \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_as_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, ctx); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_AS -
+ * atom is retained by address space
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @address_space: Name of the address space object
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_AS( \
+ kbdev, \
+ atom, \
+ address_space \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_ret_atom_as( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, address_space); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_AS -
+ * atom is released by address space
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @address_space: Name of the address space object
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_AS( \
+ kbdev, \
+ atom, \
+ address_space \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_nret_atom_as( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, address_space); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG -
+ * atom job slot attributes
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @descriptor: Job descriptor address
+ * @affinity: Job affinity
+ * @config: Job config
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG( \
+ kbdev, \
+ atom, \
+ descriptor, \
+ affinity, \
+ config \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_config( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, descriptor, affinity, config); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY -
+ * atom priority
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @prio: Atom priority
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY( \
+ kbdev, \
+ atom, \
+ prio \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_tl_attrib_atom_priority( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, prio); \
+ } while (0)
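+
+/* The priority and state attribute tracepoints are gated by
+ * BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS rather than TLSTREAM_ENABLED,
+ * so they are emitted only when that flag bit is set for the timeline
+ * (presumably requested by the client when the stream was acquired).
+ * A hypothetical call site, with katom and prio as placeholder names:
+ *
+ *   KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(kbdev, katom, prio);
+ */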
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE -
+ * atom state
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @state: Atom state
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE( \
+ kbdev, \
+ atom, \
+ state \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_tl_attrib_atom_state( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, state); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED -
+ * atom caused priority change
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_tl_attrib_atom_prioritized( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT -
+ * jit done for atom
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @edit_addr: Address edited by jit
+ * @new_addr: Address placed into the edited location
+ * @jit_flags: Flags specifying the special requirements for
+ * the JIT allocation.
+ * @mem_flags: Flags defining the properties of a memory region
+ * @j_id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ * @com_pgs: The minimum number of physical pages which
+ * should back the allocation.
+ * @extent: Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @va_pgs: The minimum number of virtual pages required
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( \
+ kbdev, \
+ atom, \
+ edit_addr, \
+ new_addr, \
+ jit_flags, \
+ mem_flags, \
+ j_id, \
+ com_pgs, \
+ extent, \
+ va_pgs \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_jit( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, edit_addr, new_addr, jit_flags, mem_flags, j_id, com_pgs, extent, va_pgs); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_JIT_USEDPAGES -
+ * used pages for jit
+ *
+ * @kbdev: Kbase device
+ * @used_pages: Number of pages used for jit
+ * @j_id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_JIT_USEDPAGES( \
+ kbdev, \
+ used_pages, \
+ j_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_jit_usedpages( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ used_pages, j_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO -
+ * Information about JIT allocations
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @va_pgs: The minimum number of virtual pages required
+ * @com_pgs: The minimum number of physical pages which
+ * should back the allocation.
+ * @extent: Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @j_id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ * @bin_id: The JIT allocation bin, used in conjunction with
+ * max_allocations to limit the number of each
+ * type of JIT allocation.
+ * @max_allocs: Maximum allocations allowed in this bin.
+ * @jit_flags: Flags specifying the special requirements for
+ * the JIT allocation.
+ * @usg_id: A hint about which allocation should be reused.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO( \
+ kbdev, \
+ atom, \
+ va_pgs, \
+ com_pgs, \
+ extent, \
+ j_id, \
+ bin_id, \
+ max_allocs, \
+ jit_flags, \
+ usg_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_jitallocinfo( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, va_pgs, com_pgs, extent, j_id, bin_id, max_allocs, jit_flags, usg_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO -
+ * Information about JIT frees
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ * @j_id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO( \
+ kbdev, \
+ atom, \
+ j_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom, j_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG -
+ * address space attributes
+ *
+ * @kbdev: Kbase device
+ * @address_space: Name of the address space object
+ * @transtab: Configuration of the TRANSTAB register
+ * @memattr: Configuration of the MEMATTR register
+ * @transcfg: Configuration of the TRANSCFG register (or zero if not present)
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG( \
+ kbdev, \
+ address_space, \
+ transtab, \
+ memattr, \
+ transcfg \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_attrib_as_config( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ address_space, transtab, memattr, transcfg); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP -
+ * softstop event on given lpu
+ *
+ * @kbdev: Kbase device
+ * @lpu: Name of the Logical Processing Unit object
+ */
+#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( \
+ kbdev, \
+ lpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_lpu_softstop( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ lpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX -
+ * atom softstopped
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softstop_ex( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE -
+ * atom softstop issued
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softstop_issue( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START -
+ * atom soft job has started
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softjob_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END -
+ * atom soft job has completed
+ *
+ * @kbdev: Kbase device
+ * @atom: Atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END( \
+ kbdev, \
+ atom \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_event_atom_softjob_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ atom); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_JD_GPU_SOFT_RESET -
+ * gpu soft reset
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_jd_gpu_soft_reset( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PM_STATE -
+ * PM state
+ *
+ * @kbdev: Kbase device
+ * @core_type: Core type (shader, tiler, l2 cache, l3 cache)
+ * @core_state_bitset: 64-bit bitmask reporting the power state of the cores
+ * (1-ON, 0-OFF)
+ */
+#define KBASE_TLSTREAM_AUX_PM_STATE( \
+ kbdev, \
+ core_type, \
+ core_state_bitset \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pm_state( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ core_type, core_state_bitset); \
+ } while (0)
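+
+/* Illustrative use (a sketch, not part of the generated header): the power
+ * management backend could report shader core readiness with something like
+ *
+ *   KBASE_TLSTREAM_AUX_PM_STATE(kbdev, KBASE_PM_CORE_SHADER,
+ *           shader_ready_bitmap);
+ *
+ * KBASE_PM_CORE_SHADER and shader_ready_bitmap are assumed names used only
+ * for illustration. Note that this macro dispatches to the aux stream
+ * rather than the obj stream.
+ */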
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGEFAULT -
+ * Page fault
+ *
+ * @kbdev: Kbase device
+ * @ctx_nr: Kernel context number
+ * @as_nr: Address space number
+ * @page_cnt_change: Number of pages to be added
+ */
+#define KBASE_TLSTREAM_AUX_PAGEFAULT( \
+ kbdev, \
+ ctx_nr, \
+ as_nr, \
+ page_cnt_change \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagefault( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, as_nr, page_cnt_change); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGESALLOC -
+ * Total alloc pages change
+ *
+ * @kbdev: Kbase device
+ * @ctx_nr: Kernel context number
+ * @page_cnt: Number of pages used by the context
+ */
+#define KBASE_TLSTREAM_AUX_PAGESALLOC( \
+ kbdev, \
+ ctx_nr, \
+ page_cnt \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagesalloc( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, page_cnt); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET -
+ * New device frequency target
+ *
+ * @kbdev: Kbase device
+ * @target_freq: New target frequency
+ */
+#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET( \
+ kbdev, \
+ target_freq \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_devfreq_target( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ target_freq); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START -
+ * enter protected mode start
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_enter_start( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END -
+ * enter protected mode end
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_enter_end( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START -
+ * leave protected mode start
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_leave_start( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END -
+ * leave protected mode end
+ *
+ * @kbdev: Kbase device
+ * @gpu: Name of the GPU object
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END( \
+ kbdev, \
+ gpu \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+ __kbase_tlstream_aux_protected_leave_end( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ gpu); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_JIT_STATS -
+ * per-bin JIT statistics
+ *
+ * @kbdev: Kbase device
+ * @ctx_nr: Kernel context number
+ * @bid: JIT bin id
+ * @max_allocs: Maximum allocations allowed in this bin.
+ * @allocs: Number of active allocations in this bin
+ * @va_pages: Number of virtual pages allocated in this bin
+ * @ph_pages: Number of physical pages allocated in this bin
+ */
+#define KBASE_TLSTREAM_AUX_JIT_STATS( \
+ kbdev, \
+ ctx_nr, \
+ bid, \
+ max_allocs, \
+ allocs, \
+ va_pages, \
+ ph_pages \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_jit_stats( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT -
+ * event on a given job slot
+ *
+ * @kbdev: Kbase device
+ * @ctx: Name of the context object
+ * @slot_nr: Job slot number
+ * @atom_nr: Sequential number of an atom
+ * @event: Event type. One of TL_JS_EVENT values
+ */
+#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT( \
+ kbdev, \
+ ctx, \
+ slot_nr, \
+ atom_nr, \
+ event \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_event_job_slot( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx, slot_nr, atom_nr, event); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE -
+ * New KBase Device
+ *
+ * @kbdev: Kbase device
+ * @kbase_device_id: The id of the physical hardware
+ * @kbase_device_gpu_core_count: The number of gpu cores in the physical hardware
+ * @kbase_device_max_num_csgs: The max number of CSGs the physical hardware supports
+ */
+#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
+ kbdev, \
+ kbase_device_id, \
+ kbase_device_gpu_core_count, \
+ kbase_device_max_num_csgs \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_new_device( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kbase_device_id, kbase_device_gpu_core_count, kbase_device_max_num_csgs); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG -
+ * CSG is programmed to a slot
+ *
+ * @kbdev: Kbase device
+ * @kbase_device_id: The id of the physical hardware
+ * @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
+ * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
+ kbdev, \
+ kbase_device_id, \
+ gpu_cmdq_grp_handle, \
+ kbase_device_csg_slot_index \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_device_program_csg( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kbase_device_id, gpu_cmdq_grp_handle, kbase_device_csg_slot_index); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG -
+ * CSG is deprogrammed from a slot
+ *
+ * @kbdev: Kbase device
+ * @kbase_device_id: The id of the physical hardware
+ * @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
+ kbdev, \
+ kbase_device_id, \
+ kbase_device_csg_slot_index \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_device_deprogram_csg( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kbase_device_id, kbase_device_csg_slot_index); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_NEW_CTX -
+ * New KBase Context
+ *
+ * @kbdev: Kbase device
+ * @kernel_ctx_id: Unique ID for the KBase Context
+ * @kbase_device_id: The id of the physical hardware
+ */
+#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
+ kbdev, \
+ kernel_ctx_id, \
+ kbase_device_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_new_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kernel_ctx_id, kbase_device_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEL_CTX -
+ * Delete KBase Context
+ *
+ * @kbdev: Kbase device
+ * @kernel_ctx_id: Unique ID for the KBase Context
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
+ kbdev, \
+ kernel_ctx_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_del_ctx( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kernel_ctx_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE -
+ * New KCPU Queue
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @kernel_ctx_id: Unique ID for the KBase Context
+ * @kcpuq_num_pending_cmds: Number of commands already enqueued
+ * in the KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
+ kbdev, \
+ kcpu_queue, \
+ kernel_ctx_id, \
+ kcpuq_num_pending_cmds \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_new_kcpuqueue( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, kernel_ctx_id, kcpuq_num_pending_cmds); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE -
+ * Delete KCPU Queue
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_del_kcpuqueue( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL -
+ * KCPU Queue enqueues Signal on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @fence: Fence object handle
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
+ kbdev, \
+ kcpu_queue, \
+ fence \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, fence); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT -
+ * KCPU Queue enqueues Wait on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @fence: Fence object handle
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
+ kbdev, \
+ kcpu_queue, \
+ fence \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, fence); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @cqs_obj_gpu_addr: CQS Object GPU ptr
+ * @cqs_obj_compare_value: Semaphore value that should be exceeded
+ * for the WAIT to pass
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+ kbdev, \
+ kcpu_queue, \
+ cqs_obj_gpu_addr, \
+ cqs_obj_compare_value \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, cqs_obj_gpu_addr, cqs_obj_compare_value); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT -
+ * End array of KCPU Queue enqueues Wait on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_wait( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
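+
+/* The ARRAY_BEGIN / ARRAY_ITEM / ARRAY_END triplets are presumably meant to
+ * bracket a per-element loop at the call site, e.g. (hypothetical sketch,
+ * with queue, nr_objs and objs[] as placeholder names):
+ *
+ *   KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT(
+ *           kbdev, queue);
+ *   for (i = 0; i < nr_objs; i++)
+ *           KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT(
+ *                   kbdev, queue, objs[i].addr, objs[i].val);
+ *   KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT(
+ *           kbdev, queue);
+ */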
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * Array item of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @cqs_obj_gpu_addr: CQS Object GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET( \
+ kbdev, \
+ kcpu_queue, \
+ cqs_obj_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, cqs_obj_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET -
+ * End array of KCPU Queue enqueues Set on Cross Queue Sync Object
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * Begin array of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_debugcopy( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * Array item of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @debugcopy_dst_size: Debug Copy destination size
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+ kbdev, \
+ kcpu_queue, \
+ debugcopy_dst_size \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_debugcopy( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, debugcopy_dst_size); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY -
+ * End array of KCPU Queue enqueues Debug Copy
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_debugcopy( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT -
+ * KCPU Queue enqueues Map Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @map_import_buf_gpu_addr: Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
+ kbdev, \
+ kcpu_queue, \
+ map_import_buf_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, map_import_buf_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT -
+ * KCPU Queue enqueues Unmap Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @map_import_buf_gpu_addr: Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
+ kbdev, \
+ kcpu_queue, \
+ map_import_buf_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, map_import_buf_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE -
+ * KCPU Queue enqueues Unmap Import ignoring reference count
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @map_import_buf_gpu_addr: Map import buffer GPU ptr
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
+ kbdev, \
+ kcpu_queue, \
+ map_import_buf_gpu_addr \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, map_import_buf_gpu_addr); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * Begin array of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * Array item of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_alloc_gpu_alloc_addr_dest: The GPU virtual address to write
+ * the JIT allocated GPU virtual address to
+ * @jit_alloc_va_pages: The minimum number of virtual pages required
+ * @jit_alloc_commit_pages: The minimum number of physical pages which
+ * should back the allocation
+ * @jit_alloc_extent: Granularity of physical pages to grow the allocation
+ * by during a fault
+ * @jit_alloc_jit_id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests. Zero is not a valid value
+ * @jit_alloc_bin_id: The JIT allocation bin, used in conjunction with
+ * max_allocations to limit the number of each type of JIT allocation
+ * @jit_alloc_max_allocations: The maximum number of allocations
+ * allowed within the bin specified by bin_id. Should be the same for all
+ * JIT allocations within the same bin.
+ * @jit_alloc_flags: Flags specifying the special requirements for the
+ * JIT allocation
+ * @jit_alloc_usage_id: A hint about which allocation should be
+ * reused. The kernel should attempt to use a previous allocation with the same
+ * usage_id
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+ kbdev, \
+ kcpu_queue, \
+ jit_alloc_gpu_alloc_addr_dest, \
+ jit_alloc_va_pages, \
+ jit_alloc_commit_pages, \
+ jit_alloc_extent, \
+ jit_alloc_jit_id, \
+ jit_alloc_bin_id, \
+ jit_alloc_max_allocations, \
+ jit_alloc_flags, \
+ jit_alloc_usage_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_alloc_gpu_alloc_addr_dest, jit_alloc_va_pages, jit_alloc_commit_pages, jit_alloc_extent, jit_alloc_jit_id, jit_alloc_bin_id, jit_alloc_max_allocations, jit_alloc_flags, jit_alloc_usage_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC -
+ * End array of KCPU Queue enqueues JIT Alloc
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * Begin array of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * Array item of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_alloc_jit_id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests. Zero is not a valid value
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+ kbdev, \
+ kcpu_queue, \
+ jit_alloc_jit_id \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_alloc_jit_id); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE -
+ * End array of KCPU Queue enqueues JIT Free
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START -
+ * KCPU Queue starts a Signal on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END -
+ * KCPU Queue ends a Signal on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START -
+ * KCPU Queue starts a Wait on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END -
+ * KCPU Queue ends a Wait on Fence
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START -
+ * KCPU Queue starts a Wait on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END -
+ * KCPU Queue ends a Wait on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET -
+ * KCPU Queue executes a Set on an array of Cross Queue Sync Objects
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START -
+ * KCPU Queue starts an array of Debug Copies
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END -
+ * KCPU Queue ends an array of Debug Copies
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_DEBUGCOPY_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_debugcopy_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START -
+ * KCPU Queue starts a Map Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END -
+ * KCPU Queue ends a Map Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START -
+ * KCPU Queue starts an Unmap Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END -
+ * KCPU Queue ends an Unmap Import
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START -
+ * KCPU Queue starts an Unmap Import ignoring reference count
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END -
+ * KCPU Queue ends an Unmap Import ignoring reference count
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START -
+ * KCPU Queue starts an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * Begin array of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * Array item of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address
+ * @jit_alloc_mmu_flags: The MMU flags for the JIT allocation
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+ kbdev, \
+ kcpu_queue, \
+ jit_alloc_gpu_alloc_addr, \
+ jit_alloc_mmu_flags \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_alloc_gpu_alloc_addr, jit_alloc_mmu_flags); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END -
+ * End array of KCPU Queue ends an array of JIT Allocs
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START -
+ * KCPU Queue starts an array of JIT Frees
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * Begin array of KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * Array item of KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ * @jit_free_pages_used: The actual number of pages used by the JIT
+ * allocation
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+ kbdev, \
+ kcpu_queue, \
+ jit_free_pages_used \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue, jit_free_pages_used); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END -
+ * End array of KCPU Queue ends an array of JIT Frees
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+/**
+ * KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER -
+ * KCPU Queue executes an Error Barrier
+ *
+ * @kbdev: Kbase device
+ * @kcpu_queue: KCPU queue
+ */
+#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERRORBARRIER( \
+ kbdev, \
+ kcpu_queue \
+ ) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_tl_kbase_kcpuqueue_execute_errorbarrier( \
+ __TL_DISPATCH_STREAM(kbdev, obj), \
+ kcpu_queue); \
+ } while (0)
+
+
+/* Gator tracepoints are hooked into the TLSTREAM interface.
+ * When any of the following tracepoints is called, the
+ * corresponding Gator tracepoint is called as well.
+ */
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+/* `event` is one of the TL_JS_EVENT values here.
+ * The values of TL_JS_EVENT are guaranteed to match
+ * the corresponding GATOR_JOB_SLOT values.
+ */
+#undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT
+#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
+ context, slot_nr, atom_nr, event) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ kbase_trace_mali_job_slots_event(kbdev->id, \
+ GATOR_MAKE_EVENT(event, slot_nr), \
+ context, (u8) atom_nr); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_event_job_slot( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ context, slot_nr, atom_nr, event); \
+ } while (0)
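+
+/* Note that in this configuration the Gator callback above is invoked
+ * unconditionally, while the timeline stream write is still gated by
+ * TLSTREAM_ENABLED; the same pattern applies to the overrides below.
+ */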
+
+#undef KBASE_TLSTREAM_AUX_PM_STATE
+#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ kbase_trace_mali_pm_status(kbdev->id, \
+ core_type, state); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pm_state( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ core_type, state); \
+ } while (0)
+
+#undef KBASE_TLSTREAM_AUX_PAGEFAULT
+#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
+ ctx_nr, as_nr, page_cnt_change) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ kbase_trace_mali_page_fault_insert_pages(kbdev->id, \
+ as_nr, \
+ page_cnt_change); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagefault( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, as_nr, page_cnt_change); \
+ } while (0)
+
+/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
+ * We stream the total number of pages allocated for `kbdev` rather
+ * than `page_cnt`, which is per-context.
+ */
+#undef KBASE_TLSTREAM_AUX_PAGESALLOC
+#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \
+ do { \
+ int enabled = atomic_read(&kbdev->timeline_is_enabled); \
+ u32 global_pages_count = \
+ atomic_read(&kbdev->memdev.used_pages); \
+ \
+ kbase_trace_mali_total_alloc_pages_change(kbdev->id, \
+ global_pages_count); \
+ if (enabled & TLSTREAM_ENABLED) \
+ __kbase_tlstream_aux_pagesalloc( \
+ __TL_DISPATCH_STREAM(kbdev, aux), \
+ ctx_nr, page_cnt); \
+ } while (0)
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+/* clang-format on */
+#endif